id
int64 11
59.9k
| original
stringlengths 33
150k
| modified
stringlengths 37
150k
|
---|---|---|
32,033 | def main():
install_logging('Prepare_Content_Packs_For_Testing.log', logger=logging)
option = option_handler()
packs_artifacts_path = option.artifacts_path
extract_destination_path = option.extract_path
storage_bucket_name = option.bucket_name
service_account = option.service_account
target_packs = option.pack_names if option.pack_names else ""
build_number = option.ci_build_number if option.ci_build_number else str(uuid.uuid4())
override_all_packs = option.override_all_packs
signature_key = option.key_string
packs_dependencies_mapping = load_json(option.pack_dependencies) if option.pack_dependencies else {}
storage_base_path = option.storage_base_path
remove_test_playbooks = option.remove_test_playbooks
is_bucket_upload_flow = option.bucket_upload
private_bucket_name = option.private_bucket_name
ci_branch = option.ci_branch
force_upload = option.force_upload
marketplace = option.marketplace
# google cloud storage client initialized
storage_client = init_storage_client(service_account)
storage_bucket = storage_client.bucket(storage_bucket_name)
# Relevant when triggering test upload flow
if storage_bucket_name:
GCPConfig.PRODUCTION_BUCKET = storage_bucket_name
# download and extract index from public bucket
index_folder_path, index_blob, index_generation = download_and_extract_index(storage_bucket,
extract_destination_path,
storage_base_path)
# content repo client initialized
content_repo = get_content_git_client(CONTENT_ROOT_PATH)
current_commit_hash, previous_commit_hash = get_recent_commits_data(content_repo, index_folder_path,
is_bucket_upload_flow, ci_branch)
# detect packs to upload
pack_names = get_packs_names(target_packs, previous_commit_hash)
extract_packs_artifacts(packs_artifacts_path, extract_destination_path)
packs_list = [Pack(pack_name, os.path.join(extract_destination_path, pack_name), marketplace) for pack_name in pack_names
if os.path.exists(os.path.join(extract_destination_path, pack_name))]
diff_files_list = content_repo.commit(current_commit_hash).diff(content_repo.commit(previous_commit_hash))
# taking care of private packs
is_private_content_updated, private_packs, updated_private_packs_ids = handle_private_content(
index_folder_path, private_bucket_name, extract_destination_path, storage_client, pack_names, storage_base_path
)
if not option.override_all_packs:
check_if_index_is_updated(index_folder_path, content_repo, current_commit_hash, previous_commit_hash,
storage_bucket, is_private_content_updated)
# initiate the statistics handler for marketplace packs
statistics_handler = StatisticsHandler(service_account, index_folder_path)
# clean index and gcs from non existing or invalid packs
clean_non_existing_packs(index_folder_path, private_packs, storage_bucket, storage_base_path)
# Packages that depend on new packs that are not in the previous index.json
packs_missing_dependencies = []
# starting iteration over packs
for pack in packs_list:
task_status = pack.load_user_metadata(marketplace)
if not task_status:
pack.status = PackStatus.FAILED_LOADING_USER_METADATA.value
pack.cleanup()
continue
if not pack.should_upload_to_market_place:
logging.warning(f"Skipping {pack.name} pack as it is not supported in the current marketplace.")
pack.status = PackStatus.NOT_RELEVANT_FOR_MARKETPLACE.name
pack.cleanup()
continue
task_status = pack.collect_content_items()
if not task_status:
pack.status = PackStatus.FAILED_COLLECT_ITEMS.name
pack.cleanup()
continue
task_status = pack.upload_integration_images(storage_bucket, storage_base_path, diff_files_list, True)
if not task_status:
pack.status = PackStatus.FAILED_IMAGES_UPLOAD.name
pack.cleanup()
continue
task_status = pack.upload_author_image(storage_bucket, storage_base_path, diff_files_list, True)
if not task_status:
pack.status = PackStatus.FAILED_AUTHOR_IMAGE_UPLOAD.name
pack.cleanup()
continue
task_status, modified_rn_files_paths, pack_was_modified = pack.detect_modified(
content_repo, index_folder_path, current_commit_hash, previous_commit_hash)
if not task_status:
pack.status = PackStatus.FAILED_DETECTING_MODIFIED_FILES.name
pack.cleanup()
continue
task_status, is_missing_dependencies = pack.format_metadata(index_folder_path,
packs_dependencies_mapping, build_number,
current_commit_hash, pack_was_modified,
statistics_handler, pack_names)
if is_missing_dependencies:
# If the pack is dependent on a new pack
# (which is not yet in the index.zip as it might not have been iterated yet)
# we will note that it is missing dependencies.
# And finally after updating all the packages in index.zip - i.e. the new pack exists now.
# We will go over the pack again to add what was missing.
# See issue #37290
packs_missing_dependencies.append(pack)
if not task_status:
pack.status = PackStatus.FAILED_METADATA_PARSING.name
pack.cleanup()
continue
task_status, not_updated_build = pack.prepare_release_notes(index_folder_path, build_number, pack_was_modified,
modified_rn_files_paths)
if not task_status:
pack.status = PackStatus.FAILED_RELEASE_NOTES.name
pack.cleanup()
continue
if not_updated_build:
pack.status = PackStatus.PACK_IS_NOT_UPDATED_IN_RUNNING_BUILD.name
pack.cleanup()
continue
task_status = pack.remove_unwanted_files(remove_test_playbooks)
if not task_status:
pack.status = PackStatus.FAILED_REMOVING_PACK_SKIPPED_FOLDERS
pack.cleanup()
continue
task_status = pack.sign_pack(signature_key)
if not task_status:
pack.status = PackStatus.FAILED_SIGNING_PACKS.name
pack.cleanup()
continue
task_status, zip_pack_path = pack.zip_pack()
if not task_status:
pack.status = PackStatus.FAILED_ZIPPING_PACK_ARTIFACTS.name
pack.cleanup()
continue
task_status, skipped_upload, _ = pack.upload_to_storage(zip_pack_path, pack.latest_version, storage_bucket,
override_all_packs or pack_was_modified,
storage_base_path)
if not task_status:
pack.status = PackStatus.FAILED_UPLOADING_PACK.name
pack.cleanup()
continue
task_status, exists_in_index = pack.check_if_exists_in_index(index_folder_path)
if not task_status:
pack.status = PackStatus.FAILED_SEARCHING_PACK_IN_INDEX.name
pack.cleanup()
continue
task_status = pack.prepare_for_index_upload()
if not task_status:
pack.status = PackStatus.FAILED_PREPARING_INDEX_FOLDER.name
pack.cleanup()
continue
task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path,
pack_version=pack.latest_version, hidden_pack=pack.hidden)
if not task_status:
pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name
pack.cleanup()
continue
# in case that pack already exist at cloud storage path and in index, don't show that the pack was changed
if skipped_upload and exists_in_index and pack not in packs_missing_dependencies:
pack.status = PackStatus.PACK_ALREADY_EXISTS.name
pack.cleanup()
continue
pack.status = PackStatus.SUCCESS.name
logging.info(f"packs_missing_dependencies: {packs_missing_dependencies}")
# Going over all packs that were marked as missing dependencies,
# updating them with the new data for the new packs that were added to the index.zip
for pack in packs_missing_dependencies:
task_status, _ = pack.format_metadata(index_folder_path, packs_dependencies_mapping,
build_number, current_commit_hash, False, statistics_handler,
pack_names, format_dependencies_only=True)
if not task_status:
pack.status = PackStatus.FAILED_METADATA_REFORMATING.name
pack.cleanup()
continue
task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path,
pack_version=pack.latest_version, hidden_pack=pack.hidden)
if not task_status:
pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name
pack.cleanup()
continue
pack.status = PackStatus.SUCCESS.name
# upload core packs json to bucket
create_corepacks_config(storage_bucket, build_number, index_folder_path,
os.path.dirname(packs_artifacts_path), storage_base_path, marketplace)
# finished iteration over content packs
upload_index_to_storage(index_folder_path=index_folder_path, extract_destination_path=extract_destination_path,
index_blob=index_blob, build_number=build_number, private_packs=private_packs,
current_commit_hash=current_commit_hash, index_generation=index_generation,
force_upload=force_upload, previous_commit_hash=previous_commit_hash,
landing_page_sections=statistics_handler.landing_page_sections,
artifacts_dir=os.path.dirname(packs_artifacts_path),
storage_bucket=storage_bucket,
)
# get the lists of packs divided by their status
successful_packs, skipped_packs, failed_packs = get_packs_summary(packs_list)
# Store successful and failed packs list in CircleCI artifacts - to be used in Upload Packs To Marketplace job
packs_results_file_path = os.path.join(os.path.dirname(packs_artifacts_path), BucketUploadFlow.PACKS_RESULTS_FILE)
store_successful_and_failed_packs_in_ci_artifacts(
packs_results_file_path, BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING, successful_packs, failed_packs,
updated_private_packs_ids, images_data=get_images_data(packs_list)
)
# summary of packs status
print_packs_summary(successful_packs, skipped_packs, failed_packs, not is_bucket_upload_flow)
| def main():
install_logging('Prepare_Content_Packs_For_Testing.log', logger=logging)
option = option_handler()
packs_artifacts_path = option.artifacts_path
extract_destination_path = option.extract_path
storage_bucket_name = option.bucket_name
service_account = option.service_account
target_packs = option.pack_names if option.pack_names else ""
build_number = option.ci_build_number if option.ci_build_number else str(uuid.uuid4())
override_all_packs = option.override_all_packs
signature_key = option.key_string
packs_dependencies_mapping = load_json(option.pack_dependencies) if option.pack_dependencies else {}
storage_base_path = option.storage_base_path
remove_test_playbooks = option.remove_test_playbooks
is_bucket_upload_flow = option.bucket_upload
private_bucket_name = option.private_bucket_name
ci_branch = option.ci_branch
force_upload = option.force_upload
marketplace = option.marketplace
# google cloud storage client initialized
storage_client = init_storage_client(service_account)
storage_bucket = storage_client.bucket(storage_bucket_name)
# Relevant when triggering test upload flow
if storage_bucket_name:
GCPConfig.PRODUCTION_BUCKET = storage_bucket_name
# download and extract index from public bucket
index_folder_path, index_blob, index_generation = download_and_extract_index(storage_bucket,
extract_destination_path,
storage_base_path)
# content repo client initialized
content_repo = get_content_git_client(CONTENT_ROOT_PATH)
current_commit_hash, previous_commit_hash = get_recent_commits_data(content_repo, index_folder_path,
is_bucket_upload_flow, ci_branch)
# detect packs to upload
pack_names = get_packs_names(target_packs, previous_commit_hash)
extract_packs_artifacts(packs_artifacts_path, extract_destination_path)
packs_list = [Pack(pack_name, os.path.join(extract_destination_path, pack_name), marketplace) for pack_name in pack_names
if os.path.exists(os.path.join(extract_destination_path, pack_name))]
diff_files_list = content_repo.commit(current_commit_hash).diff(content_repo.commit(previous_commit_hash))
# taking care of private packs
is_private_content_updated, private_packs, updated_private_packs_ids = handle_private_content(
index_folder_path, private_bucket_name, extract_destination_path, storage_client, pack_names, storage_base_path
)
if not option.override_all_packs:
check_if_index_is_updated(index_folder_path, content_repo, current_commit_hash, previous_commit_hash,
storage_bucket, is_private_content_updated)
# initiate the statistics handler for marketplace packs
statistics_handler = StatisticsHandler(service_account, index_folder_path)
# clean index and gcs from non existing or invalid packs
clean_non_existing_packs(index_folder_path, private_packs, storage_bucket, storage_base_path)
# Packages that depend on new packs that are not in the previous index.json
packs_missing_dependencies = []
# starting iteration over packs
for pack in packs_list:
task_status = pack.load_user_metadata(marketplace)
if not task_status:
pack.status = PackStatus.FAILED_LOADING_USER_METADATA.value
pack.cleanup()
continue
if not pack.should_upload_to_marketplace:
logging.warning(f"Skipping {pack.name} pack as it is not supported in the current marketplace.")
pack.status = PackStatus.NOT_RELEVANT_FOR_MARKETPLACE.name
pack.cleanup()
continue
task_status = pack.collect_content_items()
if not task_status:
pack.status = PackStatus.FAILED_COLLECT_ITEMS.name
pack.cleanup()
continue
task_status = pack.upload_integration_images(storage_bucket, storage_base_path, diff_files_list, True)
if not task_status:
pack.status = PackStatus.FAILED_IMAGES_UPLOAD.name
pack.cleanup()
continue
task_status = pack.upload_author_image(storage_bucket, storage_base_path, diff_files_list, True)
if not task_status:
pack.status = PackStatus.FAILED_AUTHOR_IMAGE_UPLOAD.name
pack.cleanup()
continue
task_status, modified_rn_files_paths, pack_was_modified = pack.detect_modified(
content_repo, index_folder_path, current_commit_hash, previous_commit_hash)
if not task_status:
pack.status = PackStatus.FAILED_DETECTING_MODIFIED_FILES.name
pack.cleanup()
continue
task_status, is_missing_dependencies = pack.format_metadata(index_folder_path,
packs_dependencies_mapping, build_number,
current_commit_hash, pack_was_modified,
statistics_handler, pack_names)
if is_missing_dependencies:
# If the pack is dependent on a new pack
# (which is not yet in the index.zip as it might not have been iterated yet)
# we will note that it is missing dependencies.
# And finally after updating all the packages in index.zip - i.e. the new pack exists now.
# We will go over the pack again to add what was missing.
# See issue #37290
packs_missing_dependencies.append(pack)
if not task_status:
pack.status = PackStatus.FAILED_METADATA_PARSING.name
pack.cleanup()
continue
task_status, not_updated_build = pack.prepare_release_notes(index_folder_path, build_number, pack_was_modified,
modified_rn_files_paths)
if not task_status:
pack.status = PackStatus.FAILED_RELEASE_NOTES.name
pack.cleanup()
continue
if not_updated_build:
pack.status = PackStatus.PACK_IS_NOT_UPDATED_IN_RUNNING_BUILD.name
pack.cleanup()
continue
task_status = pack.remove_unwanted_files(remove_test_playbooks)
if not task_status:
pack.status = PackStatus.FAILED_REMOVING_PACK_SKIPPED_FOLDERS
pack.cleanup()
continue
task_status = pack.sign_pack(signature_key)
if not task_status:
pack.status = PackStatus.FAILED_SIGNING_PACKS.name
pack.cleanup()
continue
task_status, zip_pack_path = pack.zip_pack()
if not task_status:
pack.status = PackStatus.FAILED_ZIPPING_PACK_ARTIFACTS.name
pack.cleanup()
continue
task_status, skipped_upload, _ = pack.upload_to_storage(zip_pack_path, pack.latest_version, storage_bucket,
override_all_packs or pack_was_modified,
storage_base_path)
if not task_status:
pack.status = PackStatus.FAILED_UPLOADING_PACK.name
pack.cleanup()
continue
task_status, exists_in_index = pack.check_if_exists_in_index(index_folder_path)
if not task_status:
pack.status = PackStatus.FAILED_SEARCHING_PACK_IN_INDEX.name
pack.cleanup()
continue
task_status = pack.prepare_for_index_upload()
if not task_status:
pack.status = PackStatus.FAILED_PREPARING_INDEX_FOLDER.name
pack.cleanup()
continue
task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path,
pack_version=pack.latest_version, hidden_pack=pack.hidden)
if not task_status:
pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name
pack.cleanup()
continue
# in case that pack already exist at cloud storage path and in index, don't show that the pack was changed
if skipped_upload and exists_in_index and pack not in packs_missing_dependencies:
pack.status = PackStatus.PACK_ALREADY_EXISTS.name
pack.cleanup()
continue
pack.status = PackStatus.SUCCESS.name
logging.info(f"packs_missing_dependencies: {packs_missing_dependencies}")
# Going over all packs that were marked as missing dependencies,
# updating them with the new data for the new packs that were added to the index.zip
for pack in packs_missing_dependencies:
task_status, _ = pack.format_metadata(index_folder_path, packs_dependencies_mapping,
build_number, current_commit_hash, False, statistics_handler,
pack_names, format_dependencies_only=True)
if not task_status:
pack.status = PackStatus.FAILED_METADATA_REFORMATING.name
pack.cleanup()
continue
task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path,
pack_version=pack.latest_version, hidden_pack=pack.hidden)
if not task_status:
pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name
pack.cleanup()
continue
pack.status = PackStatus.SUCCESS.name
# upload core packs json to bucket
create_corepacks_config(storage_bucket, build_number, index_folder_path,
os.path.dirname(packs_artifacts_path), storage_base_path, marketplace)
# finished iteration over content packs
upload_index_to_storage(index_folder_path=index_folder_path, extract_destination_path=extract_destination_path,
index_blob=index_blob, build_number=build_number, private_packs=private_packs,
current_commit_hash=current_commit_hash, index_generation=index_generation,
force_upload=force_upload, previous_commit_hash=previous_commit_hash,
landing_page_sections=statistics_handler.landing_page_sections,
artifacts_dir=os.path.dirname(packs_artifacts_path),
storage_bucket=storage_bucket,
)
# get the lists of packs divided by their status
successful_packs, skipped_packs, failed_packs = get_packs_summary(packs_list)
# Store successful and failed packs list in CircleCI artifacts - to be used in Upload Packs To Marketplace job
packs_results_file_path = os.path.join(os.path.dirname(packs_artifacts_path), BucketUploadFlow.PACKS_RESULTS_FILE)
store_successful_and_failed_packs_in_ci_artifacts(
packs_results_file_path, BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING, successful_packs, failed_packs,
updated_private_packs_ids, images_data=get_images_data(packs_list)
)
# summary of packs status
print_packs_summary(successful_packs, skipped_packs, failed_packs, not is_bucket_upload_flow)
|
38,933 | def test_nested_env_with_dict(env):
class Settings(BaseSettings):
top: Dict[str, str]
class Config:
env_prefix = 'APP_'
with pytest.raises(ValidationError):
Settings()
env.set('APP_top', '{"banana": "secret_value"}')
s = Settings(top={'apple': 'value'})
assert s.top['banana'] == 'secret_value'
| def test_nested_env_with_dict(env):
class Settings(BaseSettings):
top: Dict[str, str]
class Config:
env_prefix = 'APP_'
with pytest.raises(ValidationError):
Settings()
env.set('APP_top', '{"banana": "secret_value"}')
s = Settings(top={'apple': 'value'})
assert s.top == {'apple': 'value', 'banana': 'secret_value'}
|
13,019 | def assign_psp_reference_values(apps, schema_editor):
Payment = apps.get_model("payment", "Payment")
payments = []
for payment in Payment.objects.filter(is_active=True).exclude():
txn = (
payment.transactions.filter(searchable_key__isnull=False)
.exclude(searchable_key="")
.first()
)
if txn:
payment.psp_reference = txn.searchable_key
payments.append(payment)
if payments:
Payment.objects.bulk_update(payments, ["psp_reference"])
| def assign_psp_reference_values(apps, schema_editor):
Payment = apps.get_model("payment", "Payment")
payments = []
for payment in Payment.objects.filter(is_active=True).exclude().iterator():
txn = (
payment.transactions.filter(searchable_key__isnull=False)
.exclude(searchable_key="")
.first()
)
if txn:
payment.psp_reference = txn.searchable_key
payments.append(payment)
if payments:
Payment.objects.bulk_update(payments, ["psp_reference"])
|
30,613 | def extract_matching_object_from_id_set(obj_id, obj_set, server_version='0'):
"""Gets first occurrence of object in the object's id_set with matching id/name and valid from/to version"""
for obj_wrpr in obj_set:
# try to get object by id
if obj_id in obj_wrpr:
obj = obj_wrpr.get(obj_id)
# try to get object by name
else:
obj_keys = list(obj_wrpr.keys())
if len(obj_keys) == 0:
continue
obj = obj_wrpr[obj_keys[0]]
if obj.get('name') != obj_id:
continue
# check if object is runnable
fromversion = obj.get('fromversion', '0')
toversion = obj.get('toversion', '99.99.99')
if is_runnable_in_server_version(from_v=fromversion, server_v=server_version, to_v=toversion):
return obj
return None
| def extract_matching_object_from_id_set(obj_id, obj_set, server_version='0'):
"""Gets first occurrence of object in the object's id_set with matching id/name and valid from/to version"""
for obj_wrpr in obj_set:
# try to get object by id
if obj_id in obj_wrpr:
obj = obj_wrpr.get(obj_id)
# try to get object by name
else:
obj_keys = list(obj_wrpr.keys())
if not obj_keys:
continue
obj = obj_wrpr[obj_keys[0]]
if obj.get('name') != obj_id:
continue
# check if object is runnable
fromversion = obj.get('fromversion', '0')
toversion = obj.get('toversion', '99.99.99')
if is_runnable_in_server_version(from_v=fromversion, server_v=server_version, to_v=toversion):
return obj
return None
|
42,963 | def energies(samples: np.ndarray, wp: np.ndarray) -> np.ndarray:
r"""Computes the energy of GBS samples in :math:`\text{cm}^{-1}` unit.
**Example usage:**
>>> samples = np.array([[1, 1, 0], [1, 0, 2]])
>>> wp = np.array([700.0, 600.0, 500.0])
>>> energies(samples, wp)
[1300.0, 1700.0]
Args:
samples (array): GBS samples
wp (array): normal mode frequencies in :math:`\text{cm}^{-1}`
Returns:
E (list): list of GBS sample energies in :math:`\text{cm}^{-1}`
"""
E = []
for sample in samples:
e = sum(sample * wp)
E.append(e)
return E
| def energies(samples: np.ndarray, wp: np.ndarray) -> np.ndarray:
r"""Computes the energy of GBS samples in :math:`\text{cm}^{-1}` unit.
**Example usage:**
>>> samples = np.array([[1, 1, 0], [1, 0, 2]])
>>> wp = np.array([700.0, 600.0, 500.0])
>>> energies(samples, wp)
[1300.0, 1700.0]
Args:
samples (array): GBS samples
wp (array): normal mode frequencies in :math:`\text{cm}^{-1}`
Returns:
E (list): list of GBS sample energies in :math:`\text{cm}^{-1}`
"""
E = []
for sample in samples:
e = sum(sample * wp)
E.append(e)
return E
|
1,824 | def test_robust_scaler_gauss_adjust():
# Check RobustScaler with gaus_adjust=True is equivalent to StandardScaler
# on large normal data
rng = np.random.RandomState(0)
X = rng.randn(100000, 1) * 5 + 2
robust_scaler = RobustScaler(gauss_adjust=True)
standard_scaler = StandardScaler()
assert_array_almost_equal(robust_scaler.fit_transform(X),
standard_scaler.fit_transform(X),
decimal=2)
| def test_robust_scaler_gauss_adjust():
# Check RobustScaler with gauss_adjust=True is equivalent to StandardScaler
# on large normal data
rng = np.random.RandomState(0)
X = rng.randn(100000, 1) * 5 + 2
robust_scaler = RobustScaler(gauss_adjust=True)
standard_scaler = StandardScaler()
assert_array_almost_equal(robust_scaler.fit_transform(X),
standard_scaler.fit_transform(X),
decimal=2)
|
35,831 | def canonicalize_type(t, is_indexed=False):
if isinstance(t, ByteArrayLike):
# Check to see if maxlen is small enough for events
byte_type = 'string' if isinstance(t, StringType) else 'bytes'
if is_indexed:
return f'{byte_type}{t.maxlen}'
else:
return f'{byte_type}'
if isinstance(t, ListType):
if not isinstance(t.subtype, (ListType, BaseType)):
raise InvalidTypeException(f"List of {t.subtype}s not allowed")
return canonicalize_type(t.subtype) + f"[{t.count}]"
if isinstance(t, TupleLike):
return f"({','.join(canonicalize_type(x) for x in t.tuple_members())})"
if not isinstance(t, BaseType):
raise InvalidTypeException(f"Cannot canonicalize non-base type: {t}")
t = t.typ
if t in ('int128', 'uint256', 'bool', 'address', 'bytes32'):
return t
elif t == 'decimal':
return 'fixed168x10'
raise InvalidTypeException("Invalid or unsupported type: " + repr(t))
| def canonicalize_type(t, is_indexed=False):
if isinstance(t, ByteArrayLike):
# Check to see if maxlen is small enough for events
byte_type = 'string' if isinstance(t, StringType) else 'bytes'
if is_indexed:
return f'{byte_type}{t.maxlen}'
else:
return f'{byte_type}'
if isinstance(t, ListType):
if not isinstance(t.subtype, (ListType, BaseType)):
raise InvalidTypeException(f"List of {t.subtype}s not allowed")
return canonicalize_type(t.subtype) + f"[{t.count}]"
if isinstance(t, TupleLike):
return f"({','.join(canonicalize_type(x) for x in t.tuple_members())})"
if not isinstance(t, BaseType):
raise InvalidTypeException(f"Cannot canonicalize non-base type: {t}")
t = t.typ
if t in ('int128', 'uint256', 'bool', 'address', 'bytes32'):
return t
elif t == 'decimal':
return 'fixed168x10'
raise InvalidTypeException(f"Invalid or unsupported type: {repr(t)}")
|
25,993 | def load_arguments(self, _):
with self.argument_context('backup') as c:
c.argument('force', action='store_true', help='Force completion of the requested action.')
# Vault
with self.argument_context('backup vault') as c:
c.argument('vault_name', vault_name_type, options_list=['--name', '-n'], id_part='name')
c.argument('location', validator=get_default_location_from_resource_group)
with self.argument_context('backup vault create') as c:
c.argument('tags', arg_type=tags_type)
with self.argument_context('backup vault backup-properties set') as c:
c.argument('backup_storage_redundancy', arg_type=get_enum_type(['GeoRedundant', 'LocallyRedundant', 'ZoneRedundant']), help='Sets backup storage properties for a Recovery Services vault.')
c.argument('soft_delete_feature_state', arg_type=get_enum_type(['Enable', 'Disable']), help='Set soft-delete feature state for a Recovery Services Vault.')
c.argument('cross_region_restore_flag', arg_type=get_enum_type(['True', 'False']), help='Set cross-region-restore feature state for a Recovery Services Vault. Default: False.')
# Identity
with self.argument_context('backup vault identity assign') as c:
c.argument('vault_name', vault_name_type, options_list=['--name', '-n'], id_part='name')
c.argument('system_assigned', system_assigned_type)
c.argument('user_assigned', user_assigned_type)
with self.argument_context('backup vault identity remove') as c:
c.argument('vault_name', vault_name_type, options_list=['--name', '-n'], id_part='name')
c.argument('system_assigned', system_assigned_remove_type)
c.argument('user_assigned', user_assigned_remove_type)
with self.argument_context('backup vault identity show') as c:
c.argument('vault_name', vault_name_type, options_list=['--name', '-n'], id_part='name')
# Encryption
with self.argument_context('backup vault encryption') as c:
c.argument('vault_name', vault_name_type, options_list=['--name', '-n'], id_part='name')
with self.argument_context('backup vault encryption update') as c:
c.argument('encryption_key_id', encryption_key_id_type)
c.argument('infrastructure_encryption', infrastructure_encryption_type)
c.argument('mi_user_assigned', mi_user_assigned_type)
c.argument('mi_system_assigned', mi_system_assigned_type)
with self.argument_context('backup vault encryption show') as c:
c.argument('vault_name', vault_name_type, options_list=['--name', '-n'], id_part='name')
# Container
with self.argument_context('backup container') as c:
c.argument('vault_name', vault_name_type, id_part='name')
c.ignore('status')
with self.argument_context('backup container show') as c:
c.argument('name', container_name_type, options_list=['--name', '-n'], help='Name of the container. You can use the backup container list command to get the name of a container.', id_part='child_name_2')
c.argument('backup_management_type', extended_backup_management_type)
c.argument('use_secondary_region', action='store_true', help='Use this flag to show container in secondary region.')
with self.argument_context('backup container list') as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('backup_management_type', extended_backup_management_type)
c.argument('use_secondary_region', action='store_true', help='Use this flag to list containers in secondary region.')
with self.argument_context('backup container unregister') as c:
c.argument('backup_management_type', backup_management_type)
c.argument('container_name', container_name_type, id_part='child_name_2')
with self.argument_context('backup container re-register') as c:
c.argument('backup_management_type', backup_management_type)
c.argument('container_name', container_name_type, id_part='child_name_2')
c.argument('workload_type', azure_workload_type)
with self.argument_context('backup container register') as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('backup_management_type', backup_management_type)
c.argument('resource_id', resource_id_type)
c.argument('workload_type', azure_workload_type)
# Item
with self.argument_context('backup item') as c:
c.argument('vault_name', vault_name_type, id_part='name')
c.argument('container_name', container_name_type, id_part='child_name_2')
with self.argument_context('backup item show') as c:
c.argument('name', item_name_type, options_list=['--name', '-n'], help='Name of the backed up item. You can use the backup item list command to get the name of a backed up item.', id_part='child_name_3')
c.argument('backup_management_type', extended_backup_management_type)
c.argument('workload_type', workload_type)
c.argument('use_secondary_region', action='store_true', help='Use this flag to show item in secondary region.')
# TODO: Need to use item.id once https://github.com/Azure/msrestazure-for-python/issues/80 is fixed.
with self.argument_context('backup item set-policy') as c:
c.argument('item_name', item_name_type, options_list=['--name', '-n'], help='Name of the backed up item. You can use the backup item list command to get the name of a backed up item.', id_part='child_name_3')
c.argument('policy_name', policy_name_type, help='Name of the Backup policy. You can use the backup policy list command to get the name of a backup policy.')
c.argument('backup_management_type', backup_management_type)
c.argument('workload_type', workload_type)
with self.argument_context('backup item list') as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('backup_management_type', extended_backup_management_type)
c.argument('workload_type', workload_type)
c.argument('use_secondary_region', action='store_true', help='Use this flag to list items in secondary region.')
# Policy
with self.argument_context('backup policy') as c:
c.argument('vault_name', vault_name_type, id_part='name')
for command in ['show', 'delete', 'list-associated-items']:
with self.argument_context('backup policy ' + command) as c:
c.argument('name', policy_name_type, options_list=['--name', '-n'], help='Name of the backup policy. You can use the backup policy list command to get the name of a policy.', id_part='child_name_1')
with self.argument_context('backup policy list-associated-items') as c:
c.argument('backup_management_type', backup_management_type)
with self.argument_context('backup policy set') as c:
c.argument('policy', type=file_type, help='JSON encoded policy definition. Use the show command with JSON output to obtain a policy object. Modify the values using a file editor and pass the object.', completer=FilesCompleter())
c.argument('name', options_list=['--name', '-n'], help='Name of the Policy.', id_part='child_name_1')
c.argument('fix_for_inconsistent_items', arg_type=get_three_state_flag(), options_list=['--fix-for-inconsistent-items'], help='Specify whether or not to retry Policy Update for failed items.')
c.argument('backup_management_type', backup_management_type)
with self.argument_context('backup policy create') as c:
c.argument('policy', type=file_type, help='JSON encoded policy definition. Use the show command with JSON output to obtain a policy object. Modify the values using a file editor and pass the object.', completer=FilesCompleter())
c.argument('name', options_list=['--name', '-n'], help='Name of the Policy.')
c.argument('backup_management_type', backup_management_type)
c.argument('workload_type', workload_type)
with self.argument_context('backup policy list') as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('backup_management_type', backup_management_type)
c.argument('workload_type', workload_type)
with self.argument_context('backup policy get-default-for-vm') as c:
c.argument('vault_name', vault_name_type, id_part=None)
# Recovery Point
# TODO: Need to use item.id once https://github.com/Azure/msrestazure-for-python/issues/80 is fixed.
with self.argument_context('backup recoverypoint') as c:
c.argument('vault_name', vault_name_type, id_part='name')
c.argument('container_name', container_name_type, id_part='child_name_2')
c.argument('item_name', item_name_type, id_part='child_name_3')
with self.argument_context('backup recoverypoint list') as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('start_date', type=datetime_type, help='The start date of the range in UTC (d-m-Y).')
c.argument('end_date', type=datetime_type, help='The end date of the range in UTC (d-m-Y).')
c.argument('backup_management_type', backup_management_type)
c.argument('container_name', container_name_type)
c.argument('workload_type', workload_type)
c.argument('use_secondary_region', action='store_true', help='Use this flag to list recoverypoints in secondary region.')
c.argument('is_ready_for_move', arg_type=get_three_state_flag(), help='Use this flag to retrieve the recoverypoints that are ready to be moved to destination-tier.')
c.argument('target_tier', target_tier_type)
c.argument('tier', tier_type)
c.argument('recommended_for_archive', action="store_true", help='Use this flag to retrieve recommended archivable recoverypoints.')
with self.argument_context('backup recoverypoint move') as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('container_name', container_name_type)
c.argument('rp_name', rp_name_type, options_list=['--name', '-n'], id_part='child_name_4')
c.argument('backup_management_type', backup_management_type)
c.argument('workload_type', workload_type)
c.argument('source_tier', help='The source tier from which a particular recovery point has to be moved.', arg_type=get_enum_type(['VaultStandard']), options_list=['--source-tier'])
c.argument('destination_tier', help=target_tier_help, arg_type=get_enum_type(['VaultArchive']), options_list=['--destination-tier'])
with self.argument_context('backup recoverypoint show-log-chain') as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('start_date', type=datetime_type, help='The start date of the range in UTC (d-m-Y).')
c.argument('end_date', type=datetime_type, help='The end date of the range in UTC (d-m-Y).')
c.argument('backup_management_type', backup_management_type)
c.argument('container_name', container_name_type)
c.argument('workload_type', workload_type)
c.argument('use_secondary_region', action='store_true', help='Use this flag to list recoverypoints in secondary region.')
with self.argument_context('backup recoverypoint show') as c:
c.argument('name', rp_name_type, options_list=['--name', '-n'], help='Name of the recovery point. You can use the backup recovery point list command to get the name of a backed up item.', id_part='child_name_4')
c.argument('backup_management_type', backup_management_type)
c.argument('container_name', container_name_type)
c.argument('workload_type', workload_type)
c.argument('use_secondary_region', action='store_true', help='Use this flag to show recoverypoints in secondary region.')
# Protection
with self.argument_context('backup protection') as c:
c.argument('vault_name', vault_name_type, id_part='name')
c.argument('vm', help='Name or ID of the Virtual Machine to be protected.')
c.argument('policy_name', policy_name_type)
# TODO: Need to use item.id once https://github.com/Azure/msrestazure-for-python/issues/80 is fixed.
for command in ['backup-now', 'disable', 'resume', 'undelete', 'update-for-vm']:
with self.argument_context('backup protection ' + command) as c:
c.argument('container_name', container_name_type, id_part='child_name_2')
c.argument('item_name', item_name_type, id_part='child_name_3')
c.argument('backup_management_type', backup_management_type)
c.argument('workload_type', workload_type)
c.argument('enable_compression', arg_type=get_three_state_flag(), help='Option to enable compression')
c.argument('backup_type', help=backup_type_help, options_list=['--backup-type'])
with self.argument_context('backup protection backup-now') as c:
c.argument('retain_until', type=datetime_type, help=retain_until_help)
with self.argument_context('backup protection disable') as c:
c.argument('delete_backup_data', arg_type=get_three_state_flag(), help='Option to delete existing backed up data in the Recovery services vault.')
c.argument('backup_management_type', backup_management_type)
c.argument('workload_type', workload_type)
with self.argument_context('backup protection check-vm') as c:
c.argument('vm_id', help='ID of the virtual machine to be checked for protection.', deprecate_info=c.deprecate(redirect='--vm', hide=True))
with self.argument_context('backup protection enable-for-vm') as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('diskslist', diskslist_type)
c.argument('disk_list_setting', arg_type=get_enum_type(['include', 'exclude']), options_list=['--disk-list-setting'], help=disk_list_setting_help)
c.argument('exclude_all_data_disks', arg_type=get_three_state_flag(), help='Option to specify to backup OS disk only.')
with self.argument_context('backup protection update-for-vm') as c:
c.argument('diskslist', diskslist_type)
c.argument('disk_list_setting', arg_type=get_enum_type(['include', 'exclude', 'resetexclusionsettings']), options_list=['--disk-list-setting'], help=disk_list_setting_help)
c.argument('exclude_all_data_disks', arg_type=get_three_state_flag(), help='Option to specify to backup OS disk only.')
with self.argument_context('backup protection enable-for-azurefileshare') as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('azure_file_share', options_list=['--azure-file-share'], help='Name of the Azure FileShare.')
c.argument('storage_account', options_list=['--storage-account'], help='Name of the Storage Account of the FileShare.')
for command in ["enable-for-azurewl", "auto-enable-for-azurewl", 'auto-disable-for-azurewl']:
with self.argument_context('backup protection ' + command) as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('protectable_item_type', protectable_item_type)
c.argument('protectable_item_name', protectable_item_name_type)
c.argument('server_name', options_list=['--server-name'], help='Parent Server name of the item.')
c.argument('workload_type', workload_type)
# Protectable-item
with self.argument_context('backup protectable-item') as c:
c.argument('vault_name', vault_name_type)
c.argument('workload_type', azure_workload_type)
c.argument('container_name', container_name_type)
with self.argument_context('backup protectable-item show') as c:
c.argument('vault_name', vault_name_type, id_part='name')
c.argument('name', options_list=['--name'], help='Name of the protectable item.', id_part='child_name_3')
c.argument('server_name', options_list=['--server-name'], help='Parent Server name of the item.')
c.argument('protectable_item_type', protectable_item_type)
with self.argument_context('backup protectable-item list') as c:
c.argument('server_name', options_list=['--server-name'], help='Parent Server name of the item.')
c.argument('protectable_item_type', protectable_item_type)
c.argument('backup_management_type', backup_management_type)
# Restore
# TODO: Need to use recovery_point.id once https://github.com/Azure/msrestazure-for-python/issues/80 is fixed.
with self.argument_context('backup restore') as c:
c.argument('vault_name', vault_name_type, id_part='name')
c.argument('container_name', container_name_type, id_part='child_name_2')
c.argument('item_name', item_name_type, id_part='child_name_3')
c.argument('rp_name', rp_name_type, id_part='child_name_4')
with self.argument_context('backup restore restore-disks') as c:
c.argument('storage_account', help='Name or ID of the staging storage account. The VM configuration will be restored to this storage account. See the help for --restore-to-staging-storage-account parameter for more info.')
c.argument('restore_to_staging_storage_account', arg_type=get_three_state_flag(), help='Use this flag when you want disks to be restored to the staging storage account using the --storage-account parameter. When not specified, disks will be restored to their original storage accounts. Default: false.')
c.argument('target_resource_group', options_list=['--target-resource-group', '-t'], help='Use this to specify the target resource group in which the restored disks will be saved')
c.argument('diskslist', diskslist_type)
c.argument('restore_only_osdisk', arg_type=get_three_state_flag(), help='Use this flag to restore only OS disks of a backed up VM.')
c.argument('restore_as_unmanaged_disks', arg_type=get_three_state_flag(), help='Use this flag to specify to restore as unmanaged disks')
c.argument('use_secondary_region', action='store_true', help='Use this flag to restore from a recoverypoint in secondary region.')
c.argument('rehydration_duration', type=int, help='Set the maximum time, in days (between 10-30, both inclusive) for which the recovery point stays in hydrated state.')
c.argument('rehydration_priority', rehyd_priority_type)
c.argument('disk_encryption_set_id', options_list=['--disk-encryption-set-id'], help='The disk encryption set id is used for encrypting restored disks. Please ensure access to disk encryption set id that is specified here.')
c.argument('mi_system_assigned', action='store_true', help='Use this flag to specify whether a system-assigned managed identity should be used for the restore operation. MI option is not applicable for restoring unmanaged disks.')
c.argument('mi_user_assigned', help='ARM ID of the user-assigned managed identity to use for the restore operation. Specify a value for this parameter if you do not want to use a system-assigned MI for restoring the backup item.')
c.argument('target_zone', arg_type=get_enum_type(['1', '2', '3']), help='Use this to specify the target zone number where the disks will be restored.')
with self.argument_context('backup restore restore-azurefileshare') as c:
c.argument('resolve_conflict', resolve_conflict_type)
c.argument('restore_mode', restore_mode_type)
c.argument('target_file_share', options_list=['--target-file-share'], help='Destination file share to which content will be restored')
c.argument('target_folder', options_list=['--target-folder'], help='Destination folder to which content will be restored. To restore content to root , leave the folder name empty')
c.argument('target_storage_account', options_list=['--target-storage-account'], help='Destination storage account to which content will be restored')
with self.argument_context('backup restore restore-azurefiles') as c:
c.argument('resolve_conflict', resolve_conflict_type)
c.argument('restore_mode', restore_mode_type)
c.argument('target_file_share', options_list=['--target-file-share'], help='Destination file share to which content will be restored')
c.argument('target_folder', options_list=['--target-folder'], help='Destination folder to which content will be restored. To restore content to root , leave the folder name empty')
c.argument('target_storage_account', options_list=['--target-storage-account'], help='Destination storage account to which content will be restored')
c.argument('source_file_type', arg_type=get_enum_type(['File', 'Directory']), options_list=['--source-file-type'], help='Specify the source file type to be selected')
c.argument('source_file_path', options_list=['--source-file-path'], nargs='+', help="""The absolute path of the file, to be restored within the file share, as a string. This path is the same path used in the 'az storage file download' or 'az storage file show' CLI commands.""")
with self.argument_context('backup restore restore-azurewl') as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('recovery_config', options_list=['--recovery-config'], help="""Specify the recovery configuration of a backed up item. The configuration object can be obtained from 'backup recoveryconfig show' command.""")
c.argument('rehydration_duration', type=int, help='Set the maximum time, in days (between 10-30, both inclusive) for which the recovery point stays in hydrated state.')
c.argument('rehydration_priority', rehyd_priority_type)
c.argument('use_secondary_region', action='store_true', help='Use this flag to restore from a recoverypoint in secondary region.')
# Recoveryconfig
with self.argument_context('backup recoveryconfig show') as c:
c.argument('container_name', container_name_type, id_part='child_name_2')
c.argument('item_name', item_name_type, id_part='child_name_3')
c.argument('restore_mode', restore_mode_workload_type)
c.argument('vault_name', vault_name_type, id_part='name')
c.argument('log_point_in_time', options_list=['--log-point-in-time'], help="""Specify the point-in-time which will be restored.""")
c.argument('rp_name', rp_name_type)
c.argument('target_item_name', options_list=['--target-item-name'], help="""Specify the target item name for the restore operation.""")
c.argument('target_server_type', target_server_type)
c.argument('target_server_name', options_list=['--target-server-name'], help="""Specify the parent server name of the target item.""")
c.argument('workload_type', azure_workload_type)
c.argument('target_container_name', target_container_name_type)
c.argument('from_full_rp_name', from_full_rp_type)
c.argument('filepath', filepath_type)
c.argument('backup_management_type', backup_management_type)
c.argument('target_resource_group', options_list=['--target-resource-group'], help="""Specify the resource group of target item for Cross Region Restore. Default value will be same as --resource-group if not specified.""")
c.argument('target_vault_name', options_list=['--target-vault-name'], help="""Specify the vault name of target item for Cross Region Restore. Default value will be same as --vault-name if not specified.""")
# Job
with self.argument_context('backup job') as c:
c.argument('vault_name', vault_name_type, id_part='name')
# TODO: Need to use job.id once https://github.com/Azure/msrestazure-for-python/issues/80 is fixed.
for command in ['show', 'stop', 'wait']:
with self.argument_context('backup job ' + command) as c:
c.argument('name', job_name_type, help='Name of the job. You can use the backup job list command to get the name of a job.', id_part='child_name_1')
c.argument('use_secondary_region', action='store_true', help='Use this flag to show recoverypoints in secondary region.')
with self.argument_context('backup job list') as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('status', arg_type=get_enum_type(['Cancelled', 'Completed', 'CompletedWithWarnings', 'Failed', 'InProgress']), help='Status of the Job.')
c.argument('operation', arg_type=get_enum_type(['Backup', 'ConfigureBackup', 'DeleteBackupData', 'DisableBackup', 'Restore']), help='User initiated operation.')
c.argument('start_date', type=datetime_type, help='The start date of the range in UTC (d-m-Y).')
c.argument('end_date', type=datetime_type, help='The end date of the range in UTC (d-m-Y).')
c.argument('backup_management_type', extended_backup_management_type)
c.argument('use_secondary_region', action='store_true', help='Use this flag to show recoverypoints in secondary region.')
with self.argument_context('backup job wait') as c:
c.argument('timeout', type=int, help='Maximum time, in seconds, to wait before aborting.')
| def load_arguments(self, _):
with self.argument_context('backup') as c:
c.argument('force', action='store_true', help='Force completion of the requested action.')
# Vault
with self.argument_context('backup vault') as c:
c.argument('vault_name', vault_name_type, options_list=['--name', '-n'], id_part='name')
c.argument('location', validator=get_default_location_from_resource_group)
with self.argument_context('backup vault create') as c:
c.argument('tags', arg_type=tags_type)
with self.argument_context('backup vault backup-properties set') as c:
c.argument('backup_storage_redundancy', arg_type=get_enum_type(['GeoRedundant', 'LocallyRedundant', 'ZoneRedundant']), help='Set backup storage properties for a Recovery Services vault.')
c.argument('soft_delete_feature_state', arg_type=get_enum_type(['Enable', 'Disable']), help='Set soft-delete feature state for a Recovery Services Vault.')
c.argument('cross_region_restore_flag', arg_type=get_enum_type(['True', 'False']), help='Set cross-region-restore feature state for a Recovery Services Vault. Default: False.')
# Identity
with self.argument_context('backup vault identity assign') as c:
c.argument('vault_name', vault_name_type, options_list=['--name', '-n'], id_part='name')
c.argument('system_assigned', system_assigned_type)
c.argument('user_assigned', user_assigned_type)
with self.argument_context('backup vault identity remove') as c:
c.argument('vault_name', vault_name_type, options_list=['--name', '-n'], id_part='name')
c.argument('system_assigned', system_assigned_remove_type)
c.argument('user_assigned', user_assigned_remove_type)
with self.argument_context('backup vault identity show') as c:
c.argument('vault_name', vault_name_type, options_list=['--name', '-n'], id_part='name')
# Encryption
with self.argument_context('backup vault encryption') as c:
c.argument('vault_name', vault_name_type, options_list=['--name', '-n'], id_part='name')
with self.argument_context('backup vault encryption update') as c:
c.argument('encryption_key_id', encryption_key_id_type)
c.argument('infrastructure_encryption', infrastructure_encryption_type)
c.argument('mi_user_assigned', mi_user_assigned_type)
c.argument('mi_system_assigned', mi_system_assigned_type)
with self.argument_context('backup vault encryption show') as c:
c.argument('vault_name', vault_name_type, options_list=['--name', '-n'], id_part='name')
# Container
with self.argument_context('backup container') as c:
c.argument('vault_name', vault_name_type, id_part='name')
c.ignore('status')
with self.argument_context('backup container show') as c:
c.argument('name', container_name_type, options_list=['--name', '-n'], help='Name of the container. You can use the backup container list command to get the name of a container.', id_part='child_name_2')
c.argument('backup_management_type', extended_backup_management_type)
c.argument('use_secondary_region', action='store_true', help='Use this flag to show container in secondary region.')
with self.argument_context('backup container list') as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('backup_management_type', extended_backup_management_type)
c.argument('use_secondary_region', action='store_true', help='Use this flag to list containers in secondary region.')
with self.argument_context('backup container unregister') as c:
c.argument('backup_management_type', backup_management_type)
c.argument('container_name', container_name_type, id_part='child_name_2')
with self.argument_context('backup container re-register') as c:
c.argument('backup_management_type', backup_management_type)
c.argument('container_name', container_name_type, id_part='child_name_2')
c.argument('workload_type', azure_workload_type)
with self.argument_context('backup container register') as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('backup_management_type', backup_management_type)
c.argument('resource_id', resource_id_type)
c.argument('workload_type', azure_workload_type)
# Item
with self.argument_context('backup item') as c:
c.argument('vault_name', vault_name_type, id_part='name')
c.argument('container_name', container_name_type, id_part='child_name_2')
with self.argument_context('backup item show') as c:
c.argument('name', item_name_type, options_list=['--name', '-n'], help='Name of the backed up item. You can use the backup item list command to get the name of a backed up item.', id_part='child_name_3')
c.argument('backup_management_type', extended_backup_management_type)
c.argument('workload_type', workload_type)
c.argument('use_secondary_region', action='store_true', help='Use this flag to show item in secondary region.')
# TODO: Need to use item.id once https://github.com/Azure/msrestazure-for-python/issues/80 is fixed.
with self.argument_context('backup item set-policy') as c:
c.argument('item_name', item_name_type, options_list=['--name', '-n'], help='Name of the backed up item. You can use the backup item list command to get the name of a backed up item.', id_part='child_name_3')
c.argument('policy_name', policy_name_type, help='Name of the Backup policy. You can use the backup policy list command to get the name of a backup policy.')
c.argument('backup_management_type', backup_management_type)
c.argument('workload_type', workload_type)
with self.argument_context('backup item list') as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('backup_management_type', extended_backup_management_type)
c.argument('workload_type', workload_type)
c.argument('use_secondary_region', action='store_true', help='Use this flag to list items in secondary region.')
# Policy
with self.argument_context('backup policy') as c:
c.argument('vault_name', vault_name_type, id_part='name')
for command in ['show', 'delete', 'list-associated-items']:
with self.argument_context('backup policy ' + command) as c:
c.argument('name', policy_name_type, options_list=['--name', '-n'], help='Name of the backup policy. You can use the backup policy list command to get the name of a policy.', id_part='child_name_1')
with self.argument_context('backup policy list-associated-items') as c:
c.argument('backup_management_type', backup_management_type)
with self.argument_context('backup policy set') as c:
c.argument('policy', type=file_type, help='JSON encoded policy definition. Use the show command with JSON output to obtain a policy object. Modify the values using a file editor and pass the object.', completer=FilesCompleter())
c.argument('name', options_list=['--name', '-n'], help='Name of the Policy.', id_part='child_name_1')
c.argument('fix_for_inconsistent_items', arg_type=get_three_state_flag(), options_list=['--fix-for-inconsistent-items'], help='Specify whether or not to retry Policy Update for failed items.')
c.argument('backup_management_type', backup_management_type)
with self.argument_context('backup policy create') as c:
c.argument('policy', type=file_type, help='JSON encoded policy definition. Use the show command with JSON output to obtain a policy object. Modify the values using a file editor and pass the object.', completer=FilesCompleter())
c.argument('name', options_list=['--name', '-n'], help='Name of the Policy.')
c.argument('backup_management_type', backup_management_type)
c.argument('workload_type', workload_type)
with self.argument_context('backup policy list') as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('backup_management_type', backup_management_type)
c.argument('workload_type', workload_type)
with self.argument_context('backup policy get-default-for-vm') as c:
c.argument('vault_name', vault_name_type, id_part=None)
# Recovery Point
# TODO: Need to use item.id once https://github.com/Azure/msrestazure-for-python/issues/80 is fixed.
with self.argument_context('backup recoverypoint') as c:
c.argument('vault_name', vault_name_type, id_part='name')
c.argument('container_name', container_name_type, id_part='child_name_2')
c.argument('item_name', item_name_type, id_part='child_name_3')
with self.argument_context('backup recoverypoint list') as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('start_date', type=datetime_type, help='The start date of the range in UTC (d-m-Y).')
c.argument('end_date', type=datetime_type, help='The end date of the range in UTC (d-m-Y).')
c.argument('backup_management_type', backup_management_type)
c.argument('container_name', container_name_type)
c.argument('workload_type', workload_type)
c.argument('use_secondary_region', action='store_true', help='Use this flag to list recoverypoints in secondary region.')
c.argument('is_ready_for_move', arg_type=get_three_state_flag(), help='Use this flag to retrieve the recoverypoints that are ready to be moved to destination-tier.')
c.argument('target_tier', target_tier_type)
c.argument('tier', tier_type)
c.argument('recommended_for_archive', action="store_true", help='Use this flag to retrieve recommended archivable recoverypoints.')
with self.argument_context('backup recoverypoint move') as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('container_name', container_name_type)
c.argument('rp_name', rp_name_type, options_list=['--name', '-n'], id_part='child_name_4')
c.argument('backup_management_type', backup_management_type)
c.argument('workload_type', workload_type)
c.argument('source_tier', help='The source tier from which a particular recovery point has to be moved.', arg_type=get_enum_type(['VaultStandard']), options_list=['--source-tier'])
c.argument('destination_tier', help=target_tier_help, arg_type=get_enum_type(['VaultArchive']), options_list=['--destination-tier'])
with self.argument_context('backup recoverypoint show-log-chain') as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('start_date', type=datetime_type, help='The start date of the range in UTC (d-m-Y).')
c.argument('end_date', type=datetime_type, help='The end date of the range in UTC (d-m-Y).')
c.argument('backup_management_type', backup_management_type)
c.argument('container_name', container_name_type)
c.argument('workload_type', workload_type)
c.argument('use_secondary_region', action='store_true', help='Use this flag to list recoverypoints in secondary region.')
with self.argument_context('backup recoverypoint show') as c:
c.argument('name', rp_name_type, options_list=['--name', '-n'], help='Name of the recovery point. You can use the backup recovery point list command to get the name of a backed up item.', id_part='child_name_4')
c.argument('backup_management_type', backup_management_type)
c.argument('container_name', container_name_type)
c.argument('workload_type', workload_type)
c.argument('use_secondary_region', action='store_true', help='Use this flag to show recoverypoints in secondary region.')
# Protection
with self.argument_context('backup protection') as c:
c.argument('vault_name', vault_name_type, id_part='name')
c.argument('vm', help='Name or ID of the Virtual Machine to be protected.')
c.argument('policy_name', policy_name_type)
# TODO: Need to use item.id once https://github.com/Azure/msrestazure-for-python/issues/80 is fixed.
for command in ['backup-now', 'disable', 'resume', 'undelete', 'update-for-vm']:
with self.argument_context('backup protection ' + command) as c:
c.argument('container_name', container_name_type, id_part='child_name_2')
c.argument('item_name', item_name_type, id_part='child_name_3')
c.argument('backup_management_type', backup_management_type)
c.argument('workload_type', workload_type)
c.argument('enable_compression', arg_type=get_three_state_flag(), help='Option to enable compression')
c.argument('backup_type', help=backup_type_help, options_list=['--backup-type'])
with self.argument_context('backup protection backup-now') as c:
c.argument('retain_until', type=datetime_type, help=retain_until_help)
with self.argument_context('backup protection disable') as c:
c.argument('delete_backup_data', arg_type=get_three_state_flag(), help='Option to delete existing backed up data in the Recovery services vault.')
c.argument('backup_management_type', backup_management_type)
c.argument('workload_type', workload_type)
with self.argument_context('backup protection check-vm') as c:
c.argument('vm_id', help='ID of the virtual machine to be checked for protection.', deprecate_info=c.deprecate(redirect='--vm', hide=True))
with self.argument_context('backup protection enable-for-vm') as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('diskslist', diskslist_type)
c.argument('disk_list_setting', arg_type=get_enum_type(['include', 'exclude']), options_list=['--disk-list-setting'], help=disk_list_setting_help)
c.argument('exclude_all_data_disks', arg_type=get_three_state_flag(), help='Option to specify to backup OS disk only.')
with self.argument_context('backup protection update-for-vm') as c:
c.argument('diskslist', diskslist_type)
c.argument('disk_list_setting', arg_type=get_enum_type(['include', 'exclude', 'resetexclusionsettings']), options_list=['--disk-list-setting'], help=disk_list_setting_help)
c.argument('exclude_all_data_disks', arg_type=get_three_state_flag(), help='Option to specify to backup OS disk only.')
with self.argument_context('backup protection enable-for-azurefileshare') as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('azure_file_share', options_list=['--azure-file-share'], help='Name of the Azure FileShare.')
c.argument('storage_account', options_list=['--storage-account'], help='Name of the Storage Account of the FileShare.')
for command in ["enable-for-azurewl", "auto-enable-for-azurewl", 'auto-disable-for-azurewl']:
with self.argument_context('backup protection ' + command) as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('protectable_item_type', protectable_item_type)
c.argument('protectable_item_name', protectable_item_name_type)
c.argument('server_name', options_list=['--server-name'], help='Parent Server name of the item.')
c.argument('workload_type', workload_type)
# Protectable-item
with self.argument_context('backup protectable-item') as c:
c.argument('vault_name', vault_name_type)
c.argument('workload_type', azure_workload_type)
c.argument('container_name', container_name_type)
with self.argument_context('backup protectable-item show') as c:
c.argument('vault_name', vault_name_type, id_part='name')
c.argument('name', options_list=['--name'], help='Name of the protectable item.', id_part='child_name_3')
c.argument('server_name', options_list=['--server-name'], help='Parent Server name of the item.')
c.argument('protectable_item_type', protectable_item_type)
with self.argument_context('backup protectable-item list') as c:
c.argument('server_name', options_list=['--server-name'], help='Parent Server name of the item.')
c.argument('protectable_item_type', protectable_item_type)
c.argument('backup_management_type', backup_management_type)
# Restore
# TODO: Need to use recovery_point.id once https://github.com/Azure/msrestazure-for-python/issues/80 is fixed.
with self.argument_context('backup restore') as c:
c.argument('vault_name', vault_name_type, id_part='name')
c.argument('container_name', container_name_type, id_part='child_name_2')
c.argument('item_name', item_name_type, id_part='child_name_3')
c.argument('rp_name', rp_name_type, id_part='child_name_4')
with self.argument_context('backup restore restore-disks') as c:
c.argument('storage_account', help='Name or ID of the staging storage account. The VM configuration will be restored to this storage account. See the help for --restore-to-staging-storage-account parameter for more info.')
c.argument('restore_to_staging_storage_account', arg_type=get_three_state_flag(), help='Use this flag when you want disks to be restored to the staging storage account using the --storage-account parameter. When not specified, disks will be restored to their original storage accounts. Default: false.')
c.argument('target_resource_group', options_list=['--target-resource-group', '-t'], help='Use this to specify the target resource group in which the restored disks will be saved')
c.argument('diskslist', diskslist_type)
c.argument('restore_only_osdisk', arg_type=get_three_state_flag(), help='Use this flag to restore only OS disks of a backed up VM.')
c.argument('restore_as_unmanaged_disks', arg_type=get_three_state_flag(), help='Use this flag to specify to restore as unmanaged disks')
c.argument('use_secondary_region', action='store_true', help='Use this flag to restore from a recoverypoint in secondary region.')
c.argument('rehydration_duration', type=int, help='Set the maximum time, in days (between 10-30, both inclusive) for which the recovery point stays in hydrated state.')
c.argument('rehydration_priority', rehyd_priority_type)
c.argument('disk_encryption_set_id', options_list=['--disk-encryption-set-id'], help='The disk encryption set id is used for encrypting restored disks. Please ensure access to disk encryption set id that is specified here.')
c.argument('mi_system_assigned', action='store_true', help='Use this flag to specify whether a system-assigned managed identity should be used for the restore operation. MI option is not applicable for restoring unmanaged disks.')
c.argument('mi_user_assigned', help='ARM ID of the user-assigned managed identity to use for the restore operation. Specify a value for this parameter if you do not want to use a system-assigned MI for restoring the backup item.')
c.argument('target_zone', arg_type=get_enum_type(['1', '2', '3']), help='Use this to specify the target zone number where the disks will be restored.')
with self.argument_context('backup restore restore-azurefileshare') as c:
c.argument('resolve_conflict', resolve_conflict_type)
c.argument('restore_mode', restore_mode_type)
c.argument('target_file_share', options_list=['--target-file-share'], help='Destination file share to which content will be restored')
c.argument('target_folder', options_list=['--target-folder'], help='Destination folder to which content will be restored. To restore content to root , leave the folder name empty')
c.argument('target_storage_account', options_list=['--target-storage-account'], help='Destination storage account to which content will be restored')
with self.argument_context('backup restore restore-azurefiles') as c:
c.argument('resolve_conflict', resolve_conflict_type)
c.argument('restore_mode', restore_mode_type)
c.argument('target_file_share', options_list=['--target-file-share'], help='Destination file share to which content will be restored')
c.argument('target_folder', options_list=['--target-folder'], help='Destination folder to which content will be restored. To restore content to root , leave the folder name empty')
c.argument('target_storage_account', options_list=['--target-storage-account'], help='Destination storage account to which content will be restored')
c.argument('source_file_type', arg_type=get_enum_type(['File', 'Directory']), options_list=['--source-file-type'], help='Specify the source file type to be selected')
c.argument('source_file_path', options_list=['--source-file-path'], nargs='+', help="""The absolute path of the file, to be restored within the file share, as a string. This path is the same path used in the 'az storage file download' or 'az storage file show' CLI commands.""")
with self.argument_context('backup restore restore-azurewl') as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('recovery_config', options_list=['--recovery-config'], help="""Specify the recovery configuration of a backed up item. The configuration object can be obtained from 'backup recoveryconfig show' command.""")
c.argument('rehydration_duration', type=int, help='Set the maximum time, in days (between 10-30, both inclusive) for which the recovery point stays in hydrated state.')
c.argument('rehydration_priority', rehyd_priority_type)
c.argument('use_secondary_region', action='store_true', help='Use this flag to restore from a recoverypoint in secondary region.')
# Recoveryconfig
with self.argument_context('backup recoveryconfig show') as c:
c.argument('container_name', container_name_type, id_part='child_name_2')
c.argument('item_name', item_name_type, id_part='child_name_3')
c.argument('restore_mode', restore_mode_workload_type)
c.argument('vault_name', vault_name_type, id_part='name')
c.argument('log_point_in_time', options_list=['--log-point-in-time'], help="""Specify the point-in-time which will be restored.""")
c.argument('rp_name', rp_name_type)
c.argument('target_item_name', options_list=['--target-item-name'], help="""Specify the target item name for the restore operation.""")
c.argument('target_server_type', target_server_type)
c.argument('target_server_name', options_list=['--target-server-name'], help="""Specify the parent server name of the target item.""")
c.argument('workload_type', azure_workload_type)
c.argument('target_container_name', target_container_name_type)
c.argument('from_full_rp_name', from_full_rp_type)
c.argument('filepath', filepath_type)
c.argument('backup_management_type', backup_management_type)
c.argument('target_resource_group', options_list=['--target-resource-group'], help="""Specify the resource group of target item for Cross Region Restore. Default value will be same as --resource-group if not specified.""")
c.argument('target_vault_name', options_list=['--target-vault-name'], help="""Specify the vault name of target item for Cross Region Restore. Default value will be same as --vault-name if not specified.""")
# Job
with self.argument_context('backup job') as c:
c.argument('vault_name', vault_name_type, id_part='name')
# TODO: Need to use job.id once https://github.com/Azure/msrestazure-for-python/issues/80 is fixed.
for command in ['show', 'stop', 'wait']:
with self.argument_context('backup job ' + command) as c:
c.argument('name', job_name_type, help='Name of the job. You can use the backup job list command to get the name of a job.', id_part='child_name_1')
c.argument('use_secondary_region', action='store_true', help='Use this flag to show recoverypoints in secondary region.')
with self.argument_context('backup job list') as c:
c.argument('vault_name', vault_name_type, id_part=None)
c.argument('status', arg_type=get_enum_type(['Cancelled', 'Completed', 'CompletedWithWarnings', 'Failed', 'InProgress']), help='Status of the Job.')
c.argument('operation', arg_type=get_enum_type(['Backup', 'ConfigureBackup', 'DeleteBackupData', 'DisableBackup', 'Restore']), help='User initiated operation.')
c.argument('start_date', type=datetime_type, help='The start date of the range in UTC (d-m-Y).')
c.argument('end_date', type=datetime_type, help='The end date of the range in UTC (d-m-Y).')
c.argument('backup_management_type', extended_backup_management_type)
c.argument('use_secondary_region', action='store_true', help='Use this flag to show recoverypoints in secondary region.')
with self.argument_context('backup job wait') as c:
c.argument('timeout', type=int, help='Maximum time, in seconds, to wait before aborting.')
|
47,815 | def test__templater_jinja_dynamic_variable_no_violations():
"""Test no templater violation for variable defined within template."""
t = JinjaTemplater(override_context=dict(blah="foo"))
instr = """{% if True %}
{% set some_var %}1{% endset %}
SELECT {{some_var}}
{% endif %}
"""
outstr, vs = t.process(in_str=instr, fname="test", config=FluffConfig())
assert str(outstr) == "\n \n SELECT 1\n\n"
# Check we have violations.
assert not len(vs) > 0
| def test__templater_jinja_dynamic_variable_no_violations():
"""Test no templater violation for variable defined within template."""
t = JinjaTemplater(override_context=dict(blah="foo"))
instr = """{% if True %}
{% set some_var %}1{% endset %}
SELECT {{some_var}}
{% endif %}
"""
outstr, vs = t.process(in_str=instr, fname="test", config=FluffConfig())
assert str(outstr) == "\n \n SELECT 1\n\n"
# Check we have no violations.
assert not len(vs) > 0
|
30,668 | def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
params = demisto.params()
base_url = params.get('url')
insecure = not params.get('insecure', False)
proxy = params.get('proxy', False)
command = demisto.command()
demisto.info(f'Command being called is {command}')
try:
client = Client(
base_url=base_url,
verify=insecure,
proxy=proxy,
)
commands: Dict[
str, Callable[[Client, Dict[str, str], Dict[str, str]], Tuple[str, Dict[Any, Any], Dict[Any, Any]]]
] = {
'test-module': test_module,
'azure-ad-health-get-indicators': get_indicators_command
}
if command in commands:
return_outputs(*commands[command](client, demisto.params(), demisto.args()))
elif command == 'fetch-indicators':
indicators = fetch_indicators_command(client, demisto.params())
for iter_ in batch(indicators, batch_size=2000):
demisto.createIndicators(iter_)
else:
raise NotImplementedError(f'Command {command} is not implemented.')
except Exception as err:
err_msg = f'Error in {INTEGRATION_NAME} Integration. [{err}]'
return_error(err_msg)
| def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
params = demisto.params()
base_url = params.get('url')
insecure = not params.get('insecure', False)
proxy = params.get('proxy', False)
command = demisto.command()
demisto.info(f'Command being called in {INTEGRATION_NAME} is {command}')
try:
client = Client(
base_url=base_url,
verify=insecure,
proxy=proxy,
)
commands: Dict[
str, Callable[[Client, Dict[str, str], Dict[str, str]], Tuple[str, Dict[Any, Any], Dict[Any, Any]]]
] = {
'test-module': test_module,
'azure-ad-health-get-indicators': get_indicators_command
}
if command in commands:
return_outputs(*commands[command](client, demisto.params(), demisto.args()))
elif command == 'fetch-indicators':
indicators = fetch_indicators_command(client, demisto.params())
for iter_ in batch(indicators, batch_size=2000):
demisto.createIndicators(iter_)
else:
raise NotImplementedError(f'Command {command} is not implemented.')
except Exception as err:
err_msg = f'Error in {INTEGRATION_NAME} Integration. [{err}]'
return_error(err_msg)
|
31,360 | def call_types_command(client: Client) -> CommandResults:
"""Get call types list from TOPdesk"""
call_types = client.get_list("/incidents/call_types")
return command_with_all_fields_readable_list(results=call_types,
result_name='call types',
output_prefix='callType',
outputs_key_field='id')
| def call_types_command(client: Client) -> CommandResults:
"""Get call types list from TOPdesk"""
call_types = client.get_list("/incidents/call_types")
return command_with_all_fields_readable_list(results=call_types,
result_name='call types',
output_prefix='CallType',
outputs_key_field='id')
|
58,326 | def linspace_int(Nx, Ny, periodic=True):
"""Provide a range of `Ny` equispaced integers between `0` and `Nx-1`.
Parameters
----------
Nx: int
Range of integers
Ny: int
Number of integers
periodic: bool, optional
Whether the vector is periodic.
Determines if the Nx == 0.
Default: True
Returns
-------
vector
Generated vectors.
Examples
--------
>>> linspace_int(10, 10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> linspace_int(10, 4)
array([0, 2, 5, 7])
>>> linspace_int(10, 5)
array([0, 2, 4, 6, 8])
>>>
"""
if periodic:
jj = np.linspace(0, Nx, Ny+1)[:-1]
else:
jj = np.linspace(0, Nx-1, Ny)
jj = jj.astype(int)
return jj
| def linspace_int(Nx, Ny, periodic=True):
"""Provide a range of `Ny` equispaced integers between `0` and `Nx-1`.
Parameters
----------
Nx: int
Range of integers
Ny: int
Number of integers
periodic: bool, optional
Whether the vector is periodic.
Determines if the Nx == 0.
Default: True
Returns
-------
vector
Examples
--------
>>> linspace_int(10, 10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> linspace_int(10, 4)
array([0, 2, 5, 7])
>>> linspace_int(10, 5)
array([0, 2, 4, 6, 8])
>>>
"""
if periodic:
jj = np.linspace(0, Nx, Ny+1)[:-1]
else:
jj = np.linspace(0, Nx-1, Ny)
jj = jj.astype(int)
return jj
|
35,597 | def _apply_grid_transform(img: Tensor, grid: Tensor, mode: str, fill: Optional[int]) -> Tensor:
img, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, grid.dtype)
if img.shape[0] > 1:
# Apply same grid to a batch of images
grid = grid.expand(img.shape[0], grid.shape[1], grid.shape[2], grid.shape[3])
if fill is None:
fill = 0
# Append a dummy mask for customized fill colors, should be faster than grid_sample() twice
need_mask = False
if fill != 0:
need_mask = True
dummy = torch.ones((img.shape[0], 1, img.shape[2], img.shape[3]), dtype=img.dtype, device=img.device)
img = torch.cat((img, dummy), dim=1)
img = grid_sample(img, grid, mode=mode, padding_mode="zeros", align_corners=False)
# Fill with required color
if need_mask:
mask = img[:, -1, :, :] < 0.5 # Safer, but linear interpolations should not create numbers other than 0/1
img = img[:, :-1, :, :]
img[mask.expand_as(img)] = fill
img = _cast_squeeze_out(img, need_cast, need_squeeze, out_dtype)
return img
| def _apply_grid_transform(img: Tensor, grid: Tensor, mode: str, fillcolor: Optional[int]) -> Tensor:
img, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, grid.dtype)
if img.shape[0] > 1:
# Apply same grid to a batch of images
grid = grid.expand(img.shape[0], grid.shape[1], grid.shape[2], grid.shape[3])
if fill is None:
fill = 0
# Append a dummy mask for customized fill colors, should be faster than grid_sample() twice
need_mask = False
if fill != 0:
need_mask = True
dummy = torch.ones((img.shape[0], 1, img.shape[2], img.shape[3]), dtype=img.dtype, device=img.device)
img = torch.cat((img, dummy), dim=1)
img = grid_sample(img, grid, mode=mode, padding_mode="zeros", align_corners=False)
# Fill with required color
if need_mask:
mask = img[:, -1, :, :] < 0.5 # Safer, but linear interpolations should not create numbers other than 0/1
img = img[:, :-1, :, :]
img[mask.expand_as(img)] = fill
img = _cast_squeeze_out(img, need_cast, need_squeeze, out_dtype)
return img
|
44,313 | def taper_operation(
operation, generators, paulixops, paulix_sector, wire_order, op_wires=None, op_gen=None
):
r"""Transform a gate operation with a Clifford operator and then taper qubits.
The qubit operator for the generator of the gate operation is computed either internally or can be provided
manually via `op_gen` argument. If this operator commutes with all the :math:`\mathbb{Z}_2` symmetries of
the molecular Hamiltonian, then this operator is transformed using the Clifford operators :math:`U` and
tapered; otherwise it is discarded. Finally, the tapered generator is exponentiated using :func:`~.PauliRot`
for building the tapered unitary.
Args:
operation (Operation or Callable): qubit operation to be tapered, or a function that applies that operation
generators (list[Hamiltonian]): generators expressed as PennyLane Hamiltonians
paulixops (list[Operation]): list of single-qubit Pauli-X operators
paulix_sector (list[int]): eigenvalues of the Pauli-X operators
wire_order (Sequence[Any]): order of the wires in the quantum circuit
op_wires (Sequence[Any]): optional argument to specify wires for the operation in case the provided operation is a callable
op_gen (Hamiltonian or Callable): optional argument to give the generator of the operation, or a function that returns it
Returns:
list(Operation): list of operations of type :func:`~.PauliRot` implementing tapered unitary operation
Raises:
ValueError: optional argument `op_wires` is not provided when the provided operation is a callable
TypeError: optional argument `op_gen` is a callable but does not have 'wires' as its only keyword argument
NotImplementedError: generator of the operation cannot be constructed internally
ValueError: optional argument `op_gen` is either not a :class:`~.pennylane.Hamiltonian` or a valid generator of the operation
**Example**
Given an operation, ``qml.taper_operation`` can taper it using the symmetries of the Hamiltonian:
>>> symbols, geometry = ['He', 'H'], np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.4589]])
>>> mol = qchem.Molecule(symbols, geometry, charge=1)
>>> H, n_qubits = qchem.molecular_hamiltonian(symbols, geometry)
>>> generators = qchem.symmetry_generators(H)
>>> paulixops = qchem.paulix_ops(generators, n_qubits)
>>> paulix_sector = qchem.optimal_sector(H, genera tors, mol.n_electrons)
>>> qchem.taper_operation(qml.SingleExcitation(3.14159, wires=[0, 2]), generators,
paulixops, paulix_sector, wire_order=H.wires)
[Exp(1.570795j, 'PauliY', wires=[0])]
Alternatively, it can also be used with the functional form of the operation:
>>> tap_op = qchem.taper_operation(qml.SingleExcitation, generators, paulixops,
paulix_sector, wire_order=H.wires, op_wires=[0, 2])
>>> tap_op(3.14159)
[Exp(1.570795j, 'PauliY', wires=[0])]
Both the cases can be used within a :class:`~.pennylane.QNode`:
>>> dev = qml.device('default.qubit', wires=[0, 1])
>>> @qml.qnode(dev)
... def circuit(params):
... qchem.taper_operation(qml.SingleExcitation, generators, paulixops,
... paulix_sector, wire_order=H.wires, op_wires=[0, 2])(3.14159)
... qchem.taper_operation(qml.DoubleExcitation(params[0], wires=[0, 1, 2, 3]),
... generators, paulixops, paulix_sector, H.wires)
... return qml.expval(qml.PauliZ(0)@qml.PauliZ(1))
>>> drawer = qml.draw(circuit, show_all_wires=True)
>>> print(drawer(params=[3.14159]))
0: ─ExpY(1.570795j)-╭ExpXY(0-0.7853975j)─╭ExpYX(0-0.7853975j)─┤ ╭<Z@Z>
1: ─----------------╰ExpXY(0-0.7853975j)─╰ExpYX(0-0.7853975j)─┤ ╰<Z@Z>
.. details::
:title: Theory
Consider :math:`G` to be the generator of a unitrary :math:`V(\theta)`, i.e.,
.. math::
V(\theta) = e^{i G \theta}.
Then, for :math:`V` to have a non-trivial and compatible tapering with the generators of symmetry
:math:`\tau`, we should have :math:`[V, \tau_i] = 0` for all :math:`\theta` and :math:`\tau_i`.
This would hold only when its generator itself commutes with each :math:`\tau_i`,
.. math::
[V, \tau_i] = 0 \iff [G, \tau_i]\quad \forall \theta, \tau_i.
By ensuring this, we can taper the generator :math:`G` using the Clifford operators :math:`U`,
and exponentiate the transformed generator :math:`G^{\prime}` to obtain a tapered unitary
:math:`V^{\prime}`,
.. math::
V^{\prime} \equiv e^{i U^{\dagger} G U \theta} = e^{i G^{\prime} \theta}.
"""
# get dummy objects in case functional form of operation or op_gen is being used
operation, op_gen, callable_op = _is_callable_operation(
operation, op_wires=op_wires, op_gen=op_gen
)
if op_gen is None:
if operation.num_params < 1: # Non-parameterized gates
gen_mat = 1j * scipy.linalg.logm(qml.matrix(operation, wire_order=wire_order))
op_gen = qml.Hamiltonian(
*qml.utils.decompose_hamiltonian(gen_mat, wire_order=wire_order, hide_identity=True)
)
qml.simplify(op_gen)
if op_gen.ops[0].label() == qml.Identity(wires=[wire_order[0]]).label():
op_gen -= qml.Hamiltonian([op_gen.coeffs[0]], [qml.Identity(wires=wire_order[0])])
else: # Single-parameter gates
try:
op_gen = qml.generator(operation, "hamiltonian")
except ValueError as exc:
raise NotImplementedError(
f"Generator for {operation} is not implemented, please provide it with 'op_gen' args."
) from exc
else: # check that user-provided generator is correct
if not isinstance(op_gen, qml.Hamiltonian):
raise ValueError(
f"Generator for the operation needs to be a qml.Hamiltonian, but got {type(op_gen)}."
)
coeffs = 1.0
if operation.parameters:
coeffs = functools.reduce(lambda i, j: i * j, operation.parameters)
mat1 = scipy.linalg.expm(1j * qml.matrix(op_gen, wire_order=wire_order) * coeffs)
mat2 = qml.matrix(operation, wire_order=wire_order)
phase = np.divide(mat1, mat2, out=np.zeros_like(mat1, dtype=complex), where=mat1 != 0)[
np.nonzero(np.round(mat1, 10))
]
if not np.allclose(phase / phase[0], np.ones(len(phase))): # check if the phase is global
raise ValueError(
f"Given op_gen: {op_gen} doesn't seem to be the correct generator for the {operation}."
)
if np.all(
[
[
qml.is_commuting(op1, op2)
for op1, op2 in itertools.product(generator.ops, op_gen.ops)
]
for generator in generators
]
):
gen_tapered = qml.taper(op_gen, generators, paulixops, paulix_sector)
else:
gen_tapered = qml.Hamiltonian([], [])
qml.simplify(gen_tapered)
def _tapered_op(params):
r"""Returns or applies the tapered operation for the given parameter value."""
if qml.QueuingManager.recording():
qml.QueuingManager.update_info(operation, owner=gen_tapered)
for coeff, op in zip(*gen_tapered.terms()):
qml.exp(op, 1j * params * coeff)
else:
ops_tapered = []
for coeff, op in zip(*gen_tapered.terms()):
ops_tapered.append(qml.exp(op, 1j * params * coeff))
return ops_tapered
if callable_op:
return _tapered_op
params = operation.parameters[0] if len(operation.parameters) else 1.0
return _tapered_op(params=params)
| def taper_operation(
operation, generators, paulixops, paulix_sector, wire_order, op_wires=None, op_gen=None
):
r"""Transform a gate operation with a Clifford operator and then taper qubits.
The qubit operator for the generator of the gate operation is computed either internally or can be provided
manually via `op_gen` argument. If this operator commutes with all the :math:`\mathbb{Z}_2` symmetries of
the molecular Hamiltonian, then this operator is transformed using the Clifford operators :math:`U` and
tapered; otherwise it is discarded. Finally, the tapered generator is exponentiated using :func:`~.PauliRot`
for building the tapered unitary.
Args:
operation (Operation or Callable): qubit operation to be tapered, or a function that applies that operation
generators (list[Hamiltonian]): generators expressed as PennyLane Hamiltonians
paulixops (list[Operation]): list of single-qubit Pauli-X operators
paulix_sector (list[int]): eigenvalues of the Pauli-X operators
wire_order (Sequence[Any]): order of the wires in the quantum circuit
op_wires (Sequence[Any]): optional argument to specify wires for the operation in case the provided operation is a callable
op_gen (Hamiltonian or Callable): optional argument to give the generator of the operation, or a function that returns it
Returns:
list(Operation): list of operations of type :func:`~.PauliRot` implementing tapered unitary operation
Raises:
ValueError: optional argument `op_wires` is not provided when the provided operation is a callable
TypeError: optional argument `op_gen` is a callable but does not have 'wires' as its only keyword argument
NotImplementedError: generator of the operation cannot be constructed internally
ValueError: optional argument `op_gen` is either not a :class:`~.pennylane.Hamiltonian` or a valid generator of the operation
**Example**
Given an operation, ``qml.taper_operation`` can taper it using the symmetries of the Hamiltonian:
>>> symbols, geometry = ['He', 'H'], np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.4589]])
>>> mol = qchem.Molecule(symbols, geometry, charge=1)
>>> H, n_qubits = qchem.molecular_hamiltonian(symbols, geometry)
>>> generators = qchem.symmetry_generators(H)
>>> paulixops = qchem.paulix_ops(generators, n_qubits)
>>> paulix_sector = qchem.optimal_sector(H, generators, mol.n_electrons)
>>> qchem.taper_operation(qml.SingleExcitation(3.14159, wires=[0, 2]), generators,
paulixops, paulix_sector, wire_order=H.wires)
[Exp(1.570795j, 'PauliY', wires=[0])]
Alternatively, it can also be used with the functional form of the operation:
>>> tap_op = qchem.taper_operation(qml.SingleExcitation, generators, paulixops,
paulix_sector, wire_order=H.wires, op_wires=[0, 2])
>>> tap_op(3.14159)
[Exp(1.570795j, 'PauliY', wires=[0])]
Both the cases can be used within a :class:`~.pennylane.QNode`:
>>> dev = qml.device('default.qubit', wires=[0, 1])
>>> @qml.qnode(dev)
... def circuit(params):
... qchem.taper_operation(qml.SingleExcitation, generators, paulixops,
... paulix_sector, wire_order=H.wires, op_wires=[0, 2])(3.14159)
... qchem.taper_operation(qml.DoubleExcitation(params[0], wires=[0, 1, 2, 3]),
... generators, paulixops, paulix_sector, H.wires)
... return qml.expval(qml.PauliZ(0)@qml.PauliZ(1))
>>> drawer = qml.draw(circuit, show_all_wires=True)
>>> print(drawer(params=[3.14159]))
0: ─ExpY(1.570795j)-╭ExpXY(0-0.7853975j)─╭ExpYX(0-0.7853975j)─┤ ╭<Z@Z>
1: ─----------------╰ExpXY(0-0.7853975j)─╰ExpYX(0-0.7853975j)─┤ ╰<Z@Z>
.. details::
:title: Theory
Consider :math:`G` to be the generator of a unitrary :math:`V(\theta)`, i.e.,
.. math::
V(\theta) = e^{i G \theta}.
Then, for :math:`V` to have a non-trivial and compatible tapering with the generators of symmetry
:math:`\tau`, we should have :math:`[V, \tau_i] = 0` for all :math:`\theta` and :math:`\tau_i`.
This would hold only when its generator itself commutes with each :math:`\tau_i`,
.. math::
[V, \tau_i] = 0 \iff [G, \tau_i]\quad \forall \theta, \tau_i.
By ensuring this, we can taper the generator :math:`G` using the Clifford operators :math:`U`,
and exponentiate the transformed generator :math:`G^{\prime}` to obtain a tapered unitary
:math:`V^{\prime}`,
.. math::
V^{\prime} \equiv e^{i U^{\dagger} G U \theta} = e^{i G^{\prime} \theta}.
"""
# get dummy objects in case functional form of operation or op_gen is being used
operation, op_gen, callable_op = _is_callable_operation(
operation, op_wires=op_wires, op_gen=op_gen
)
if op_gen is None:
if operation.num_params < 1: # Non-parameterized gates
gen_mat = 1j * scipy.linalg.logm(qml.matrix(operation, wire_order=wire_order))
op_gen = qml.Hamiltonian(
*qml.utils.decompose_hamiltonian(gen_mat, wire_order=wire_order, hide_identity=True)
)
qml.simplify(op_gen)
if op_gen.ops[0].label() == qml.Identity(wires=[wire_order[0]]).label():
op_gen -= qml.Hamiltonian([op_gen.coeffs[0]], [qml.Identity(wires=wire_order[0])])
else: # Single-parameter gates
try:
op_gen = qml.generator(operation, "hamiltonian")
except ValueError as exc:
raise NotImplementedError(
f"Generator for {operation} is not implemented, please provide it with 'op_gen' args."
) from exc
else: # check that user-provided generator is correct
if not isinstance(op_gen, qml.Hamiltonian):
raise ValueError(
f"Generator for the operation needs to be a qml.Hamiltonian, but got {type(op_gen)}."
)
coeffs = 1.0
if operation.parameters:
coeffs = functools.reduce(lambda i, j: i * j, operation.parameters)
mat1 = scipy.linalg.expm(1j * qml.matrix(op_gen, wire_order=wire_order) * coeffs)
mat2 = qml.matrix(operation, wire_order=wire_order)
phase = np.divide(mat1, mat2, out=np.zeros_like(mat1, dtype=complex), where=mat1 != 0)[
np.nonzero(np.round(mat1, 10))
]
if not np.allclose(phase / phase[0], np.ones(len(phase))): # check if the phase is global
raise ValueError(
f"Given op_gen: {op_gen} doesn't seem to be the correct generator for the {operation}."
)
if np.all(
[
[
qml.is_commuting(op1, op2)
for op1, op2 in itertools.product(generator.ops, op_gen.ops)
]
for generator in generators
]
):
gen_tapered = qml.taper(op_gen, generators, paulixops, paulix_sector)
else:
gen_tapered = qml.Hamiltonian([], [])
qml.simplify(gen_tapered)
def _tapered_op(params):
r"""Returns or applies the tapered operation for the given parameter value."""
if qml.QueuingManager.recording():
qml.QueuingManager.update_info(operation, owner=gen_tapered)
for coeff, op in zip(*gen_tapered.terms()):
qml.exp(op, 1j * params * coeff)
else:
ops_tapered = []
for coeff, op in zip(*gen_tapered.terms()):
ops_tapered.append(qml.exp(op, 1j * params * coeff))
return ops_tapered
if callable_op:
return _tapered_op
params = operation.parameters[0] if len(operation.parameters) else 1.0
return _tapered_op(params=params)
|
34,437 | def _log_stories(stories: List, filename: Text, out_directory: Text) -> None:
"""Take stories as a list of dicts."""
if not out_directory:
return
with open(
os.path.join(out_directory, filename), "w", encoding=DEFAULT_ENCODING
) as f:
if len(stories) == 0:
f.write("<!-- No stories found. -->")
for story in stories:
f.write(story.export_stories(include_source=True))
f.write("\n\n")
| def _log_stories(stories: List[DialogueStateTracker], filename: Text, out_directory: Text) -> None:
"""Take stories as a list of dicts."""
if not out_directory:
return
with open(
os.path.join(out_directory, filename), "w", encoding=DEFAULT_ENCODING
) as f:
if len(stories) == 0:
f.write("<!-- No stories found. -->")
for story in stories:
f.write(story.export_stories(include_source=True))
f.write("\n\n")
|
38,916 | def test_color_type():
class Model(BaseModel):
color: Color
model_schema = Model.schema()
assert model_schema == {
'title': 'Model',
'type': 'object',
'properties': {'color': {'title': 'Color', 'type': 'string'}},
'required': ['color'],
}
| def test_color_type():
class Model(BaseModel):
color: Color
model_schema = Model.schema()
assert model_schema == {
'title': 'Model',
'type': 'object',
'properties': {'color': {'title': 'Color', 'type': 'string', 'format': 'color'}},
'required': ['color'],
}
|
8,385 | def test_create_from_multidimensional_arrays():
"""
This is a test for a bug that was fixed by #283. It makes sure that
multidimensional flux arrays are handled properly when creating Spectrum1D
objects.
"""
freqs = np.arange(50) * u.GHz
flux = np.random.random((5, len(freqs))) * u.Jy
spec = Spectrum1D(spectral_axis=freqs, flux=flux)
assert (spec.frequency == freqs).all()
assert (spec.flux == flux).all()
# Mis-matched lengths should raise and exception
freqs = np.arange(50) * u.GHz
flux = np.random.random((5, len(freqs)-1)) * u.Jy
with pytest.raises(ValueError) as e_info:
spec = Spectrum1D(spectral_axis=freqs, flux=flux)
| def test_create_from_multidimensional_arrays():
"""
This is a test for a bug that was fixed by #283. It makes sure that
multidimensional flux arrays are handled properly when creating Spectrum1D
objects.
"""
freqs = np.arange(50) * u.GHz
flux = np.random.random((5, len(freqs))) * u.Jy
spec = Spectrum1D(spectral_axis=freqs, flux=flux)
assert (spec.frequency == freqs).all()
assert (spec.flux == flux).all()
# Mis-matched lengths should raise an exception
freqs = np.arange(50) * u.GHz
flux = np.random.random((5, len(freqs)-1)) * u.Jy
with pytest.raises(ValueError) as e_info:
spec = Spectrum1D(spectral_axis=freqs, flux=flux)
|
2,503 | def compute_optics_graph(
X, *, min_samples, max_eps, metric, p, metric_params, algorithm, leaf_size, n_jobs
):
"""Compute the OPTICS reachability graph.
Read more in the :ref:`User Guide <optics>`.
Parameters
----------
X : ndarray of shape (n_samples, n_features), or \
(n_samples, n_samples) if metric=’precomputed’
A feature array, or array of distances between samples if
metric='precomputed'.
min_samples : int > 1 or float between 0 and 1
The number of samples in a neighborhood for a point to be considered
as a core point. Expressed as an absolute number or a fraction of the
number of samples (rounded to be at least 2).
max_eps : float, default=np.inf
The maximum distance between two samples for one to be considered as
in the neighborhood of the other. Default value of ``np.inf`` will
identify clusters across all scales; reducing ``max_eps`` will result
in shorter run times.
metric : str or callable, default='minkowski'
Metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string. If metric is
"precomputed", X is assumed to be a distance matrix and must be square.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
p : int, default=2
Parameter for the Minkowski metric from
:class:`~sklearn.metrics.pairwise_distances`. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`.
- 'kd_tree' will use :class:`KDTree`.
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method. (default)
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, default=30
Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can
affect the speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
ordering_ : array of shape (n_samples,)
The cluster ordered list of sample indices.
core_distances_ : array of shape (n_samples,)
Distance at which each sample becomes a core point, indexed by object
order. Points which will never be core have a distance of inf. Use
``clust.core_distances_[clust.ordering_]`` to access in cluster order.
reachability_ : array of shape (n_samples,)
Reachability distances per sample, indexed by object order. Use
``clust.reachability_[clust.ordering_]`` to access in cluster order.
predecessor_ : array of shape (n_samples,)
Point that a sample was reached from, indexed by object order.
Seed points have a predecessor of -1.
References
----------
.. [1] Ankerst, Mihael, Markus M. Breunig, Hans-Peter Kriegel,
and Jörg Sander. "OPTICS: ordering points to identify the clustering
structure." ACM SIGMOD Record 28, no. 2 (1999): 49-60.
"""
n_samples = X.shape[0]
_validate_size(min_samples, n_samples, "min_samples")
if min_samples <= 1:
min_samples = max(2, int(min_samples * n_samples))
# Start all points as 'unprocessed' ##
reachability_ = np.empty(n_samples)
reachability_.fill(np.inf)
predecessor_ = np.empty(n_samples, dtype=int)
predecessor_.fill(-1)
nbrs = NearestNeighbors(
n_neighbors=min_samples,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric,
metric_params=metric_params,
p=p,
n_jobs=n_jobs,
)
nbrs.fit(X)
# Here we first do a kNN query for each point, this differs from
# the original OPTICS that only used epsilon range queries.
# TODO: handle working_memory somehow?
core_distances_ = _compute_core_distances_(
X=X, neighbors=nbrs, min_samples=min_samples, working_memory=None
)
# OPTICS puts an upper limit on these, use inf for undefined.
core_distances_[core_distances_ > max_eps] = np.inf
np.around(
core_distances_,
decimals=np.finfo(core_distances_.dtype).precision,
out=core_distances_,
)
# Main OPTICS loop. Not parallelizable. The order that entries are
# written to the 'ordering_' list is important!
# Note that this implementation is O(n^2) theoretically, but
# supposedly with very low constant factors.
processed = np.zeros(X.shape[0], dtype=bool)
ordering = np.zeros(X.shape[0], dtype=int)
for ordering_idx in range(X.shape[0]):
# Choose next based on smallest reachability distance
# (And prefer smaller ids on ties, possibly np.inf!)
index = np.where(processed == 0)[0]
point = index[np.argmin(reachability_[index])]
processed[point] = True
ordering[ordering_idx] = point
if core_distances_[point] != np.inf:
_set_reach_dist(
core_distances_=core_distances_,
reachability_=reachability_,
predecessor_=predecessor_,
point_index=point,
processed=processed,
X=X,
nbrs=nbrs,
metric=metric,
metric_params=metric_params,
p=p,
max_eps=max_eps,
)
if np.all(np.isinf(reachability_)):
warnings.warn(
"All reachability values are inf. Set a larger"
" max_eps or all data will be considered outliers.",
UserWarning,
)
return ordering, core_distances_, reachability_, predecessor_
| def compute_optics_graph(
X, *, min_samples, max_eps, metric, p, metric_params, algorithm, leaf_size, n_jobs
):
"""Compute the OPTICS reachability graph.
Read more in the :ref:`User Guide <optics>`.
Parameters
----------
X : ndarray of shape (n_samples, n_features), or \
(n_samples, n_samples) if metric="precomputed"
A feature array, or array of distances between samples if
metric='precomputed'.
min_samples : int > 1 or float between 0 and 1
The number of samples in a neighborhood for a point to be considered
as a core point. Expressed as an absolute number or a fraction of the
number of samples (rounded to be at least 2).
max_eps : float, default=np.inf
The maximum distance between two samples for one to be considered as
in the neighborhood of the other. Default value of ``np.inf`` will
identify clusters across all scales; reducing ``max_eps`` will result
in shorter run times.
metric : str or callable, default='minkowski'
Metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string. If metric is
"precomputed", X is assumed to be a distance matrix and must be square.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
p : int, default=2
Parameter for the Minkowski metric from
:class:`~sklearn.metrics.pairwise_distances`. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`.
- 'kd_tree' will use :class:`KDTree`.
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method. (default)
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, default=30
Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can
affect the speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
ordering_ : array of shape (n_samples,)
The cluster ordered list of sample indices.
core_distances_ : array of shape (n_samples,)
Distance at which each sample becomes a core point, indexed by object
order. Points which will never be core have a distance of inf. Use
``clust.core_distances_[clust.ordering_]`` to access in cluster order.
reachability_ : array of shape (n_samples,)
Reachability distances per sample, indexed by object order. Use
``clust.reachability_[clust.ordering_]`` to access in cluster order.
predecessor_ : array of shape (n_samples,)
Point that a sample was reached from, indexed by object order.
Seed points have a predecessor of -1.
References
----------
.. [1] Ankerst, Mihael, Markus M. Breunig, Hans-Peter Kriegel,
and Jörg Sander. "OPTICS: ordering points to identify the clustering
structure." ACM SIGMOD Record 28, no. 2 (1999): 49-60.
"""
n_samples = X.shape[0]
_validate_size(min_samples, n_samples, "min_samples")
if min_samples <= 1:
min_samples = max(2, int(min_samples * n_samples))
# Start all points as 'unprocessed' ##
reachability_ = np.empty(n_samples)
reachability_.fill(np.inf)
predecessor_ = np.empty(n_samples, dtype=int)
predecessor_.fill(-1)
nbrs = NearestNeighbors(
n_neighbors=min_samples,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric,
metric_params=metric_params,
p=p,
n_jobs=n_jobs,
)
nbrs.fit(X)
# Here we first do a kNN query for each point, this differs from
# the original OPTICS that only used epsilon range queries.
# TODO: handle working_memory somehow?
core_distances_ = _compute_core_distances_(
X=X, neighbors=nbrs, min_samples=min_samples, working_memory=None
)
# OPTICS puts an upper limit on these, use inf for undefined.
core_distances_[core_distances_ > max_eps] = np.inf
np.around(
core_distances_,
decimals=np.finfo(core_distances_.dtype).precision,
out=core_distances_,
)
# Main OPTICS loop. Not parallelizable. The order that entries are
# written to the 'ordering_' list is important!
# Note that this implementation is O(n^2) theoretically, but
# supposedly with very low constant factors.
processed = np.zeros(X.shape[0], dtype=bool)
ordering = np.zeros(X.shape[0], dtype=int)
for ordering_idx in range(X.shape[0]):
# Choose next based on smallest reachability distance
# (And prefer smaller ids on ties, possibly np.inf!)
index = np.where(processed == 0)[0]
point = index[np.argmin(reachability_[index])]
processed[point] = True
ordering[ordering_idx] = point
if core_distances_[point] != np.inf:
_set_reach_dist(
core_distances_=core_distances_,
reachability_=reachability_,
predecessor_=predecessor_,
point_index=point,
processed=processed,
X=X,
nbrs=nbrs,
metric=metric,
metric_params=metric_params,
p=p,
max_eps=max_eps,
)
if np.all(np.isinf(reachability_)):
warnings.warn(
"All reachability values are inf. Set a larger"
" max_eps or all data will be considered outliers.",
UserWarning,
)
return ordering, core_distances_, reachability_, predecessor_
|
49,086 | def pole_zero(transfer_function, **kwargs):
r"""
Returns the Pole-Zero plot (also known as PZ Plot or PZ Map) of a system.
"""
poles = transfer_function.poles()
zeros = transfer_function.zeros()
pole_points = [pole.as_real_imag() for pole in poles]
zero_points = [zero.as_real_imag() for zero in zeros]
x_poles = list(map(lambda x: x[0], pole_points))
y_poles = list(map(lambda x: x[1], pole_points))
x_zeros = list(map(lambda x: x[0], zero_points))
y_zeros = list(map(lambda x: x[1], zero_points))
plt.plot(x_poles, y_poles, 'x', mfc='none', markersize=15)
plt.plot(x_zeros, y_zeros, 'o', markersize=10)
plt.xlabel('Real')
plt.ylabel('Imaginary')
plt.title('Poles and Zeros')
plt.grid()
plt.axhline(0, color='black')
plt.axvline(0, color='black')
plt.show()
return
| def pole_zero(transfer_function, **kwargs):
r"""
Returns the Pole-Zero plot (also known as PZ Plot or PZ Map) of a system.
"""
poles = transfer_function.poles()
zeros = transfer_function.zeros()
pole_points = [pole.as_real_imag() for pole in poles]
zero_points = [zero.as_real_imag() for zero in zeros]
x_poles = list(map(lambda x: x[0], pole_points))
y_poles = list(map(lambda x: x[1], pole_points))
x_zeros = list(map(lambda x: x[0], zero_points))
y_zeros = list(map(lambda x: x[1], zero_points))
plt.plot(x_poles, y_poles, 'x', mfc='none', markersize=10)
plt.plot(x_zeros, y_zeros, 'o', markersize=7)
plt.xlabel('Real')
plt.ylabel('Imaginary')
plt.title('Poles and Zeros')
plt.grid()
plt.axhline(0, color='black')
plt.axvline(0, color='black')
plt.show()
return
|
59,508 | def get_channel_waveform(sched: Schedule,
chan: PulseChannel,
backend: Union[None, IBMQBackend] = None,
qubit_index: Union[None, int] = None,
chan_freq: Union[None, float] = None,
dt: float = 2e-9 / 9,
apply_carrier_wave: bool = False):
"""Returns the waveforms on a PulseChannel.
Args:
sched: The pulse Schedule object.
chan: The PulseChannel on which the waveform is to be returned.
backend: An IBMQBackend from which the qubit frequency and dt
are to be extracted.
qubit_index: An integer indicating the qubit index.
chan_freq: A float indicating the channel wave frequency. Not necessary if
both backend and qubit_index are specified.
dt: Qubit drive channel timestep in seconds. Default to the 2/9 ns.
apply_carrier_wave: Whether the carrier wave is applied to the waveforms.
Returns:
chan_waveform: A complex-valued array of the waveform on the
given PulseChannel.
"""
# Check consistency of arguments
if not isinstance(chan, PulseChannel):
raise TypeError("The channel must be a DriveChannel, "
"ControlChannel or a MeasureChannel")
if apply_carrier_wave:
if backend is not None and qubit_index is not None:
if isinstance(chan, MeasureChannel):
chan_freq = backend.defaults().meas_freq_est[qubit_index]
else:
chan_freq = backend.defaults().qubit_freq_est[qubit_index]
else:
assert chan_freq is not None
# Flatten the Schedule and transform it into an iterator of
# InstructionTuples
sched_trans = target_qobj_transform(sched)
chan_events = ChannelEvents.load_program(sched_trans, chan)
waveform_inst_tups = chan_events.get_waveforms()
if backend is not None:
dt = backend.configuration().dt
# Bulid the channel waveform
chan_waveform = np.zeros((sched_trans.duration,), dtype=complex)
for inst_tup in waveform_inst_tups:
if isinstance(inst_tup.inst, Play):
# Unpack the time points and phase and frequency in
# the current frame
t0 = inst_tup.t0
tf = t0 + inst_tup.inst.duration
t_array = np.arange(t0, tf) * dt
phase = inst_tup.frame.phase
freq = inst_tup.frame.freq
# Apply phase and frequency shifts and optionally carrier wave
pulse_waveform = inst_tup.inst.pulse.get_waveform().samples
pulse_waveform *= np.exp(1j * phase)
pulse_waveform *= np.exp(1j * freq * t_array)
if apply_carrier_wave:
pulse_waveform *= np.exp(1j * chan_freq * t_array)
chan_waveform[t0:tf] += pulse_waveform
return chan_waveform | def get_channel_waveform(sched: Schedule,
chan: PulseChannel,
backend: Union[None, IBMQBackend] = None,
qubit_index: Union[None, int] = None,
chan_freq: Union[None, float] = None,
dt: float = 2e-9 / 9,
apply_carrier_wave: bool = False):
"""Returns the waveforms on a PulseChannel.
Args:
sched: The pulse Schedule object.
chan: The PulseChannel on which the waveform is to be returned.
backend: An IBMQBackend from which the qubit frequency and dt
are to be extracted.
qubit_index: An integer indicating the qubit index.
chan_freq: A float indicating the channel wave frequency. Not necessary if
both backend and qubit_index are specified.
dt: Qubit drive channel timestep in seconds. Default to the 2/9 ns.
apply_carrier_wave: Whether the carrier wave is applied to the waveforms.
Returns:
pulse_waveform *= np.exp(1j * 2 * np.pi * chan_freq * t_array)
chan_waveform: A complex-valued array of the waveform on the
given PulseChannel.
"""
# Check consistency of arguments
if not isinstance(chan, PulseChannel):
raise TypeError("The channel must be a DriveChannel, "
"ControlChannel or a MeasureChannel")
if apply_carrier_wave:
if backend is not None and qubit_index is not None:
if isinstance(chan, MeasureChannel):
chan_freq = backend.defaults().meas_freq_est[qubit_index]
else:
chan_freq = backend.defaults().qubit_freq_est[qubit_index]
else:
assert chan_freq is not None
# Flatten the Schedule and transform it into an iterator of
# InstructionTuples
sched_trans = target_qobj_transform(sched)
chan_events = ChannelEvents.load_program(sched_trans, chan)
waveform_inst_tups = chan_events.get_waveforms()
if backend is not None:
dt = backend.configuration().dt
# Bulid the channel waveform
chan_waveform = np.zeros((sched_trans.duration,), dtype=complex)
for inst_tup in waveform_inst_tups:
if isinstance(inst_tup.inst, Play):
# Unpack the time points and phase and frequency in
# the current frame
t0 = inst_tup.t0
tf = t0 + inst_tup.inst.duration
t_array = np.arange(t0, tf) * dt
phase = inst_tup.frame.phase
freq = inst_tup.frame.freq
# Apply phase and frequency shifts and optionally carrier wave
pulse_waveform = inst_tup.inst.pulse.get_waveform().samples
pulse_waveform *= np.exp(1j * phase)
pulse_waveform *= np.exp(1j * freq * t_array)
if apply_carrier_wave:
pulse_waveform *= np.exp(1j * chan_freq * t_array)
chan_waveform[t0:tf] += pulse_waveform
return chan_waveform |
25,572 | def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument("config")
args = parser.parse_args()
with open(args.config, "r") as handler:
config = json.load(handler)
# validate the endpoints
node_to_endpoint = dict()
node_to_address = dict()
for node_name, node_info in config["nodes"].items():
if urlsplit(node_info["endpoint"]).scheme == "":
raise ValueError("'endpoint' must have the protocol defined")
url_deposit = f"{node_info['endpoint']}/api/{API_VERSION}/address"
result = requests.get(url_deposit).json()
if result["our_address"] != node_info["address"]:
raise ValueError(
f"Address mismatch, configuration {node_info['address']}, "
f"API response {result['our_address']}"
)
node_to_endpoint[node_name] = node_info["endpoint"]
node_to_address[node_name] = node_info["address"]
queue_per_node: Dict[str, List[ChannelNew]] = defaultdict(list)
target_to_depositqueue: Dict[Tuple[str, str], JoinableQueue] = dict()
# Schedule the requests to evenly distribute the load. This is important
# because as of 0.100.5 channel can not be done concurrently, by dividing
# the load evenly we make sure the channels are open as fast as possible.
for token_address, channels_to_open in config["networks"].items():
for channel in channels_to_open:
node1 = channel["node1"]
node2 = channel["node2"]
participant1 = node_to_address[node1]
participant2 = node_to_address[node2]
is_node1_with_less_work = len(queue_per_node[participant1]) < len(
queue_per_node[participant2]
)
if is_node1_with_less_work:
channel_new = ChannelNew(
token_address=token_address,
participant1=participant1,
participant2=participant2,
endpoint1=node_to_endpoint[node1],
endpoint2=node_to_endpoint[node2],
minimum_capacity1=channel["minimum_capacity1"],
minimum_capacity2=channel["minimum_capacity2"],
)
queue_per_node[participant1].append(channel_new)
else:
channel_new = ChannelNew(
token_address=token_address,
participant1=participant2,
participant2=participant1,
endpoint1=node_to_endpoint[node2],
endpoint2=node_to_endpoint[node1],
minimum_capacity1=channel["minimum_capacity2"],
minimum_capacity2=channel["minimum_capacity1"],
)
queue_per_node[participant2].append(channel_new)
# queue used to order deposits
target = (token_address, channel_new.participant2)
if target not in target_to_depositqueue:
target_to_depositqueue[target] = JoinableQueue()
open_greenlets = set(
gevent.spawn(channel_open_with_the_same_node, channels_to_open, target_to_depositqueue)
for channels_to_open in queue_per_node.values()
)
deposit_greenlets = [
gevent.spawn(channel_deposit_with_the_same_node_and_token_network, deposit_queue)
for deposit_queue in target_to_depositqueue.values()
]
gevent.joinall(open_greenlets, raise_error=True)
log.info("Opening the channels finished")
# Because all channels have been opened, there is no more deposits to do,
# so now one just has to wait for the queues to get empty.
for queue in target_to_depositqueue.values():
# Queue` and `JoinableQueue` don't have the method `rawlink`, so
# `joinall` cannot be used. At the same time calling `join` in the
# `JoinableQueue` was raising an exception `This operation would block
# forever` which seems to be a false positive. Using `empty` to
# circumvent it.
while not queue.empty():
gevent.sleep(1)
log.info("Depositing to the channels finished")
# The deposit greenlets are infinite loops.
gevent.killall(deposit_greenlets)
| def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument("config")
args = parser.parse_args()
with open(args.config, "r") as handler:
config = json.load(handler)
# validate the endpoints
node_to_endpoint = dict()
node_to_address = dict()
for node_name, node_info in config["nodes"].items():
if urlsplit(node_info["endpoint"]).scheme == "":
raise ValueError("'endpoint' must have the protocol defined")
url_deposit = f"{node_info['endpoint']}/api/{API_VERSION}/address"
result = requests.get(url_deposit).json()
if result["our_address"] != node_info["address"]:
raise ValueError(
f"Address mismatch, configuration {node_info['address']}, "
f"API response {result['our_address']}"
)
node_to_endpoint[node_name] = node_info["endpoint"]
node_to_address[node_name] = node_info["address"]
queue_per_node: Dict[str, List[ChannelNew]] = defaultdict(list)
target_to_depositqueue: Dict[Tuple[str, str], JoinableQueue] = dict()
# Schedule the requests to evenly distribute the load. This is important
# because as of 0.100.5 channels cannot be opened concurrently, by dividing
# the load evenly we make sure the channels are open as fast as possible.
for token_address, channels_to_open in config["networks"].items():
for channel in channels_to_open:
node1 = channel["node1"]
node2 = channel["node2"]
participant1 = node_to_address[node1]
participant2 = node_to_address[node2]
is_node1_with_less_work = len(queue_per_node[participant1]) < len(
queue_per_node[participant2]
)
if is_node1_with_less_work:
channel_new = ChannelNew(
token_address=token_address,
participant1=participant1,
participant2=participant2,
endpoint1=node_to_endpoint[node1],
endpoint2=node_to_endpoint[node2],
minimum_capacity1=channel["minimum_capacity1"],
minimum_capacity2=channel["minimum_capacity2"],
)
queue_per_node[participant1].append(channel_new)
else:
channel_new = ChannelNew(
token_address=token_address,
participant1=participant2,
participant2=participant1,
endpoint1=node_to_endpoint[node2],
endpoint2=node_to_endpoint[node1],
minimum_capacity1=channel["minimum_capacity2"],
minimum_capacity2=channel["minimum_capacity1"],
)
queue_per_node[participant2].append(channel_new)
# queue used to order deposits
target = (token_address, channel_new.participant2)
if target not in target_to_depositqueue:
target_to_depositqueue[target] = JoinableQueue()
open_greenlets = set(
gevent.spawn(channel_open_with_the_same_node, channels_to_open, target_to_depositqueue)
for channels_to_open in queue_per_node.values()
)
deposit_greenlets = [
gevent.spawn(channel_deposit_with_the_same_node_and_token_network, deposit_queue)
for deposit_queue in target_to_depositqueue.values()
]
gevent.joinall(open_greenlets, raise_error=True)
log.info("Opening the channels finished")
# Because all channels have been opened, there is no more deposits to do,
# so now one just has to wait for the queues to get empty.
for queue in target_to_depositqueue.values():
# Queue` and `JoinableQueue` don't have the method `rawlink`, so
# `joinall` cannot be used. At the same time calling `join` in the
# `JoinableQueue` was raising an exception `This operation would block
# forever` which seems to be a false positive. Using `empty` to
# circumvent it.
while not queue.empty():
gevent.sleep(1)
log.info("Depositing to the channels finished")
# The deposit greenlets are infinite loops.
gevent.killall(deposit_greenlets)
|
3,238 | def top_events_timeseries(
timeseries_columns,
selected_columns,
user_query,
params,
orderby,
rollup,
limit,
organization,
referrer=None,
):
"""
High-level API for doing arbitrary user timeseries queries for a limited number of top events
Returns a dictionary of SnubaTSResult objects that have been zerofilled in
case of gaps. Each value of the dictionary should match the result of a timeseries query
timeseries_columns (Sequence[str]) List of public aliases to fetch for the timeseries query,
usually matches the y-axis of the graph
selected_columns (Sequence[str]) List of public aliases to fetch for the events query,
this is to determine what the top events are
user_query (str) Filter query string to create conditions from. needs to be user_query
to not conflict with the function query
params (Dict[str, str]) Filtering parameters with start, end, project_id, environment,
orderby (Sequence[str]) The fields to order results by.
rollup (int) The bucket width in seconds
limit (int) The number of events to get timeseries for
organization (Organization) Used to map group ids to short ids
referrer (str|None) A referrer string to help locate the origin of this query.
"""
top_events = query(
selected_columns,
query=user_query,
params=params,
orderby=orderby,
limit=limit,
referrer=referrer,
)
snuba_filter, translated_columns = get_timeseries_snuba_filter(
timeseries_columns + selected_columns, user_query, params, rollup
)
user_fields = FIELD_ALIASES["user"]["fields"]
for field in selected_columns:
# project is handled by filter_keys already
if field in ["project", "project.id"]:
continue
if field == "issue":
field = FIELD_ALIASES["issue"]["column_alias"]
# Note that cause orderby shouldn't be an array field its not included in the values
values = list(
{
event.get(field)
for event in top_events["data"]
if field in event and not isinstance(event.get(field), list)
}
)
if values:
# timestamp needs special handling, creating a big OR instead
if field == "timestamp":
snuba_filter.conditions.append([["timestamp", "=", value] for value in values])
# A user field can be any of its field aliases, do an OR across all the user fields
elif field == "user":
snuba_filter.conditions.append(
[[resolve_column(user_field), "IN", values] for user_field in user_fields]
)
elif None in values:
non_none_values = [value for value in values if value is not None]
condition = [[["isNull", [resolve_column(field)]], "=", 1]]
if non_none_values:
condition.append([resolve_column(field), "IN", non_none_values])
snuba_filter.conditions.append(condition)
else:
snuba_filter.conditions.append([resolve_column(field), "IN", values])
result = raw_query(
aggregations=snuba_filter.aggregations,
conditions=snuba_filter.conditions,
filter_keys=snuba_filter.filter_keys,
start=snuba_filter.start,
end=snuba_filter.end,
rollup=rollup,
orderby="time",
groupby=["time"] + snuba_filter.groupby,
dataset=Dataset.Discover,
limit=10000,
referrer=referrer,
)
result = transform_results(result, translated_columns, snuba_filter, selected_columns)
translated_columns["project_id"] = "project"
translated_groupby = [
translated_columns.get(groupby, groupby) for groupby in snuba_filter.groupby
]
if "user" in selected_columns:
# Determine user related fields to prune based on what wasn't selected, since transform_results does the same
for field in user_fields:
if field not in selected_columns:
translated_groupby.remove(field)
translated_groupby.append("user")
issues = {}
if "issue" in selected_columns:
issues = Group.issues_mapping(
set([event["issue.id"] for event in top_events["data"]]),
params["project_id"],
organization,
)
# so the result key is consistent
translated_groupby.sort()
results = {}
# Using the top events add the order to the results
for index, item in enumerate(top_events["data"]):
result_key = create_result_key(item, translated_groupby, issues)
results[result_key] = {
"order": index,
"data": [],
}
for row in result["data"]:
result_key = create_result_key(row, translated_groupby, issues)
if result_key in results:
results[result_key]["data"].append(row)
else:
logger.warning(
"Timeseries top events key mismatch",
extra={"result_key": result_key, "top_event_keys": results.keys()},
)
for key, item in six.iteritems(results):
results[key] = SnubaTSResult(
{
"data": zerofill(
item["data"], snuba_filter.start, snuba_filter.end, rollup, "time"
),
"order": item["order"],
},
snuba_filter.start,
snuba_filter.end,
rollup,
)
return results
| def top_events_timeseries(
timeseries_columns,
selected_columns,
user_query,
params,
orderby,
rollup,
limit,
organization,
referrer=None,
):
"""
High-level API for doing arbitrary user timeseries queries for a limited number of top events
Returns a dictionary of SnubaTSResult objects that have been zerofilled in
case of gaps. Each value of the dictionary should match the result of a timeseries query
timeseries_columns (Sequence[str]) List of public aliases to fetch for the timeseries query,
usually matches the y-axis of the graph
selected_columns (Sequence[str]) List of public aliases to fetch for the events query,
this is to determine what the top events are
user_query (str) Filter query string to create conditions from. needs to be user_query
to not conflict with the function query
params (Dict[str, str]) Filtering parameters with start, end, project_id, environment,
orderby (Sequence[str]) The fields to order results by.
rollup (int) The bucket width in seconds
limit (int) The number of events to get timeseries for
organization (Organization) Used to map group ids to short ids
referrer (str|None) A referrer string to help locate the origin of this query.
"""
top_events = query(
selected_columns,
query=user_query,
params=params,
orderby=orderby,
limit=limit,
referrer=referrer,
)
snuba_filter, translated_columns = get_timeseries_snuba_filter(
timeseries_columns + selected_columns, user_query, params, rollup
)
user_fields = FIELD_ALIASES["user"]["fields"]
for field in selected_columns:
# project is handled by filter_keys already
if field in ["project", "project.id"]:
continue
if field == "issue":
field = FIELD_ALIASES["issue"]["column_alias"]
# Note that because orderby shouldn't be an array field its not included in the values
values = list(
{
event.get(field)
for event in top_events["data"]
if field in event and not isinstance(event.get(field), list)
}
)
if values:
# timestamp needs special handling, creating a big OR instead
if field == "timestamp":
snuba_filter.conditions.append([["timestamp", "=", value] for value in values])
# A user field can be any of its field aliases, do an OR across all the user fields
elif field == "user":
snuba_filter.conditions.append(
[[resolve_column(user_field), "IN", values] for user_field in user_fields]
)
elif None in values:
non_none_values = [value for value in values if value is not None]
condition = [[["isNull", [resolve_column(field)]], "=", 1]]
if non_none_values:
condition.append([resolve_column(field), "IN", non_none_values])
snuba_filter.conditions.append(condition)
else:
snuba_filter.conditions.append([resolve_column(field), "IN", values])
result = raw_query(
aggregations=snuba_filter.aggregations,
conditions=snuba_filter.conditions,
filter_keys=snuba_filter.filter_keys,
start=snuba_filter.start,
end=snuba_filter.end,
rollup=rollup,
orderby="time",
groupby=["time"] + snuba_filter.groupby,
dataset=Dataset.Discover,
limit=10000,
referrer=referrer,
)
result = transform_results(result, translated_columns, snuba_filter, selected_columns)
translated_columns["project_id"] = "project"
translated_groupby = [
translated_columns.get(groupby, groupby) for groupby in snuba_filter.groupby
]
if "user" in selected_columns:
# Determine user related fields to prune based on what wasn't selected, since transform_results does the same
for field in user_fields:
if field not in selected_columns:
translated_groupby.remove(field)
translated_groupby.append("user")
issues = {}
if "issue" in selected_columns:
issues = Group.issues_mapping(
set([event["issue.id"] for event in top_events["data"]]),
params["project_id"],
organization,
)
# so the result key is consistent
translated_groupby.sort()
results = {}
# Using the top events add the order to the results
for index, item in enumerate(top_events["data"]):
result_key = create_result_key(item, translated_groupby, issues)
results[result_key] = {
"order": index,
"data": [],
}
for row in result["data"]:
result_key = create_result_key(row, translated_groupby, issues)
if result_key in results:
results[result_key]["data"].append(row)
else:
logger.warning(
"Timeseries top events key mismatch",
extra={"result_key": result_key, "top_event_keys": results.keys()},
)
for key, item in six.iteritems(results):
results[key] = SnubaTSResult(
{
"data": zerofill(
item["data"], snuba_filter.start, snuba_filter.end, rollup, "time"
),
"order": item["order"],
},
snuba_filter.start,
snuba_filter.end,
rollup,
)
return results
|
44,303 | def basis_rotation(one_electron, two_electron, tol_factor):
r"""Return the grouped coefficients and observables of a molecular Hamiltonian and the basis
rotation unitaries obtained with the basis rotation grouping method.
Args:
one_electron (array[float]): one-electron integral matrix in the molecular orbital basis
two_electron (array[array[float]]): two-electron integral tensor in the molecular orbital
basis arranged in chemist notation
tol_factor (float): threshold error value for discarding the negligible factors
Returns:
tuple(list[array[float]], list[list[Observable]], list[array[float]]): tuple containing the
grouped coefficients and grouped opservables of a Hamiltonian and the basis rotation
unitaries obtained with the basis rotation grouping method
**Example**
>>> symbols = ['H', 'H']
>>> geometry = np.array([[0.0, 0.0, 0.0], [1.398397361, 0.0, 0.0]], requires_grad = False)
>>> mol = qml.qchem.Molecule(symbols, geometry)
>>> core, one, two = qml.qchem.electron_integrals(mol)()
>>> coeffs, ops, eigvecs = basis_rotation(one, two, tol_factor=1.0e-5)
>>> print(coeffs)
[array([-1.29789639, 0.84064639, 0.45725000]),
array([-0.00019476, -0.01100037, 0.02239026, -0.01119513]),
array([ 0.36242096, -0.18121048, -0.18121048]),
array([-1.36155423, 2.03646071, -1.34981296, 0.67490648])]
.. details::
:title: Theory
A second-quantized molecular Hamiltonian can be constructed in the
`chemist notation <http://vergil.chemistry.gatech.edu/notes/permsymm/permsymm.pdf>`_ format
following Eq. (1) of
[`PRX Quantum 2, 030305, 2021 <https://journals.aps.org/prxquantum/abstract/10.1103/PRXQuantum.2.030305>`_]
as
.. math::
H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} T_{pq} a_{p,\alpha}^{\dagger}
a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs}
V_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \alpha} a_{r, \beta}^{\dagger} a_{s, \beta},
where :math:`V_{pqrs}` denotes a two-electron integral in the chemist notation and
:math:`T_{pq}` is obtained from the one- and two electron integrals, :math:`h_{pq}` and
:math:`h_{pssq}`, as
.. math::
T_{pq} = h_{pq} - \frac{1}{2} \sum_s h_{pssq}.
The tensor :math:`V` can be converted to a matrix which is indexed by the indices :math:`pq`
and :math:`rs` and eigendecomposed up to a rank :math:`R` to give
.. math::
V_{pqrs} = \sum_r^R L_{pq}^{(r)} L_{rs}^{(r) T},
where :math:`L` denotes the eigenvectors of the matrix. The molecular Hamiltonian can then
be rewritten following Eq. (7) of
[`Phys. Rev. Research 3, 033055, 2021 <https://journals.aps.org/prresearch/abstract/10.1103/PhysRevResearch.3.033055>`_]
as
.. math::
H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} T_{pq} a_{p,\alpha}^{\dagger}
a_{q, \alpha} + \frac{1}{2} \sum_r^R \left ( \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pq}
L_{pq}^{(r)} a_{p, \alpha}^{\dagger} a_{q, \alpha} \right )^2.
The orbital basis can be rotated such that each :math:`T` and :math:`L^{(r)}` matrix is
diagonal. The Hamiltonian can then be written following Eq. (2) of
[`npj Quantum Information, 7, 23 (2021) <https://www.nature.com/articles/s41534-020-00341-7>`_]
as
.. math::
H = U_0 \left ( \sum_p d_p n_p \right ) U_0^{\dagger} + \sum_r^R U_r \left ( \sum_{pq}
d_{pq}^{(r)} n_p n_q \right ) U_r^{\dagger}
where the coefficients :math:`d` are obtained by diagonalizing the :math:`T` and
:math:`L^{(r)}` matrices. The number operators :math:`n_p = a_p^{\dagger} a_p` can be
converted to qubit operators using
.. math::
n_p = \frac{1-Z_p}{2}
where :math:`Z_p` is the Pauli :math:`Z` operator applied to qubit :math:`p`. This gives
the qubit Hamiltonian
.. math::
H = U_0 \left ( \sum_p O_p^{(0)} \right ) U_0^{\dagger} + \sum_r^R U_r \left ( \sum_{q} O_q^{(r)} \right ) U_r^{\dagger},
where :math:`O = \sum_i c_i P_i` is a linear combination of Pauli words :math:`P_` that are
a tensor product of Pauli :math:`Z` and Identity operators. This allows all the Pauli words
in each of the :math:`O` terms to be measured simultaneously. This function returns the
coefficients and the Pauli words grouped for each of the :math:`O` terms as well as the
eigenvectors of the :math:`T` and :math:`L^{(r)}` matrices that can be used to construct the
basis rotation unitaries :math:`U`.
"""
two_electron = np.swapaxes(two_electron, 1, 3)
_, eigvals_m, eigvecs_m = qml.qchem.factorize(two_electron, tol_factor, 0.0)
t_matrix = one_electron - 0.5 * np.einsum("illj", two_electron)
t_eigvals, t_eigvecs = np.linalg.eigh(t_matrix)
eigvals = [np.array(t_eigvals)] + [np.outer(x, x).flatten() * 0.5 for x in eigvals_m]
eigvecs = [t_eigvecs] + eigvecs_m
ops_t = 0.0
for i in range(len(eigvals[0])):
ops_t += 0.5 * eigvals[0][i] * qml.Identity(i) - 0.5 * eigvals[0][i] * qml.PauliZ(i)
ops_l = []
for coeff in eigvals[1:]:
ops_l_ = 0.0
for i in range(len(coeff) // 2):
for j in range(len(coeff) // 2):
cc = coeff[i + j]
if i == j:
ops_l_ += cc * (
qml.Identity(i) - qml.PauliZ(i) - qml.PauliZ(j) + qml.Identity(i)
)
else:
ops_l_ += cc * (
qml.Identity(i)
- qml.PauliZ(i)
- qml.PauliZ(j)
+ qml.grouping.pauli_mult_with_phase(qml.PauliZ(i), qml.PauliZ(j))[0]
)
ops_l.append(ops_l_.tolist())
ops = [ops_t.tolist()] + ops_l
c_group = [op.coeffs for op in ops]
o_group = [op.ops for op in ops]
return c_group, o_group, eigvecs
| def basis_rotation(one_electron, two_electron, tol_factor):
r"""Return the grouped coefficients and observables of a molecular Hamiltonian and the basis
rotation unitaries obtained with the basis rotation grouping method.
Args:
one_electron (array[float]): one-electron integral matrix in the molecular orbital basis
two_electron (array[array[float]]): two-electron integral tensor in the molecular orbital
basis arranged in chemist notation
tol_factor (float): threshold error value for discarding the negligible factors
Returns:
tuple(list[array[float]], list[list[Observable]], list[array[float]]): tuple containing the
grouped coefficients and grouped opservables of a Hamiltonian and the basis rotation
unitaries obtained with the basis rotation grouping method
**Example**
>>> symbols = ['H', 'H']
>>> geometry = np.array([[0.0, 0.0, 0.0], [1.398397361, 0.0, 0.0]], requires_grad = False)
>>> mol = qml.qchem.Molecule(symbols, geometry)
>>> core, one, two = qml.qchem.electron_integrals(mol)()
>>> coeffs, ops, eigvecs = basis_rotation(one, two, tol_factor=1.0e-5)
>>> print(coeffs)
[array([-1.29789639, 0.84064639, 0.45725000]),
array([-0.00019476, -0.01100037, 0.02239026, -0.01119513]),
array([ 0.36242096, -0.18121048, -0.18121048]),
array([-1.36155423, 2.03646071, -1.34981296, 0.67490648])]
.. details::
:title: Theory
A second-quantized molecular Hamiltonian can be constructed in the
`chemist notation <http://vergil.chemistry.gatech.edu/notes/permsymm/permsymm.pdf>`_ format
following Eq. (1) of
[`PRX Quantum 2, 030305, 2021 <https://journals.aps.org/prxquantum/abstract/10.1103/PRXQuantum.2.030305>`_]
as
.. math::
H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} T_{pq} a_{p,\alpha}^{\dagger}
a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs}
V_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \alpha} a_{r, \beta}^{\dagger} a_{s, \beta},
where :math:`V_{pqrs}` denotes a two-electron integral in the chemist notation and
:math:`T_{pq}` is obtained from the one- and two electron integrals, :math:`h_{pq}` and
:math:`h_{pssq}`, as
.. math::
T_{pq} = h_{pq} - \frac{1}{2} \sum_s h_{pssq}.
The tensor :math:`V` can be converted to a matrix which is indexed by the indices :math:`pq`
and :math:`rs` and eigendecomposed up to a rank :math:`R` to give
.. math::
V_{pqrs} = \sum_r^R L_{pq}^{(r)} L_{rs}^{(r) T},
where :math:`L` denotes the eigenvectors of the matrix. The molecular Hamiltonian can then
be rewritten following Eq. (7) of
[`Phys. Rev. Research 3, 033055, 2021 <https://journals.aps.org/prresearch/abstract/10.1103/PhysRevResearch.3.033055>`_]
as
.. math::
H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} T_{pq} a_{p,\alpha}^{\dagger}
a_{q, \alpha} + \frac{1}{2} \sum_r^R \left ( \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pq}
L_{pq}^{(r)} a_{p, \alpha}^{\dagger} a_{q, \alpha} \right )^2.
The orbital basis can be rotated such that each :math:`T` and :math:`L^{(r)}` matrix is
diagonal. The Hamiltonian can then be written following Eq. (2) of
[`npj Quantum Information, 7, 23 (2021) <https://www.nature.com/articles/s41534-020-00341-7>`_]
as
.. math::
H = U_0 \left ( \sum_p d_p n_p \right ) U_0^{\dagger} + \sum_r^R U_r \left ( \sum_{pq}
d_{pq}^{(r)} n_p n_q \right ) U_r^{\dagger}
where the coefficients :math:`d` are obtained by diagonalizing the :math:`T` and
:math:`L^{(r)}` matrices. The number operators :math:`n_p = a_p^{\dagger} a_p` can be
converted to qubit operators using
.. math::
n_p = \frac{1-Z_p}{2}
where :math:`Z_p` is the Pauli :math:`Z` operator applied to qubit :math:`p`. This gives
the qubit Hamiltonian
.. math::
H = U_0 \left ( \sum_p O_p^{(0)} \right ) U_0^{\dagger} + \sum_r^R U_r \left ( \sum_{q} O_q^{(r)} \right ) U_r^{\dagger},
where :math:`O = \sum_i c_i P_i` is a linear combination of Pauli words :math:`P_i` that are
a tensor product of Pauli :math:`Z` and Identity operators. This allows all the Pauli words
in each of the :math:`O` terms to be measured simultaneously. This function returns the
coefficients and the Pauli words grouped for each of the :math:`O` terms as well as the
eigenvectors of the :math:`T` and :math:`L^{(r)}` matrices that can be used to construct the
basis rotation unitaries :math:`U`.
"""
two_electron = np.swapaxes(two_electron, 1, 3)
_, eigvals_m, eigvecs_m = qml.qchem.factorize(two_electron, tol_factor, 0.0)
t_matrix = one_electron - 0.5 * np.einsum("illj", two_electron)
t_eigvals, t_eigvecs = np.linalg.eigh(t_matrix)
eigvals = [np.array(t_eigvals)] + [np.outer(x, x).flatten() * 0.5 for x in eigvals_m]
eigvecs = [t_eigvecs] + eigvecs_m
ops_t = 0.0
for i in range(len(eigvals[0])):
ops_t += 0.5 * eigvals[0][i] * qml.Identity(i) - 0.5 * eigvals[0][i] * qml.PauliZ(i)
ops_l = []
for coeff in eigvals[1:]:
ops_l_ = 0.0
for i in range(len(coeff) // 2):
for j in range(len(coeff) // 2):
cc = coeff[i + j]
if i == j:
ops_l_ += cc * (
qml.Identity(i) - qml.PauliZ(i) - qml.PauliZ(j) + qml.Identity(i)
)
else:
ops_l_ += cc * (
qml.Identity(i)
- qml.PauliZ(i)
- qml.PauliZ(j)
+ qml.grouping.pauli_mult_with_phase(qml.PauliZ(i), qml.PauliZ(j))[0]
)
ops_l.append(ops_l_.tolist())
ops = [ops_t.tolist()] + ops_l
c_group = [op.coeffs for op in ops]
o_group = [op.ops for op in ops]
return c_group, o_group, eigvecs
|
58,703 | def create_intent_report(
intent_results: List[IntentEvaluationResult],
add_confused_labels_to_report: bool,
metrics_as_dict: bool,
) -> IntentReport: # pragma: no cover
"""Creates summary statistics for intents.
Only considers those examples with a set intent. Others are filtered out.
Returns a dictionary of containing the evaluation result.
Args:
intent_results: intent evaluation results
add_confused_labels_to_report: add confused label information to the intent_report
metrics_as_dict: whether the evaluation metrics should be returned as Dict
Returns: IntentReport namedtuple with evaluation results
"""
import sklearn.metrics
import sklearn.utils.multiclass
from rasa.test import get_evaluation_metrics
# remove empty intent targets
num_examples = len(intent_results)
intent_results = remove_empty_intent_examples(intent_results)
logger.info(
f"Intent Evaluation: Only considering those {len(intent_results)} examples "
f"that have a defined intent out of {num_examples} examples."
)
target_intents, predicted_intents = _targets_predictions_from(
intent_results, "intent_target", "intent_prediction"
)
confusion_matrix = sklearn.metrics.confusion_matrix(
target_intents, predicted_intents
)
labels = sklearn.utils.multiclass.unique_labels(target_intents, predicted_intents)
report, precision, f1, accuracy = get_evaluation_metrics(
target_intents, predicted_intents, output_dict=metrics_as_dict
)
if add_confused_labels_to_report:
report = _add_confused_labels_to_report(report, confusion_matrix, labels)
return IntentReport(report, precision, f1, accuracy, confusion_matrix, labels)
| def create_intent_report(
intent_results: List[IntentEvaluationResult],
add_confused_labels_to_report: bool,
metrics_as_dict: bool,
) -> IntentReport: # pragma: no cover
"""Creates summary statistics for intents.
Only considers those examples with a set intent. Others are filtered out.
Returns a dictionary of containing the evaluation result.
Args:
intent_results: intent evaluation results
add_confused_labels_to_report: add confused label information to the intent_report
metrics_as_dict: whether the evaluation metrics should be returned as Dict
Returns:
`IntentReport` `namedtuple` with evaluation results.
"""
import sklearn.metrics
import sklearn.utils.multiclass
from rasa.test import get_evaluation_metrics
# remove empty intent targets
num_examples = len(intent_results)
intent_results = remove_empty_intent_examples(intent_results)
logger.info(
f"Intent Evaluation: Only considering those {len(intent_results)} examples "
f"that have a defined intent out of {num_examples} examples."
)
target_intents, predicted_intents = _targets_predictions_from(
intent_results, "intent_target", "intent_prediction"
)
confusion_matrix = sklearn.metrics.confusion_matrix(
target_intents, predicted_intents
)
labels = sklearn.utils.multiclass.unique_labels(target_intents, predicted_intents)
report, precision, f1, accuracy = get_evaluation_metrics(
target_intents, predicted_intents, output_dict=metrics_as_dict
)
if add_confused_labels_to_report:
report = _add_confused_labels_to_report(report, confusion_matrix, labels)
return IntentReport(report, precision, f1, accuracy, confusion_matrix, labels)
|
39,439 | def test_copy_no_copy_wrap_object(datasets):
for dataset in datasets:
# different dataset tyoes have different copy behavior for points
# use point data which is common
dataset["data"] = np.ones(dataset.n_points)
new_dataset = type(dataset)(dataset)
new_dataset["data"] += 1
assert np.array_equal(new_dataset["data"], dataset["data"])
for dataset in datasets:
# different dataset tyoes have different copy behavior for points
# use point data which is common
dataset["data"] = np.ones(dataset.n_points)
new_dataset = type(dataset)(dataset, deep=True)
new_dataset["data"] += 1
assert not np.any(new_dataset["data"] == dataset["data"])
| def test_copy_no_copy_wrap_object(datasets):
for dataset in datasets:
# different dataset types have different copy behavior for points
# use point data which is common
dataset["data"] = np.ones(dataset.n_points)
new_dataset = type(dataset)(dataset)
new_dataset["data"] += 1
assert np.array_equal(new_dataset["data"], dataset["data"])
for dataset in datasets:
# different dataset tyoes have different copy behavior for points
# use point data which is common
dataset["data"] = np.ones(dataset.n_points)
new_dataset = type(dataset)(dataset, deep=True)
new_dataset["data"] += 1
assert not np.any(new_dataset["data"] == dataset["data"])
|
38,695 | def main():
# Setup command line options
argparser = argparse.ArgumentParser()
output_options = argparser.add_argument_group(
'Options controlling ReFrame output'
)
locate_options = argparser.add_argument_group(
'Options for discovering checks'
)
select_options = argparser.add_argument_group(
'Options for selecting checks'
)
action_options = argparser.add_argument_group(
'Options controlling actions'
)
run_options = argparser.add_argument_group(
'Options controlling the execution of checks'
)
env_options = argparser.add_argument_group(
'Options controlling the ReFrame environment'
)
misc_options = argparser.add_argument_group('Miscellaneous options')
# Output directory options
output_options.add_argument(
'--prefix', action='store', metavar='DIR',
help='Set general directory prefix to DIR',
envvar='RFM_PREFIX', configvar='systems/prefix'
)
output_options.add_argument(
'-o', '--output', action='store', metavar='DIR',
help='Set output directory prefix to DIR',
envvar='RFM_OUTPUT_DIR', configvar='systems/outputdir'
)
output_options.add_argument(
'-s', '--stage', action='store', metavar='DIR',
help='Set stage directory prefix to DIR',
envvar='RFM_STAGE_DIR', configvar='systems/stagedir'
)
output_options.add_argument(
'--timestamp', action='store', nargs='?', const='', metavar='TIMEFMT',
help=('Append a timestamp to the output and stage directory prefixes '
'(default: "%%FT%%T")'),
envvar='RFM_TIMESTAMP_DIRS', configvar='general/timestamp_dirs'
)
output_options.add_argument(
'--perflogdir', action='store', metavar='DIR',
help=('Set performance log data directory prefix '
'(relevant only to the filelog log handler)'),
envvar='RFM_PERFLOG_DIR',
configvar='logging/handlers_perflog/filelog_basedir'
)
output_options.add_argument(
'--keep-stage-files', action='store_true',
help='Keep stage directories even for successful checks',
envvar='RFM_KEEP_STAGE_FILES', configvar='general/keep_stage_files'
)
output_options.add_argument(
'--dont-restage', action='store_false', dest='clean_stagedir',
help='Reuse the test stage directory',
envvar='RFM_CLEAN_STAGEDIR', configvar='general/clean_stagedir'
)
output_options.add_argument(
'--save-log-files', action='store_true', default=False,
help='Save ReFrame log files to the output directory',
envvar='RFM_SAVE_LOG_FILES', configvar='general/save_log_files'
)
output_options.add_argument(
'--report-file', action='store', metavar='FILE',
help="Store JSON run report in FILE",
envvar='RFM_REPORT_FILE',
configvar='general/report_file'
)
# Check discovery options
locate_options.add_argument(
'-c', '--checkpath', action='append', metavar='PATH',
help="Add PATH to the check search path list",
envvar='RFM_CHECK_SEARCH_PATH :', configvar='general/check_search_path'
)
locate_options.add_argument(
'-R', '--recursive', action='store_true',
help='Search for checks in the search path recursively',
envvar='RFM_CHECK_SEARCH_RECURSIVE',
configvar='general/check_search_recursive'
)
locate_options.add_argument(
'--ignore-check-conflicts', action='store_true',
help='Skip checks with conflicting names',
envvar='RFM_IGNORE_CHECK_CONFLICTS',
configvar='general/ignore_check_conflicts'
)
# Select options
select_options.add_argument(
'-t', '--tag', action='append', dest='tags', metavar='PATTERN',
default=[],
help='Select checks with at least one tag matching PATTERN'
)
select_options.add_argument(
'-n', '--name', action='append', dest='names', default=[],
metavar='PATTERN', help='Select checks whose name matches PATTERN'
)
select_options.add_argument(
'-x', '--exclude', action='append', dest='exclude_names',
metavar='PATTERN', default=[],
help='Exclude checks whose name matches PATTERN'
)
select_options.add_argument(
'-p', '--prgenv', action='append', default=[r'.*'], metavar='PATTERN',
help=('Select checks with at least one '
'programming environment matching PATTERN')
)
select_options.add_argument(
'--failed', action='store_true',
help="Select failed test cases (only when '--restore-session' is used)"
)
select_options.add_argument(
'--gpu-only', action='store_true',
help='Select only GPU checks'
)
select_options.add_argument(
'--cpu-only', action='store_true',
help='Select only CPU checks'
)
# Action options
action_options.add_argument(
'-l', '--list', action='store_true',
help='List the selected checks'
)
action_options.add_argument(
'-L', '--list-detailed', action='store_true',
help='List the selected checks providing details for each test'
)
action_options.add_argument(
'-r', '--run', action='store_true',
help='Run the selected checks'
)
# Run options
run_options.add_argument(
'-J', '--job-option', action='append', metavar='OPT',
dest='job_options', default=[],
help='Pass option OPT to job scheduler'
)
run_options.add_argument(
'--force-local', action='store_true',
help='Force local execution of checks'
)
run_options.add_argument(
'--skip-sanity-check', action='store_true',
help='Skip sanity checking'
)
run_options.add_argument(
'--skip-performance-check', action='store_true',
help='Skip performance checking'
)
run_options.add_argument(
'--strict', action='store_true',
help='Enforce strict performance checking'
)
run_options.add_argument(
'--skip-system-check', action='store_true',
help='Skip system check'
)
run_options.add_argument(
'--skip-prgenv-check', action='store_true',
help='Skip programming environment check'
)
run_options.add_argument(
'--exec-policy', metavar='POLICY', action='store',
choices=['async', 'serial'], default='async',
help='Set the execution policy of ReFrame (default: "async")'
)
run_options.add_argument(
'--mode', action='store', help='Execution mode to use'
)
run_options.add_argument(
'--max-retries', metavar='NUM', action='store', default=0,
help='Set the maximum number of times a failed regression test '
'may be retried (default: 0)'
)
run_options.add_argument(
'--max-fail', metavar='NUM', action='store', default=0,
help='Set the maximum number of failures before exiting'
)
run_options.add_argument(
'--restore-session', action='store', nargs='?', const='',
metavar='REPORT',
help='Restore a testing session from REPORT file'
)
run_options.add_argument(
'--flex-alloc-nodes', action='store',
dest='flex_alloc_nodes', metavar='{all|STATE|NUM}', default=None,
help='Set strategy for the flexible node allocation (default: "idle").'
)
run_options.add_argument(
'--disable-hook', action='append', metavar='NAME', dest='hooks',
default=[], help='Disable a pipeline hook for this run'
)
env_options.add_argument(
'-M', '--map-module', action='append', metavar='MAPPING',
dest='module_mappings', default=[],
help='Add a module mapping',
envvar='RFM_MODULE_MAPPINGS ,', configvar='general/module_mappings'
)
env_options.add_argument(
'-m', '--module', action='append', default=[],
metavar='MOD', dest='user_modules',
help='Load module MOD before running any regression check',
envvar='RFM_USER_MODULES ,', configvar='general/user_modules'
)
env_options.add_argument(
'--module-mappings', action='store', metavar='FILE',
dest='module_map_file',
help='Load module mappings from FILE',
envvar='RFM_MODULE_MAP_FILE', configvar='general/module_map_file'
)
env_options.add_argument(
'-u', '--unload-module', action='append', metavar='MOD',
dest='unload_modules', default=[],
help='Unload module MOD before running any regression check',
envvar='RFM_UNLOAD_MODULES ,', configvar='general/unload_modules'
)
env_options.add_argument(
'--module-path', action='append', metavar='PATH',
dest='module_paths', default=[],
help='(Un)use module path PATH before running any regression check',
)
env_options.add_argument(
'--purge-env', action='store_true', dest='purge_env', default=False,
help='Unload all modules before running any regression check',
envvar='RFM_PURGE_ENVIRONMENT', configvar='general/purge_environment'
)
env_options.add_argument(
'--non-default-craype', action='store_true',
help='Test a non-default Cray Programming Environment',
envvar='RFM_NON_DEFAULT_CRAYPE', configvar='general/non_default_craype'
)
# Miscellaneous options
misc_options.add_argument(
'-C', '--config-file', action='store',
dest='config_file', metavar='FILE',
help='Set configuration file',
envvar='RFM_CONFIG_FILE'
)
misc_options.add_argument(
'--nocolor', action='store_false', dest='colorize',
help='Disable coloring of output',
envvar='RFM_COLORIZE', configvar='general/colorize'
)
misc_options.add_argument(
'--failure-stats', action='store_true', help='Print failure statistics'
)
misc_options.add_argument(
'--performance-report', action='store_true',
help='Print a report for performance tests'
)
misc_options.add_argument(
'--show-config', action='store', nargs='?', const='all',
metavar='PARAM',
help='Print the value of configuration parameter PARAM and exit'
)
misc_options.add_argument(
'--system', action='store', help='Load configuration for SYSTEM',
envvar='RFM_SYSTEM'
)
misc_options.add_argument(
'--upgrade-config-file', action='store', metavar='OLD[:NEW]',
help='Upgrade ReFrame 2.x configuration file to ReFrame 3.x syntax'
)
misc_options.add_argument(
'-V', '--version', action='version', version=osext.reframe_version()
)
misc_options.add_argument(
'-v', '--verbose', action='count',
help='Increase verbosity level of output',
envvar='RFM_VERBOSE', configvar='general/verbose'
)
# Options not associated with command-line arguments
argparser.add_argument(
dest='graylog_server',
envvar='RFM_GRAYLOG_ADDRESS',
configvar='logging/handlers_perflog/graylog_address',
help='Graylog server address'
)
argparser.add_argument(
dest='syslog_address',
envvar='RFM_SYSLOG_ADDRESS',
configvar='logging/handlers_perflog/syslog_address',
help='Syslog server address'
)
argparser.add_argument(
dest='ignore_reqnodenotavail',
envvar='RFM_IGNORE_REQNODENOTAVAIL',
configvar='schedulers/ignore_reqnodenotavail',
action='store_true',
help='Graylog server address'
)
argparser.add_argument(
dest='use_login_shell',
envvar='RFM_USE_LOGIN_SHELL',
configvar='general/use_login_shell',
action='store_true',
help='Use a login shell for job scripts'
)
if len(sys.argv) == 1:
argparser.print_help()
sys.exit(1)
# Parse command line
options = argparser.parse_args()
# First configure logging with our generic configuration so as to be able
# to print pretty messages; logging will be reconfigured by user's
# configuration later
site_config = config.load_config(
os.path.join(reframe.INSTALL_PREFIX, 'reframe/core/settings.py')
)
site_config.select_subconfig('generic')
options.update_config(site_config)
logging.configure_logging(site_config)
logging.getlogger().colorize = site_config.get('general/0/colorize')
printer = PrettyPrinter()
printer.colorize = site_config.get('general/0/colorize')
printer.inc_verbosity(site_config.get('general/0/verbose'))
if os.getenv('RFM_GRAYLOG_SERVER'):
printer.warning(
'RFM_GRAYLOG_SERVER environment variable is deprecated; '
'please use RFM_GRAYLOG_ADDRESS instead'
)
os.environ['RFM_GRAYLOG_ADDRESS'] = os.getenv('RFM_GRAYLOG_SERVER')
if options.upgrade_config_file is not None:
old_config, *new_config = options.upgrade_config_file.split(
':', maxsplit=1
)
new_config = new_config[0] if new_config else None
try:
new_config = config.convert_old_config(old_config, new_config)
except Exception as e:
printer.error(f'could not convert file: {e}')
sys.exit(1)
printer.info(
f'Conversion successful! '
f'The converted file can be found at {new_config!r}.'
)
sys.exit(0)
# Now configure ReFrame according to the user configuration file
try:
try:
printer.debug('Loading user configuration')
site_config = config.load_config(options.config_file)
except warnings.ReframeDeprecationWarning as e:
printer.warning(e)
converted = config.convert_old_config(options.config_file)
printer.warning(
f"configuration file has been converted "
f"to the new syntax here: '{converted}'"
)
site_config = config.load_config(converted)
site_config.validate()
# We ignore errors about unresolved sections or configuration
# parameters here, because they might be defined at the individual
# partition level and will be caught when we will instantiating
# internally the system and partitions later on.
site_config.select_subconfig(options.system,
ignore_resolve_errors=True)
for err in options.update_config(site_config):
printer.warning(str(err))
# Update options from the selected execution mode
if options.mode:
mode_args = site_config.get(f'modes/@{options.mode}/options')
# We lexically split the mode options, because otherwise spaces
# will be treated as part of the option argument; see GH bug #1554
mode_args = list(itertools.chain.from_iterable(shlex.split(m)
for m in mode_args))
# Parse the mode's options and reparse the command-line
options = argparser.parse_args(mode_args)
options = argparser.parse_args(namespace=options.cmd_options)
options.update_config(site_config)
logging.configure_logging(site_config)
except (OSError, errors.ConfigError) as e:
printer.error(f'failed to load configuration: {e}')
printer.error(logfiles_message())
sys.exit(1)
logging.getlogger().colorize = site_config.get('general/0/colorize')
printer.colorize = site_config.get('general/0/colorize')
printer.inc_verbosity(site_config.get('general/0/verbose'))
try:
printer.debug('Initializing runtime')
runtime.init_runtime(site_config)
except errors.ConfigError as e:
printer.error(f'failed to initialize runtime: {e}')
printer.error(logfiles_message())
sys.exit(1)
rt = runtime.runtime()
try:
if site_config.get('general/0/module_map_file'):
rt.modules_system.load_mapping_from_file(
site_config.get('general/0/module_map_file')
)
if site_config.get('general/0/module_mappings'):
for m in site_config.get('general/0/module_mappings'):
rt.modules_system.load_mapping(m)
except (errors.ConfigError, OSError) as e:
printer.error('could not load module mappings: %s' % e)
sys.exit(1)
if (osext.samefile(rt.stage_prefix, rt.output_prefix) and
not site_config.get('general/0/keep_stage_files')):
printer.error("stage and output refer to the same directory; "
"if this is on purpose, please use the "
"'--keep-stage-files' option.")
printer.error(logfiles_message())
sys.exit(1)
# Show configuration after everything is set up
if options.show_config:
config_param = options.show_config
if config_param == 'all':
printer.info(str(rt.site_config))
else:
value = rt.get_option(config_param)
if value is None:
printer.error(
f'no such configuration parameter found: {config_param}'
)
else:
printer.info(json.dumps(value, indent=2))
sys.exit(0)
printer.debug(format_env(options.env_vars))
# Setup the check loader
if options.restore_session is not None:
# We need to load the failed checks only from a report
if options.restore_session:
filename = options.restore_session
else:
filename = runreport.next_report_filename(
osext.expandvars(site_config.get('general/0/report_file')),
new=False
)
report = runreport.load_report(filename)
check_search_path = list(report.slice('filename', unique=True))
check_search_recursive = False
# If `-c` or `-R` are passed explicitly outside the configuration
# file, override the values set from the report file
if site_config.is_sticky_option('general/check_search_path'):
printer.warning(
'Ignoring check search path set in the report file: '
'search path set explicitly in the command-line or '
'the environment'
)
check_search_path = site_config.get(
'general/0/check_search_path'
)
if site_config.is_sticky_option('general/check_search_recursive'):
printer.warning(
'Ignoring check search recursive option from the report file: '
'option set explicitly in the command-line or the environment'
)
check_search_recursive = site_config.get(
'general/0/check_search_recursive'
)
else:
check_search_recursive = site_config.get(
'general/0/check_search_recursive'
)
check_search_path = site_config.get('general/0/check_search_path')
loader = RegressionCheckLoader(
load_path=check_search_path,
recurse=check_search_recursive,
ignore_conflicts=site_config.get(
'general/0/ignore_check_conflicts'
)
)
def print_infoline(param, value):
param = param + ':'
printer.info(f" {param.ljust(18)} {value}")
session_info = {
'cmdline': ' '.join(sys.argv),
'config_file': rt.site_config.filename,
'data_version': runreport.DATA_VERSION,
'hostname': socket.gethostname(),
'prefix_output': rt.output_prefix,
'prefix_stage': rt.stage_prefix,
'user': osext.osuser(),
'version': osext.reframe_version(),
'workdir': os.getcwd(),
}
# Print command line
printer.info(f"[ReFrame Setup]")
print_infoline('version', session_info['version'])
print_infoline('command', repr(session_info['cmdline']))
print_infoline(
f"launched by",
f"{session_info['user'] or '<unknown>'}@{session_info['hostname']}"
)
print_infoline('working directory', repr(session_info['workdir']))
print_infoline('settings file', f"{session_info['config_file']!r}")
print_infoline('check search path',
f"{'(R) ' if loader.recurse else ''}"
f"{':'.join(loader.load_path)!r}")
print_infoline('stage directory', repr(session_info['prefix_stage']))
print_infoline('output directory', repr(session_info['prefix_output']))
printer.info('')
try:
# Locate and load checks
try:
checks_found = loader.load_all()
printer.verbose(f'Loaded {len(checks_found)} test(s)')
except OSError as e:
raise errors.ReframeError from e
# Generate all possible test cases first; we will need them for
# resolving dependencies after filtering
# Determine the allowed programming environments
allowed_environs = {e.name
for env_patt in options.prgenv
for p in rt.system.partitions
for e in p.environs if re.match(env_patt, e.name)}
testcases_all = generate_testcases(checks_found,
options.skip_system_check,
options.skip_prgenv_check,
allowed_environs)
testcases = testcases_all
printer.verbose(f'Generated {len(testcases)} test case(s)')
# Filter test cases by name
if options.exclude_names:
for name in options.exclude_names:
testcases = filter(filters.have_not_name(name), testcases)
if options.names:
testcases = filter(
filters.have_name('|'.join(options.names)), testcases
)
testcases = list(testcases)
printer.verbose(
f'Filtering test cases(s) by name: {len(testcases)} remaining'
)
# Filter test cases by tags
for tag in options.tags:
testcases = filter(filters.have_tag(tag), testcases)
testcases = list(testcases)
printer.verbose(
f'Filtering test cases(s) by tags: {len(testcases)} remaining'
)
# Filter test cases further
if options.gpu_only and options.cpu_only:
printer.error("options `--gpu-only' and `--cpu-only' "
"are mutually exclusive")
sys.exit(1)
if options.gpu_only:
testcases = filter(filters.have_gpu_only(), testcases)
elif options.cpu_only:
testcases = filter(filters.have_cpu_only(), testcases)
testcases = list(testcases)
printer.verbose(
f'Filtering test cases(s) by other attributes: '
f'{len(testcases)} remaining'
)
# Filter in failed cases
if options.failed:
if options.restore_session is None:
printer.error(
"the option '--failed' can only be used "
"in combination with the '--restore-session' option"
)
sys.exit(1)
def _case_failed(t):
rec = report.case(*t)
if rec and rec['result'] == 'failure':
return True
else:
return False
testcases = list(filter(_case_failed, testcases))
printer.verbose(
f'Filtering successful test case(s): '
f'{len(testcases)} remaining'
)
# Prepare for running
printer.debug('Building and validating the full test DAG')
testgraph, skipped_cases = dependencies.build_deps(testcases_all)
if skipped_cases:
# Some cases were skipped, so adjust testcases
testcases = list(set(testcases) - set(skipped_cases))
printer.verbose(
f'Filtering test case(s) due to unresolved dependencies: '
f'{len(testcases)} remaining'
)
dependencies.validate_deps(testgraph)
printer.debug('Full test DAG:')
printer.debug(dependencies.format_deps(testgraph))
restored_cases = []
if len(testcases) != len(testcases_all):
testgraph = dependencies.prune_deps(
testgraph, testcases,
max_depth=1 if options.restore_session is not None else None
)
printer.debug('Pruned test DAG')
printer.debug(dependencies.format_deps(testgraph))
if options.restore_session is not None:
testgraph, restored_cases = report.restore_dangling(testgraph)
testcases = dependencies.toposort(
testgraph,
is_subgraph=options.restore_session is not None
)
printer.verbose(f'Final number of test cases: {len(testcases)}')
# Disable hooks
for tc in testcases:
for h in options.hooks:
type(tc.check).disable_hook(h)
# Act on checks
if options.list or options.list_detailed:
list_checks(testcases, printer, options.list_detailed)
sys.exit(0)
if not options.run:
printer.error(f"No action specified. Please specify `-l'/`-L' for "
f"listing or `-r' for running. "
f"Try `{argparser.prog} -h' for more options.")
sys.exit(1)
# Manipulate ReFrame's environment
if site_config.get('general/0/purge_environment'):
rt.modules_system.unload_all()
else:
for m in site_config.get('general/0/unload_modules'):
rt.modules_system.unload_module(**m)
# Load the environment for the current system
try:
printer.debug(f'Loading environment for current system')
runtime.loadenv(rt.system.preload_environ)
except errors.EnvironError as e:
printer.error("failed to load current system's environment; "
"please check your configuration")
printer.debug(str(e))
raise
def module_use(*paths):
try:
rt.modules_system.searchpath_add(*paths)
except errors.EnvironError as e:
printer.warning(f'could not add module paths correctly')
printer.debug(str(e))
def module_unuse(*paths):
try:
rt.modules_system.searchpath_remove(*paths)
except errors.EnvironError as e:
printer.warning(f'could not remove module paths correctly')
printer.debug(str(e))
printer.debug('(Un)using module paths from command line')
module_paths = {}
for d in options.module_paths:
if d.startswith('-'):
module_paths.setdefault('-', [])
module_paths['-'].append(d[1:])
elif d.startswith('+'):
module_paths.setdefault('+', [])
module_paths['+'].append(d[1:])
else:
module_paths.setdefault('x', [])
module_paths['x'].append(d)
for op, paths in module_paths.items():
if op == '+':
module_use(*paths)
elif op == '-':
module_unuse(*paths)
else:
# First empty the current module path in a portable way
searchpath = [p for p in rt.modules_system.searchpath if p]
if searchpath:
rt.modules_system.searchpath_remove(*searchpath)
# Treat `A:B` syntax as well in this case
paths = itertools.chain(*(p.split(':') for p in paths))
module_use(*paths)
printer.debug('Loading user modules from command line')
for m in site_config.get('general/0/user_modules'):
try:
rt.modules_system.load_module(**m, force=True)
except errors.EnvironError as e:
printer.warning(
f'could not load module {m["name"]!r} correctly; '
f'skipping...'
)
printer.debug(str(e))
options.flex_alloc_nodes = options.flex_alloc_nodes or 'idle'
# Run the tests
# Setup the execution policy
if options.exec_policy == 'serial':
exec_policy = SerialExecutionPolicy()
elif options.exec_policy == 'async':
exec_policy = AsynchronousExecutionPolicy()
else:
# This should not happen, since choices are handled by
# argparser
printer.error("unknown execution policy `%s': Exiting...")
sys.exit(1)
exec_policy.skip_system_check = options.skip_system_check
exec_policy.force_local = options.force_local
exec_policy.strict_check = options.strict
exec_policy.skip_sanity_check = options.skip_sanity_check
exec_policy.skip_performance_check = options.skip_performance_check
exec_policy.keep_stage_files = site_config.get(
'general/0/keep_stage_files'
)
try:
errmsg = "invalid option for --flex-alloc-nodes: '{0}'"
sched_flex_alloc_nodes = int(options.flex_alloc_nodes)
if sched_flex_alloc_nodes <= 0:
raise errors.ConfigError(
errmsg.format(options.flex_alloc_nodes)
)
except ValueError:
sched_flex_alloc_nodes = options.flex_alloc_nodes
exec_policy.sched_flex_alloc_nodes = sched_flex_alloc_nodes
parsed_job_options = []
for opt in options.job_options:
if opt.startswith('-') or opt.startswith('#'):
parsed_job_options.append(opt)
elif len(opt) == 1:
parsed_job_options.append(f'-{opt}')
else:
parsed_job_options.append(f'--{opt}')
exec_policy.sched_options = parsed_job_options
try:
max_retries = int(options.max_retries)
except ValueError:
raise errors.ConfigError(
f'--max-retries is not a valid integer: {max_retries}'
) from None
try:
max_fail = int(options.max_fail)
except ValueError:
raise errors.ConfigError(
f'--max-fail is not a valid integer: {max_fail}'
) from None
runner = Runner(exec_policy, printer, max_retries, max_fail)
try:
time_start = time.time()
session_info['time_start'] = time.strftime(
'%FT%T%z', time.localtime(time_start),
)
runner.runall(testcases, restored_cases)
finally:
time_end = time.time()
session_info['time_end'] = time.strftime(
'%FT%T%z', time.localtime(time_end)
)
session_info['time_elapsed'] = time_end - time_start
# Print a retry report if we did any retries
if runner.stats.failures(run=0):
printer.info(runner.stats.retry_report())
# Print a failure report if we had failures in the last run
success = True
if runner.stats.failures():
success = False
runner.stats.print_failure_report(printer)
if options.failure_stats:
runner.stats.print_failure_stats(printer)
if options.performance_report:
printer.info(runner.stats.performance_report())
# Generate the report for this session
report_file = os.path.normpath(
osext.expandvars(rt.get_option('general/0/report_file'))
)
basedir = os.path.dirname(report_file)
if basedir:
os.makedirs(basedir, exist_ok=True)
# Build final JSON report
run_stats = runner.stats.json()
session_info.update({
'num_cases': run_stats[0]['num_cases'],
'num_failures': run_stats[-1]['num_failures']
})
json_report = {
'session_info': session_info,
'runs': run_stats,
'restored_cases': []
}
if options.restore_session is not None:
for c in restored_cases:
json_report['restored_cases'].append(report.case(*c))
report_file = runreport.next_report_filename(report_file)
try:
with open(report_file, 'w') as fp:
jsonext.dump(json_report, fp, indent=2)
fp.write('\n')
except OSError as e:
printer.warning(
f'failed to generate report in {report_file!r}: {e}'
)
if not success:
sys.exit(1)
sys.exit(0)
except KeyboardInterrupt:
sys.exit(1)
except errors.ReframeError as e:
printer.error(str(e))
sys.exit(1)
except (Exception, errors.ReframeFatalError):
exc_info = sys.exc_info()
tb = ''.join(traceback.format_exception(*exc_info))
printer.error(errors.what(*exc_info))
if errors.is_severe(*exc_info):
printer.error(tb)
else:
printer.verbose(tb)
sys.exit(1)
finally:
try:
log_files = logging.log_files()
if site_config.get('general/0/save_log_files'):
log_files = logging.save_log_files(rt.output_prefix)
except OSError as e:
printer.error(f'could not save log file: {e}')
sys.exit(1)
finally:
printer.info(logfiles_message())
| def main():
# Setup command line options
argparser = argparse.ArgumentParser()
output_options = argparser.add_argument_group(
'Options controlling ReFrame output'
)
locate_options = argparser.add_argument_group(
'Options for discovering checks'
)
select_options = argparser.add_argument_group(
'Options for selecting checks'
)
action_options = argparser.add_argument_group(
'Options controlling actions'
)
run_options = argparser.add_argument_group(
'Options controlling the execution of checks'
)
env_options = argparser.add_argument_group(
'Options controlling the ReFrame environment'
)
misc_options = argparser.add_argument_group('Miscellaneous options')
# Output directory options
output_options.add_argument(
'--prefix', action='store', metavar='DIR',
help='Set general directory prefix to DIR',
envvar='RFM_PREFIX', configvar='systems/prefix'
)
output_options.add_argument(
'-o', '--output', action='store', metavar='DIR',
help='Set output directory prefix to DIR',
envvar='RFM_OUTPUT_DIR', configvar='systems/outputdir'
)
output_options.add_argument(
'-s', '--stage', action='store', metavar='DIR',
help='Set stage directory prefix to DIR',
envvar='RFM_STAGE_DIR', configvar='systems/stagedir'
)
output_options.add_argument(
'--timestamp', action='store', nargs='?', const='', metavar='TIMEFMT',
help=('Append a timestamp to the output and stage directory prefixes '
'(default: "%%FT%%T")'),
envvar='RFM_TIMESTAMP_DIRS', configvar='general/timestamp_dirs'
)
output_options.add_argument(
'--perflogdir', action='store', metavar='DIR',
help=('Set performance log data directory prefix '
'(relevant only to the filelog log handler)'),
envvar='RFM_PERFLOG_DIR',
configvar='logging/handlers_perflog/filelog_basedir'
)
output_options.add_argument(
'--keep-stage-files', action='store_true',
help='Keep stage directories even for successful checks',
envvar='RFM_KEEP_STAGE_FILES', configvar='general/keep_stage_files'
)
output_options.add_argument(
'--dont-restage', action='store_false', dest='clean_stagedir',
help='Reuse the test stage directory',
envvar='RFM_CLEAN_STAGEDIR', configvar='general/clean_stagedir'
)
output_options.add_argument(
'--save-log-files', action='store_true', default=False,
help='Save ReFrame log files to the output directory',
envvar='RFM_SAVE_LOG_FILES', configvar='general/save_log_files'
)
output_options.add_argument(
'--report-file', action='store', metavar='FILE',
help="Store JSON run report in FILE",
envvar='RFM_REPORT_FILE',
configvar='general/report_file'
)
# Check discovery options
locate_options.add_argument(
'-c', '--checkpath', action='append', metavar='PATH',
help="Add PATH to the check search path list",
envvar='RFM_CHECK_SEARCH_PATH :', configvar='general/check_search_path'
)
locate_options.add_argument(
'-R', '--recursive', action='store_true',
help='Search for checks in the search path recursively',
envvar='RFM_CHECK_SEARCH_RECURSIVE',
configvar='general/check_search_recursive'
)
locate_options.add_argument(
'--ignore-check-conflicts', action='store_true',
help='Skip checks with conflicting names',
envvar='RFM_IGNORE_CHECK_CONFLICTS',
configvar='general/ignore_check_conflicts'
)
# Select options
select_options.add_argument(
'-t', '--tag', action='append', dest='tags', metavar='PATTERN',
default=[],
help='Select checks with at least one tag matching PATTERN'
)
select_options.add_argument(
'-n', '--name', action='append', dest='names', default=[],
metavar='PATTERN', help='Select checks whose name matches PATTERN'
)
select_options.add_argument(
'-x', '--exclude', action='append', dest='exclude_names',
metavar='PATTERN', default=[],
help='Exclude checks whose name matches PATTERN'
)
select_options.add_argument(
'-p', '--prgenv', action='append', default=[r'.*'], metavar='PATTERN',
help=('Select checks with at least one '
'programming environment matching PATTERN')
)
select_options.add_argument(
'--failed', action='store_true',
help="Select failed test cases (only when '--restore-session' is used)"
)
select_options.add_argument(
'--gpu-only', action='store_true',
help='Select only GPU checks'
)
select_options.add_argument(
'--cpu-only', action='store_true',
help='Select only CPU checks'
)
# Action options
action_options.add_argument(
'-l', '--list', action='store_true',
help='List the selected checks'
)
action_options.add_argument(
'-L', '--list-detailed', action='store_true',
help='List the selected checks providing details for each test'
)
action_options.add_argument(
'-r', '--run', action='store_true',
help='Run the selected checks'
)
# Run options
run_options.add_argument(
'-J', '--job-option', action='append', metavar='OPT',
dest='job_options', default=[],
help='Pass option OPT to job scheduler'
)
run_options.add_argument(
'--force-local', action='store_true',
help='Force local execution of checks'
)
run_options.add_argument(
'--skip-sanity-check', action='store_true',
help='Skip sanity checking'
)
run_options.add_argument(
'--skip-performance-check', action='store_true',
help='Skip performance checking'
)
run_options.add_argument(
'--strict', action='store_true',
help='Enforce strict performance checking'
)
run_options.add_argument(
'--skip-system-check', action='store_true',
help='Skip system check'
)
run_options.add_argument(
'--skip-prgenv-check', action='store_true',
help='Skip programming environment check'
)
run_options.add_argument(
'--exec-policy', metavar='POLICY', action='store',
choices=['async', 'serial'], default='async',
help='Set the execution policy of ReFrame (default: "async")'
)
run_options.add_argument(
'--mode', action='store', help='Execution mode to use'
)
run_options.add_argument(
'--max-retries', metavar='NUM', action='store', default=0,
help='Set the maximum number of times a failed regression test '
'may be retried (default: 0)'
)
run_options.add_argument(
'--max-fail', metavar='NUM', action='store', default=0,
help='Exit after first NUM failures'
)
run_options.add_argument(
'--restore-session', action='store', nargs='?', const='',
metavar='REPORT',
help='Restore a testing session from REPORT file'
)
run_options.add_argument(
'--flex-alloc-nodes', action='store',
dest='flex_alloc_nodes', metavar='{all|STATE|NUM}', default=None,
help='Set strategy for the flexible node allocation (default: "idle").'
)
run_options.add_argument(
'--disable-hook', action='append', metavar='NAME', dest='hooks',
default=[], help='Disable a pipeline hook for this run'
)
env_options.add_argument(
'-M', '--map-module', action='append', metavar='MAPPING',
dest='module_mappings', default=[],
help='Add a module mapping',
envvar='RFM_MODULE_MAPPINGS ,', configvar='general/module_mappings'
)
env_options.add_argument(
'-m', '--module', action='append', default=[],
metavar='MOD', dest='user_modules',
help='Load module MOD before running any regression check',
envvar='RFM_USER_MODULES ,', configvar='general/user_modules'
)
env_options.add_argument(
'--module-mappings', action='store', metavar='FILE',
dest='module_map_file',
help='Load module mappings from FILE',
envvar='RFM_MODULE_MAP_FILE', configvar='general/module_map_file'
)
env_options.add_argument(
'-u', '--unload-module', action='append', metavar='MOD',
dest='unload_modules', default=[],
help='Unload module MOD before running any regression check',
envvar='RFM_UNLOAD_MODULES ,', configvar='general/unload_modules'
)
env_options.add_argument(
'--module-path', action='append', metavar='PATH',
dest='module_paths', default=[],
help='(Un)use module path PATH before running any regression check',
)
env_options.add_argument(
'--purge-env', action='store_true', dest='purge_env', default=False,
help='Unload all modules before running any regression check',
envvar='RFM_PURGE_ENVIRONMENT', configvar='general/purge_environment'
)
env_options.add_argument(
'--non-default-craype', action='store_true',
help='Test a non-default Cray Programming Environment',
envvar='RFM_NON_DEFAULT_CRAYPE', configvar='general/non_default_craype'
)
# Miscellaneous options
misc_options.add_argument(
'-C', '--config-file', action='store',
dest='config_file', metavar='FILE',
help='Set configuration file',
envvar='RFM_CONFIG_FILE'
)
misc_options.add_argument(
'--nocolor', action='store_false', dest='colorize',
help='Disable coloring of output',
envvar='RFM_COLORIZE', configvar='general/colorize'
)
misc_options.add_argument(
'--failure-stats', action='store_true', help='Print failure statistics'
)
misc_options.add_argument(
'--performance-report', action='store_true',
help='Print a report for performance tests'
)
misc_options.add_argument(
'--show-config', action='store', nargs='?', const='all',
metavar='PARAM',
help='Print the value of configuration parameter PARAM and exit'
)
misc_options.add_argument(
'--system', action='store', help='Load configuration for SYSTEM',
envvar='RFM_SYSTEM'
)
misc_options.add_argument(
'--upgrade-config-file', action='store', metavar='OLD[:NEW]',
help='Upgrade ReFrame 2.x configuration file to ReFrame 3.x syntax'
)
misc_options.add_argument(
'-V', '--version', action='version', version=osext.reframe_version()
)
misc_options.add_argument(
'-v', '--verbose', action='count',
help='Increase verbosity level of output',
envvar='RFM_VERBOSE', configvar='general/verbose'
)
# Options not associated with command-line arguments
argparser.add_argument(
dest='graylog_server',
envvar='RFM_GRAYLOG_ADDRESS',
configvar='logging/handlers_perflog/graylog_address',
help='Graylog server address'
)
argparser.add_argument(
dest='syslog_address',
envvar='RFM_SYSLOG_ADDRESS',
configvar='logging/handlers_perflog/syslog_address',
help='Syslog server address'
)
argparser.add_argument(
dest='ignore_reqnodenotavail',
envvar='RFM_IGNORE_REQNODENOTAVAIL',
configvar='schedulers/ignore_reqnodenotavail',
action='store_true',
help='Graylog server address'
)
argparser.add_argument(
dest='use_login_shell',
envvar='RFM_USE_LOGIN_SHELL',
configvar='general/use_login_shell',
action='store_true',
help='Use a login shell for job scripts'
)
if len(sys.argv) == 1:
argparser.print_help()
sys.exit(1)
# Parse command line
options = argparser.parse_args()
# First configure logging with our generic configuration so as to be able
# to print pretty messages; logging will be reconfigured by user's
# configuration later
site_config = config.load_config(
os.path.join(reframe.INSTALL_PREFIX, 'reframe/core/settings.py')
)
site_config.select_subconfig('generic')
options.update_config(site_config)
logging.configure_logging(site_config)
logging.getlogger().colorize = site_config.get('general/0/colorize')
printer = PrettyPrinter()
printer.colorize = site_config.get('general/0/colorize')
printer.inc_verbosity(site_config.get('general/0/verbose'))
if os.getenv('RFM_GRAYLOG_SERVER'):
printer.warning(
'RFM_GRAYLOG_SERVER environment variable is deprecated; '
'please use RFM_GRAYLOG_ADDRESS instead'
)
os.environ['RFM_GRAYLOG_ADDRESS'] = os.getenv('RFM_GRAYLOG_SERVER')
if options.upgrade_config_file is not None:
old_config, *new_config = options.upgrade_config_file.split(
':', maxsplit=1
)
new_config = new_config[0] if new_config else None
try:
new_config = config.convert_old_config(old_config, new_config)
except Exception as e:
printer.error(f'could not convert file: {e}')
sys.exit(1)
printer.info(
f'Conversion successful! '
f'The converted file can be found at {new_config!r}.'
)
sys.exit(0)
# Now configure ReFrame according to the user configuration file
try:
try:
printer.debug('Loading user configuration')
site_config = config.load_config(options.config_file)
except warnings.ReframeDeprecationWarning as e:
printer.warning(e)
converted = config.convert_old_config(options.config_file)
printer.warning(
f"configuration file has been converted "
f"to the new syntax here: '{converted}'"
)
site_config = config.load_config(converted)
site_config.validate()
# We ignore errors about unresolved sections or configuration
# parameters here, because they might be defined at the individual
# partition level and will be caught when we will instantiating
# internally the system and partitions later on.
site_config.select_subconfig(options.system,
ignore_resolve_errors=True)
for err in options.update_config(site_config):
printer.warning(str(err))
# Update options from the selected execution mode
if options.mode:
mode_args = site_config.get(f'modes/@{options.mode}/options')
# We lexically split the mode options, because otherwise spaces
# will be treated as part of the option argument; see GH bug #1554
mode_args = list(itertools.chain.from_iterable(shlex.split(m)
for m in mode_args))
# Parse the mode's options and reparse the command-line
options = argparser.parse_args(mode_args)
options = argparser.parse_args(namespace=options.cmd_options)
options.update_config(site_config)
logging.configure_logging(site_config)
except (OSError, errors.ConfigError) as e:
printer.error(f'failed to load configuration: {e}')
printer.error(logfiles_message())
sys.exit(1)
logging.getlogger().colorize = site_config.get('general/0/colorize')
printer.colorize = site_config.get('general/0/colorize')
printer.inc_verbosity(site_config.get('general/0/verbose'))
try:
printer.debug('Initializing runtime')
runtime.init_runtime(site_config)
except errors.ConfigError as e:
printer.error(f'failed to initialize runtime: {e}')
printer.error(logfiles_message())
sys.exit(1)
rt = runtime.runtime()
try:
if site_config.get('general/0/module_map_file'):
rt.modules_system.load_mapping_from_file(
site_config.get('general/0/module_map_file')
)
if site_config.get('general/0/module_mappings'):
for m in site_config.get('general/0/module_mappings'):
rt.modules_system.load_mapping(m)
except (errors.ConfigError, OSError) as e:
printer.error('could not load module mappings: %s' % e)
sys.exit(1)
if (osext.samefile(rt.stage_prefix, rt.output_prefix) and
not site_config.get('general/0/keep_stage_files')):
printer.error("stage and output refer to the same directory; "
"if this is on purpose, please use the "
"'--keep-stage-files' option.")
printer.error(logfiles_message())
sys.exit(1)
# Show configuration after everything is set up
if options.show_config:
config_param = options.show_config
if config_param == 'all':
printer.info(str(rt.site_config))
else:
value = rt.get_option(config_param)
if value is None:
printer.error(
f'no such configuration parameter found: {config_param}'
)
else:
printer.info(json.dumps(value, indent=2))
sys.exit(0)
printer.debug(format_env(options.env_vars))
# Setup the check loader
if options.restore_session is not None:
# We need to load the failed checks only from a report
if options.restore_session:
filename = options.restore_session
else:
filename = runreport.next_report_filename(
osext.expandvars(site_config.get('general/0/report_file')),
new=False
)
report = runreport.load_report(filename)
check_search_path = list(report.slice('filename', unique=True))
check_search_recursive = False
# If `-c` or `-R` are passed explicitly outside the configuration
# file, override the values set from the report file
if site_config.is_sticky_option('general/check_search_path'):
printer.warning(
'Ignoring check search path set in the report file: '
'search path set explicitly in the command-line or '
'the environment'
)
check_search_path = site_config.get(
'general/0/check_search_path'
)
if site_config.is_sticky_option('general/check_search_recursive'):
printer.warning(
'Ignoring check search recursive option from the report file: '
'option set explicitly in the command-line or the environment'
)
check_search_recursive = site_config.get(
'general/0/check_search_recursive'
)
else:
check_search_recursive = site_config.get(
'general/0/check_search_recursive'
)
check_search_path = site_config.get('general/0/check_search_path')
loader = RegressionCheckLoader(
load_path=check_search_path,
recurse=check_search_recursive,
ignore_conflicts=site_config.get(
'general/0/ignore_check_conflicts'
)
)
def print_infoline(param, value):
param = param + ':'
printer.info(f" {param.ljust(18)} {value}")
session_info = {
'cmdline': ' '.join(sys.argv),
'config_file': rt.site_config.filename,
'data_version': runreport.DATA_VERSION,
'hostname': socket.gethostname(),
'prefix_output': rt.output_prefix,
'prefix_stage': rt.stage_prefix,
'user': osext.osuser(),
'version': osext.reframe_version(),
'workdir': os.getcwd(),
}
# Print command line
printer.info(f"[ReFrame Setup]")
print_infoline('version', session_info['version'])
print_infoline('command', repr(session_info['cmdline']))
print_infoline(
f"launched by",
f"{session_info['user'] or '<unknown>'}@{session_info['hostname']}"
)
print_infoline('working directory', repr(session_info['workdir']))
print_infoline('settings file', f"{session_info['config_file']!r}")
print_infoline('check search path',
f"{'(R) ' if loader.recurse else ''}"
f"{':'.join(loader.load_path)!r}")
print_infoline('stage directory', repr(session_info['prefix_stage']))
print_infoline('output directory', repr(session_info['prefix_output']))
printer.info('')
try:
# Locate and load checks
try:
checks_found = loader.load_all()
printer.verbose(f'Loaded {len(checks_found)} test(s)')
except OSError as e:
raise errors.ReframeError from e
# Generate all possible test cases first; we will need them for
# resolving dependencies after filtering
# Determine the allowed programming environments
allowed_environs = {e.name
for env_patt in options.prgenv
for p in rt.system.partitions
for e in p.environs if re.match(env_patt, e.name)}
testcases_all = generate_testcases(checks_found,
options.skip_system_check,
options.skip_prgenv_check,
allowed_environs)
testcases = testcases_all
printer.verbose(f'Generated {len(testcases)} test case(s)')
# Filter test cases by name
if options.exclude_names:
for name in options.exclude_names:
testcases = filter(filters.have_not_name(name), testcases)
if options.names:
testcases = filter(
filters.have_name('|'.join(options.names)), testcases
)
testcases = list(testcases)
printer.verbose(
f'Filtering test cases(s) by name: {len(testcases)} remaining'
)
# Filter test cases by tags
for tag in options.tags:
testcases = filter(filters.have_tag(tag), testcases)
testcases = list(testcases)
printer.verbose(
f'Filtering test cases(s) by tags: {len(testcases)} remaining'
)
# Filter test cases further
if options.gpu_only and options.cpu_only:
printer.error("options `--gpu-only' and `--cpu-only' "
"are mutually exclusive")
sys.exit(1)
if options.gpu_only:
testcases = filter(filters.have_gpu_only(), testcases)
elif options.cpu_only:
testcases = filter(filters.have_cpu_only(), testcases)
testcases = list(testcases)
printer.verbose(
f'Filtering test cases(s) by other attributes: '
f'{len(testcases)} remaining'
)
# Filter in failed cases
if options.failed:
if options.restore_session is None:
printer.error(
"the option '--failed' can only be used "
"in combination with the '--restore-session' option"
)
sys.exit(1)
def _case_failed(t):
rec = report.case(*t)
if rec and rec['result'] == 'failure':
return True
else:
return False
testcases = list(filter(_case_failed, testcases))
printer.verbose(
f'Filtering successful test case(s): '
f'{len(testcases)} remaining'
)
# Prepare for running
printer.debug('Building and validating the full test DAG')
testgraph, skipped_cases = dependencies.build_deps(testcases_all)
if skipped_cases:
# Some cases were skipped, so adjust testcases
testcases = list(set(testcases) - set(skipped_cases))
printer.verbose(
f'Filtering test case(s) due to unresolved dependencies: '
f'{len(testcases)} remaining'
)
dependencies.validate_deps(testgraph)
printer.debug('Full test DAG:')
printer.debug(dependencies.format_deps(testgraph))
restored_cases = []
if len(testcases) != len(testcases_all):
testgraph = dependencies.prune_deps(
testgraph, testcases,
max_depth=1 if options.restore_session is not None else None
)
printer.debug('Pruned test DAG')
printer.debug(dependencies.format_deps(testgraph))
if options.restore_session is not None:
testgraph, restored_cases = report.restore_dangling(testgraph)
testcases = dependencies.toposort(
testgraph,
is_subgraph=options.restore_session is not None
)
printer.verbose(f'Final number of test cases: {len(testcases)}')
# Disable hooks
for tc in testcases:
for h in options.hooks:
type(tc.check).disable_hook(h)
# Act on checks
if options.list or options.list_detailed:
list_checks(testcases, printer, options.list_detailed)
sys.exit(0)
if not options.run:
printer.error(f"No action specified. Please specify `-l'/`-L' for "
f"listing or `-r' for running. "
f"Try `{argparser.prog} -h' for more options.")
sys.exit(1)
# Manipulate ReFrame's environment
if site_config.get('general/0/purge_environment'):
rt.modules_system.unload_all()
else:
for m in site_config.get('general/0/unload_modules'):
rt.modules_system.unload_module(**m)
# Load the environment for the current system
try:
printer.debug(f'Loading environment for current system')
runtime.loadenv(rt.system.preload_environ)
except errors.EnvironError as e:
printer.error("failed to load current system's environment; "
"please check your configuration")
printer.debug(str(e))
raise
def module_use(*paths):
try:
rt.modules_system.searchpath_add(*paths)
except errors.EnvironError as e:
printer.warning(f'could not add module paths correctly')
printer.debug(str(e))
def module_unuse(*paths):
try:
rt.modules_system.searchpath_remove(*paths)
except errors.EnvironError as e:
printer.warning(f'could not remove module paths correctly')
printer.debug(str(e))
printer.debug('(Un)using module paths from command line')
module_paths = {}
for d in options.module_paths:
if d.startswith('-'):
module_paths.setdefault('-', [])
module_paths['-'].append(d[1:])
elif d.startswith('+'):
module_paths.setdefault('+', [])
module_paths['+'].append(d[1:])
else:
module_paths.setdefault('x', [])
module_paths['x'].append(d)
for op, paths in module_paths.items():
if op == '+':
module_use(*paths)
elif op == '-':
module_unuse(*paths)
else:
# First empty the current module path in a portable way
searchpath = [p for p in rt.modules_system.searchpath if p]
if searchpath:
rt.modules_system.searchpath_remove(*searchpath)
# Treat `A:B` syntax as well in this case
paths = itertools.chain(*(p.split(':') for p in paths))
module_use(*paths)
printer.debug('Loading user modules from command line')
for m in site_config.get('general/0/user_modules'):
try:
rt.modules_system.load_module(**m, force=True)
except errors.EnvironError as e:
printer.warning(
f'could not load module {m["name"]!r} correctly; '
f'skipping...'
)
printer.debug(str(e))
options.flex_alloc_nodes = options.flex_alloc_nodes or 'idle'
# Run the tests
# Setup the execution policy
if options.exec_policy == 'serial':
exec_policy = SerialExecutionPolicy()
elif options.exec_policy == 'async':
exec_policy = AsynchronousExecutionPolicy()
else:
# This should not happen, since choices are handled by
# argparser
printer.error("unknown execution policy `%s': Exiting...")
sys.exit(1)
exec_policy.skip_system_check = options.skip_system_check
exec_policy.force_local = options.force_local
exec_policy.strict_check = options.strict
exec_policy.skip_sanity_check = options.skip_sanity_check
exec_policy.skip_performance_check = options.skip_performance_check
exec_policy.keep_stage_files = site_config.get(
'general/0/keep_stage_files'
)
try:
errmsg = "invalid option for --flex-alloc-nodes: '{0}'"
sched_flex_alloc_nodes = int(options.flex_alloc_nodes)
if sched_flex_alloc_nodes <= 0:
raise errors.ConfigError(
errmsg.format(options.flex_alloc_nodes)
)
except ValueError:
sched_flex_alloc_nodes = options.flex_alloc_nodes
exec_policy.sched_flex_alloc_nodes = sched_flex_alloc_nodes
parsed_job_options = []
for opt in options.job_options:
if opt.startswith('-') or opt.startswith('#'):
parsed_job_options.append(opt)
elif len(opt) == 1:
parsed_job_options.append(f'-{opt}')
else:
parsed_job_options.append(f'--{opt}')
exec_policy.sched_options = parsed_job_options
try:
max_retries = int(options.max_retries)
except ValueError:
raise errors.ConfigError(
f'--max-retries is not a valid integer: {max_retries}'
) from None
try:
max_fail = int(options.max_fail)
except ValueError:
raise errors.ConfigError(
f'--max-fail is not a valid integer: {max_fail}'
) from None
runner = Runner(exec_policy, printer, max_retries, max_fail)
try:
time_start = time.time()
session_info['time_start'] = time.strftime(
'%FT%T%z', time.localtime(time_start),
)
runner.runall(testcases, restored_cases)
finally:
time_end = time.time()
session_info['time_end'] = time.strftime(
'%FT%T%z', time.localtime(time_end)
)
session_info['time_elapsed'] = time_end - time_start
# Print a retry report if we did any retries
if runner.stats.failures(run=0):
printer.info(runner.stats.retry_report())
# Print a failure report if we had failures in the last run
success = True
if runner.stats.failures():
success = False
runner.stats.print_failure_report(printer)
if options.failure_stats:
runner.stats.print_failure_stats(printer)
if options.performance_report:
printer.info(runner.stats.performance_report())
# Generate the report for this session
report_file = os.path.normpath(
osext.expandvars(rt.get_option('general/0/report_file'))
)
basedir = os.path.dirname(report_file)
if basedir:
os.makedirs(basedir, exist_ok=True)
# Build final JSON report
run_stats = runner.stats.json()
session_info.update({
'num_cases': run_stats[0]['num_cases'],
'num_failures': run_stats[-1]['num_failures']
})
json_report = {
'session_info': session_info,
'runs': run_stats,
'restored_cases': []
}
if options.restore_session is not None:
for c in restored_cases:
json_report['restored_cases'].append(report.case(*c))
report_file = runreport.next_report_filename(report_file)
try:
with open(report_file, 'w') as fp:
jsonext.dump(json_report, fp, indent=2)
fp.write('\n')
except OSError as e:
printer.warning(
f'failed to generate report in {report_file!r}: {e}'
)
if not success:
sys.exit(1)
sys.exit(0)
except KeyboardInterrupt:
sys.exit(1)
except errors.ReframeError as e:
printer.error(str(e))
sys.exit(1)
except (Exception, errors.ReframeFatalError):
exc_info = sys.exc_info()
tb = ''.join(traceback.format_exception(*exc_info))
printer.error(errors.what(*exc_info))
if errors.is_severe(*exc_info):
printer.error(tb)
else:
printer.verbose(tb)
sys.exit(1)
finally:
try:
log_files = logging.log_files()
if site_config.get('general/0/save_log_files'):
log_files = logging.save_log_files(rt.output_prefix)
except OSError as e:
printer.error(f'could not save log file: {e}')
sys.exit(1)
finally:
printer.info(logfiles_message())
|
22,869 | def omt_evaluate_command(cmd, optimizer, client_data = None):
if cmd.name == smtcmd.MAXIMIZE:
rt = MaximizationGoal(cmd.args[0])
if client_data is not None:
client_data[0].append(rt)
return rt
elif cmd.name == smtcmd.MINIMIZE:
rt = MinimizationGoal(cmd.args[0])
if client_data is not None:
client_data[0].append(rt)
return rt
elif cmd.name == smtcmd.CHECK_SAT:
if client_data != None:
client_data[1].clear()
for g in client_data[0]:
client_data[1].append((g.term(), optimizer.optimize(g)[1]))
return optimizer.check_sat()
elif cmd.name == smtcmd.MAXMIN:
rt = MaxMinGoal(cmd.args[0])
if client_data is not None:
client_data[0].append(rt)
return rt
elif cmd.name == smtcmd.MINMAX:
rt = MinMaxGoal(cmd.args[0])
if client_data is not None:
client_data[0].append(rt)
return rt
elif cmd.name == smtcmd.GET_OBJECTIVES:
return client_data[1]
else:
return smt_evaluate_command(cmd, optimizer)
| def omt_evaluate_command(cmd, optimizer, client_data = None):
if cmd.name == smtcmd.MAXIMIZE:
rt = MaximizationGoal(cmd.args[0])
if client_data is not None:
client_data[0].append(rt)
return rt
elif cmd.name == smtcmd.MINIMIZE:
rt = MinimizationGoal(cmd.args[0])
if client_data is not None:
client_data[0].append(rt)
return rt
elif cmd.name == smtcmd.CHECK_SAT:
if client_data is not None:
client_data[1].clear()
for g in client_data[0]:
client_data[1].append((g.term(), optimizer.optimize(g)[1]))
return optimizer.check_sat()
elif cmd.name == smtcmd.MAXMIN:
rt = MaxMinGoal(cmd.args[0])
if client_data is not None:
client_data[0].append(rt)
return rt
elif cmd.name == smtcmd.MINMAX:
rt = MinMaxGoal(cmd.args[0])
if client_data is not None:
client_data[0].append(rt)
return rt
elif cmd.name == smtcmd.GET_OBJECTIVES:
return client_data[1]
else:
return smt_evaluate_command(cmd, optimizer)
|
4,032 | def parseOpts(overrideArguments=None):
def _readOptions(filename_bytes, default=[]):
try:
optionf = open(filename_bytes)
except IOError:
return default # silently skip if file is not present
try:
# FIXME: https://github.com/ytdl-org/youtube-dl/commit/dfe5fa49aed02cf36ba9f743b11b0903554b5e56
contents = optionf.read()
if sys.version_info < (3,):
contents = contents.decode(preferredencoding())
res = compat_shlex_split(contents, comments=True)
finally:
optionf.close()
return res
def _readUserConf():
xdg_config_home = compat_getenv('XDG_CONFIG_HOME')
if xdg_config_home:
userConfFile = os.path.join(xdg_config_home, 'youtube-dl', 'config')
if not os.path.isfile(userConfFile):
userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf')
else:
userConfFile = os.path.join(compat_expanduser('~'), '.config', 'youtube-dl', 'config')
if not os.path.isfile(userConfFile):
userConfFile = os.path.join(compat_expanduser('~'), '.config', 'youtube-dl.conf')
userConf = _readOptions(userConfFile, None)
if userConf is None:
appdata_dir = compat_getenv('appdata')
if appdata_dir:
userConf = _readOptions(
os.path.join(appdata_dir, 'youtube-dl', 'config'),
default=None)
if userConf is None:
userConf = _readOptions(
os.path.join(appdata_dir, 'youtube-dl', 'config.txt'),
default=None)
if userConf is None:
userConf = _readOptions(
os.path.join(compat_expanduser('~'), 'youtube-dl.conf'),
default=None)
if userConf is None:
userConf = _readOptions(
os.path.join(compat_expanduser('~'), 'youtube-dl.conf.txt'),
default=None)
if userConf is None:
userConf = []
return userConf
def _format_option_string(option):
''' ('-o', '--option') -> -o, --format METAVAR'''
opts = []
if option._short_opts:
opts.append(option._short_opts[0])
if option._long_opts:
opts.append(option._long_opts[0])
if len(opts) > 1:
opts.insert(1, ', ')
if option.takes_value():
opts.append(' %s' % option.metavar)
return ''.join(opts)
def _comma_separated_values_options_callback(option, opt_str, value, parser):
setattr(parser.values, option.dest, value.split(','))
# No need to wrap help messages if we're on a wide console
columns = compat_get_terminal_size().columns
max_width = columns if columns else 80
max_help_position = 80
fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
fmt.format_option_strings = _format_option_string
kw = {
'version': __version__,
'formatter': fmt,
'usage': '%prog [OPTIONS] URL [URL...]',
'conflict_handler': 'resolve',
}
parser = optparse.OptionParser(**compat_kwargs(kw))
general = optparse.OptionGroup(parser, 'General Options')
general.add_option(
'-h', '--help',
action='help',
help='Print this help text and exit')
general.add_option(
'--version',
action='version',
help='Print program version and exit')
general.add_option(
'-U', '--update',
action='store_true', dest='update_self',
help='Update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)')
general.add_option(
'-i', '--ignore-errors',
action='store_true', dest='ignoreerrors', default=False,
help='Continue on download errors, for example to skip unavailable videos in a playlist')
general.add_option(
'--abort-on-error',
action='store_false', dest='ignoreerrors',
help='Abort downloading of further videos (in the playlist or the command line) if an error occurs')
general.add_option(
'--dump-user-agent',
action='store_true', dest='dump_user_agent', default=False,
help='Display the current browser identification')
general.add_option(
'--list-extractors',
action='store_true', dest='list_extractors', default=False,
help='List all supported extractors')
general.add_option(
'--extractor-descriptions',
action='store_true', dest='list_extractor_descriptions', default=False,
help='Output descriptions of all supported extractors')
general.add_option(
'--force-generic-extractor',
action='store_true', dest='force_generic_extractor', default=False,
help='Force extraction to use the generic extractor')
general.add_option(
'--default-search',
dest='default_search', metavar='PREFIX',
help='Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple". Use the value "auto" to let youtube-dl guess ("auto_warning" to emit a warning when guessing). "error" just throws an error. The default value "fixup_error" repairs broken URLs, but emits an error if this is not possible instead of searching.')
general.add_option(
'--ignore-config',
action='store_true',
help='Do not read configuration files. '
'When given in the global configuration file /etc/youtube-dl.conf: '
'Do not read the user configuration in ~/.config/youtube-dl/config '
'(%APPDATA%/youtube-dl/config.txt on Windows)')
general.add_option(
'--config-location',
dest='config_location', metavar='PATH',
help='Location of the configuration file; either the path to the config or its containing directory.')
general.add_option(
'--flat-playlist',
action='store_const', dest='extract_flat', const='in_playlist',
default=False,
help='Do not extract the videos of a playlist, only list them.')
general.add_option(
'--mark-watched',
action='store_true', dest='mark_watched', default=False,
help='Mark videos watched')
general.add_option(
'--no-mark-watched',
action='store_false', dest='mark_watched', default=False,
help='Do not mark videos watched')
general.add_option(
'--no-color', '--no-colors',
action='store_true', dest='no_color',
default=False,
help='Do not emit color codes in output')
network = optparse.OptionGroup(parser, 'Network Options')
network.add_option(
'--proxy', dest='proxy',
default=None, metavar='URL',
help='Use the specified HTTP/HTTPS/SOCKS proxy. To enable '
'SOCKS proxy, specify a proper scheme. For example '
'socks5://127.0.0.1:1080/. Pass in an empty string (--proxy "") '
'for direct connection')
network.add_option(
'--socket-timeout',
dest='socket_timeout', type=float, default=None, metavar='SECONDS',
help='Time to wait before giving up, in seconds')
network.add_option(
'--source-address',
metavar='IP', dest='source_address', default=None,
help='Client-side IP address to bind to',
)
network.add_option(
'-4', '--force-ipv4',
action='store_const', const='0.0.0.0', dest='source_address',
help='Make all connections via IPv4',
)
network.add_option(
'-6', '--force-ipv6',
action='store_const', const='::', dest='source_address',
help='Make all connections via IPv6',
)
geo = optparse.OptionGroup(parser, 'Geo Restriction')
geo.add_option(
'--geo-verification-proxy',
dest='geo_verification_proxy', default=None, metavar='URL',
help='Use this proxy to verify the IP address for some geo-restricted sites. '
'The default proxy specified by --proxy (or none, if the option is not present) is used for the actual downloading.')
geo.add_option(
'--cn-verification-proxy',
dest='cn_verification_proxy', default=None, metavar='URL',
help=optparse.SUPPRESS_HELP)
geo.add_option(
'--geo-bypass',
action='store_true', dest='geo_bypass', default=True,
help='Bypass geographic restriction via faking X-Forwarded-For HTTP header')
geo.add_option(
'--no-geo-bypass',
action='store_false', dest='geo_bypass', default=True,
help='Do not bypass geographic restriction via faking X-Forwarded-For HTTP header')
geo.add_option(
'--geo-bypass-country', metavar='CODE',
dest='geo_bypass_country', default=None,
help='Force bypass geographic restriction with explicitly provided two-letter ISO 3166-2 country code')
geo.add_option(
'--geo-bypass-ip-block', metavar='IP_BLOCK',
dest='geo_bypass_ip_block', default=None,
help='Force bypass geographic restriction with explicitly provided IP block in CIDR notation')
selection = optparse.OptionGroup(parser, 'Video Selection')
selection.add_option(
'--playlist-start',
dest='playliststart', metavar='NUMBER', default=1, type=int,
help='Playlist video to start at (default is %default)')
selection.add_option(
'--playlist-end',
dest='playlistend', metavar='NUMBER', default=None, type=int,
help='Playlist video to end at (default is last)')
selection.add_option(
'--playlist-items',
dest='playlist_items', metavar='ITEM_SPEC', default=None,
help='Playlist video items to download. Specify indices of the videos in the playlist separated by commas like: "--playlist-items 1,2,5,8" if you want to download videos indexed 1, 2, 5, 8 in the playlist. You can specify range: "--playlist-items 1-3,7,10-13", it will download the videos at index 1, 2, 3, 7, 10, 11, 12 and 13.')
selection.add_option(
'--match-title',
dest='matchtitle', metavar='REGEX',
help='Download only matching titles (case-insensitive regex or alphanumeric sub-string)')
selection.add_option(
'--reject-title',
dest='rejecttitle', metavar='REGEX',
help='Skip download for matching titles (case-insensitive regex or alphanumeric sub-string)')
selection.add_option(
'--max-downloads',
dest='max_downloads', metavar='NUMBER', type=int, default=None,
help='Abort after downloading NUMBER files')
selection.add_option(
'--min-filesize',
metavar='SIZE', dest='min_filesize', default=None,
help='Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)')
selection.add_option(
'--max-filesize',
metavar='SIZE', dest='max_filesize', default=None,
help='Do not download any videos larger than SIZE (e.g. 50k or 44.6m)')
selection.add_option(
'--date',
metavar='DATE', dest='date', default=None,
help='Download only videos uploaded in this date')
selection.add_option(
'--datebefore',
metavar='DATE', dest='datebefore', default=None,
help='Download only videos uploaded on or before this date (i.e. inclusive)')
selection.add_option(
'--dateafter',
metavar='DATE', dest='dateafter', default=None,
help='Download only videos uploaded on or after this date (i.e. inclusive)')
selection.add_option(
'--min-views',
metavar='COUNT', dest='min_views', default=None, type=int,
help='Do not download any videos with less than COUNT views')
selection.add_option(
'--max-views',
metavar='COUNT', dest='max_views', default=None, type=int,
help='Do not download any videos with more than COUNT views')
selection.add_option(
'--match-filter',
metavar='FILTER', dest='match_filter', default=None,
help=(
'Generic video filter. '
'Specify any key (see the "OUTPUT TEMPLATE" for a list of available keys) to '
'match if the key is present, '
'!key to check if the key is not present, '
'key > NUMBER (like "comment_count > 12", also works with '
'>=, <, <=, !=, =) to compare against a number, '
'key = \'LITERAL\' (like "uploader = \'Mike Smith\'", also works with !=) '
'to match against a string literal '
'and & to require multiple matches. '
'Values which are not known are excluded unless you '
'put a question mark (?) after the operator. '
'For example, to only match videos that have been liked more than '
'100 times and disliked less than 50 times (or the dislike '
'functionality is not available at the given service), but who '
'also have a description, use --match-filter '
'"like_count > 100 & dislike_count <? 50 & description" .'
))
selection.add_option(
'--no-playlist',
action='store_true', dest='noplaylist', default=False,
help='Download only the video, if the URL refers to a video and a playlist.')
selection.add_option(
'--yes-playlist',
action='store_false', dest='noplaylist', default=False,
help='Download the playlist, if the URL refers to a video and a playlist.')
selection.add_option(
'--age-limit',
metavar='YEARS', dest='age_limit', default=None, type=int,
help='Download only videos suitable for the given age')
selection.add_option(
'--download-archive', metavar='FILE',
dest='download_archive',
help='Download only videos not listed in the archive file. Record the IDs of all downloaded videos in it.')
selection.add_option(
'--include-ads',
dest='include_ads', action='store_true',
help='Download advertisements as well (experimental)')
authentication = optparse.OptionGroup(parser, 'Authentication Options')
authentication.add_option(
'-u', '--username',
dest='username', metavar='USERNAME',
help='Login with this account ID')
authentication.add_option(
'-p', '--password',
dest='password', metavar='PASSWORD',
help='Account password. If this option is left out, youtube-dl will ask interactively.')
authentication.add_option(
'-2', '--twofactor',
dest='twofactor', metavar='TWOFACTOR',
help='Two-factor authentication code')
authentication.add_option(
'-n', '--netrc',
action='store_true', dest='usenetrc', default=False,
help='Use .netrc authentication data')
authentication.add_option(
'--video-password',
dest='videopassword', metavar='PASSWORD',
help='Video password (vimeo, youku)')
adobe_pass = optparse.OptionGroup(parser, 'Adobe Pass Options')
adobe_pass.add_option(
'--ap-mso',
dest='ap_mso', metavar='MSO',
help='Adobe Pass multiple-system operator (TV provider) identifier, use --ap-list-mso for a list of available MSOs')
adobe_pass.add_option(
'--ap-username',
dest='ap_username', metavar='USERNAME',
help='Multiple-system operator account login')
adobe_pass.add_option(
'--ap-password',
dest='ap_password', metavar='PASSWORD',
help='Multiple-system operator account password. If this option is left out, youtube-dl will ask interactively.')
adobe_pass.add_option(
'--ap-list-mso',
action='store_true', dest='ap_list_mso', default=False,
help='List all supported multiple-system operators')
video_format = optparse.OptionGroup(parser, 'Video Format Options')
video_format.add_option(
'-f', '--format',
action='store', dest='format', metavar='FORMAT', default=None,
help='Video format code, see the "FORMAT SELECTION" for all the info')
video_format.add_option(
'--all-formats',
action='store_const', dest='format', const='all',
help='Download all available video formats')
video_format.add_option(
'--prefer-free-formats',
action='store_true', dest='prefer_free_formats', default=False,
help='Prefer free video formats unless a specific one is requested')
video_format.add_option(
'-F', '--list-formats',
action='store_true', dest='listformats',
help='List all available formats of requested videos')
video_format.add_option(
'--youtube-include-dash-manifest',
action='store_true', dest='youtube_include_dash_manifest', default=True,
help=optparse.SUPPRESS_HELP)
video_format.add_option(
'--youtube-skip-dash-manifest',
action='store_false', dest='youtube_include_dash_manifest',
help='Do not download the DASH manifests and related data on YouTube videos')
video_format.add_option(
'--merge-output-format',
action='store', dest='merge_output_format', metavar='FORMAT', default=None,
help=(
'If a merge is required (e.g. bestvideo+bestaudio), '
'output to given container format. One of mkv, mp4, ogg, webm, flv. '
'Ignored if no merge is required'))
subtitles = optparse.OptionGroup(parser, 'Subtitle Options')
subtitles.add_option(
'--write-sub', '--write-srt',
action='store_true', dest='writesubtitles', default=False,
help='Write subtitle file')
subtitles.add_option(
'--write-auto-sub', '--write-automatic-sub',
action='store_true', dest='writeautomaticsub', default=False,
help='Write automatically generated subtitle file (YouTube only)')
subtitles.add_option(
'--all-subs',
action='store_true', dest='allsubtitles', default=False,
help='Download all the available subtitles of the video')
subtitles.add_option(
'--list-subs',
action='store_true', dest='listsubtitles', default=False,
help='List all available subtitles for the video')
subtitles.add_option(
'--sub-format',
action='store', dest='subtitlesformat', metavar='FORMAT', default='best',
help='Subtitle format, accepts formats preference, for example: "srt" or "ass/srt/best"')
subtitles.add_option(
'--sub-lang', '--sub-langs', '--srt-lang',
action='callback', dest='subtitleslangs', metavar='LANGS', type='str',
default=[], callback=_comma_separated_values_options_callback,
help='Languages of the subtitles to download (optional) separated by commas, use --list-subs for available language tags')
downloader = optparse.OptionGroup(parser, 'Download Options')
downloader.add_option(
'-r', '--limit-rate', '--rate-limit',
dest='ratelimit', metavar='RATE',
help='Maximum download rate in bytes per second (e.g. 50K or 4.2M)')
downloader.add_option(
'-R', '--retries',
dest='retries', metavar='RETRIES', default=10,
help='Number of retries (default is %default), or "infinite".')
downloader.add_option(
'--fragment-retries',
dest='fragment_retries', metavar='RETRIES', default=10,
help='Number of retries for a fragment (default is %default), or "infinite" (DASH, hlsnative and ISM)')
downloader.add_option(
'--skip-unavailable-fragments',
action='store_true', dest='skip_unavailable_fragments', default=True,
help='Skip unavailable fragments (DASH, hlsnative and ISM)')
downloader.add_option(
'--abort-on-unavailable-fragment',
action='store_false', dest='skip_unavailable_fragments',
help='Abort downloading when some fragment is not available')
downloader.add_option(
'--keep-fragments',
action='store_true', dest='keep_fragments', default=False,
help='Keep downloaded fragments on disk after downloading is finished; fragments are erased by default')
downloader.add_option(
'--buffer-size',
dest='buffersize', metavar='SIZE', default='1024',
help='Size of download buffer (e.g. 1024 or 16K) (default is %default)')
downloader.add_option(
'--no-resize-buffer',
action='store_true', dest='noresizebuffer', default=False,
help='Do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.')
downloader.add_option(
'--http-chunk-size',
dest='http_chunk_size', metavar='SIZE', default=None,
help='Size of a chunk for chunk-based HTTP downloading (e.g. 10485760 or 10M) (default is disabled). '
'May be useful for bypassing bandwidth throttling imposed by a webserver (experimental)')
downloader.add_option(
'--test',
action='store_true', dest='test', default=False,
help=optparse.SUPPRESS_HELP)
downloader.add_option(
'--playlist-reverse',
action='store_true',
help='Download playlist videos in reverse order')
downloader.add_option(
'--playlist-random',
action='store_true',
help='Download playlist videos in random order')
downloader.add_option(
'--xattr-set-filesize',
dest='xattr_set_filesize', action='store_true',
help='Set file xattribute ytdl.filesize with expected file size')
downloader.add_option(
'--hls-prefer-native',
dest='hls_prefer_native', action='store_true', default=None,
help='Use the native HLS downloader instead of ffmpeg')
downloader.add_option(
'--hls-prefer-ffmpeg',
dest='hls_prefer_native', action='store_false', default=None,
help='Use ffmpeg instead of the native HLS downloader')
downloader.add_option(
'--hls-use-mpegts',
dest='hls_use_mpegts', action='store_true',
help='Use the mpegts container for HLS videos, allowing to play the '
'video while downloading (some players may not be able to play it)')
downloader.add_option(
'--external-downloader',
dest='external_downloader', metavar='COMMAND',
help='Use the specified external downloader. '
'Currently supports %s' % ','.join(list_external_downloaders()))
downloader.add_option(
'--external-downloader-args',
dest='external_downloader_args', metavar='ARGS',
help='Give these arguments to the external downloader')
workarounds = optparse.OptionGroup(parser, 'Workarounds')
workarounds.add_option(
'--encoding',
dest='encoding', metavar='ENCODING',
help='Force the specified encoding (experimental)')
workarounds.add_option(
'--no-check-certificate',
action='store_true', dest='no_check_certificate', default=False,
help='Suppress HTTPS certificate validation')
workarounds.add_option(
'--prefer-insecure',
'--prefer-unsecure', action='store_true', dest='prefer_insecure',
help='Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)')
workarounds.add_option(
'--user-agent',
metavar='UA', dest='user_agent',
help='Specify a custom user agent')
workarounds.add_option(
'--referer',
metavar='URL', dest='referer', default=None,
help='Specify a custom referer, use if the video access is restricted to one domain',
)
workarounds.add_option(
'--add-header',
metavar='FIELD:VALUE', dest='headers', action='append',
help='Specify a custom HTTP header and its value, separated by a colon \':\'. You can use this option multiple times',
)
workarounds.add_option(
'--bidi-workaround',
dest='bidi_workaround', action='store_true',
help='Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH')
workarounds.add_option(
'--sleep-interval', '--min-sleep-interval', metavar='SECONDS',
dest='sleep_interval', type=float,
help=(
'Number of seconds to sleep before each download when used alone '
'or a lower bound of a range for randomized sleep before each download '
'(minimum possible number of seconds to sleep) when used along with '
'--max-sleep-interval.'))
workarounds.add_option(
'--max-sleep-interval', metavar='SECONDS',
dest='max_sleep_interval', type=float,
help=(
'Upper bound of a range for randomized sleep before each download '
'(maximum possible number of seconds to sleep). Must only be used '
'along with --min-sleep-interval.'))
verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
verbosity.add_option(
'-q', '--quiet',
action='store_true', dest='quiet', default=False,
help='Activate quiet mode')
verbosity.add_option(
'--no-warnings',
dest='no_warnings', action='store_true', default=False,
help='Ignore warnings')
verbosity.add_option(
'-s', '--simulate',
action='store_true', dest='simulate', default=False,
help='Do not download the video and do not write anything to disk')
verbosity.add_option(
'--skip-download',
action='store_true', dest='skip_download', default=False,
help='Do not download the video')
verbosity.add_option(
'-g', '--get-url',
action='store_true', dest='geturl', default=False,
help='Simulate, quiet but print URL')
verbosity.add_option(
'-e', '--get-title',
action='store_true', dest='gettitle', default=False,
help='Simulate, quiet but print title')
verbosity.add_option(
'--get-id',
action='store_true', dest='getid', default=False,
help='Simulate, quiet but print id')
verbosity.add_option(
'--get-thumbnail',
action='store_true', dest='getthumbnail', default=False,
help='Simulate, quiet but print thumbnail URL')
verbosity.add_option(
'--get-description',
action='store_true', dest='getdescription', default=False,
help='Simulate, quiet but print video description')
verbosity.add_option(
'--get-duration',
action='store_true', dest='getduration', default=False,
help='Simulate, quiet but print video length')
verbosity.add_option(
'--get-filename',
action='store_true', dest='getfilename', default=False,
help='Simulate, quiet but print output filename')
verbosity.add_option(
'--get-format',
action='store_true', dest='getformat', default=False,
help='Simulate, quiet but print output format')
verbosity.add_option(
'-j', '--dump-json',
action='store_true', dest='dumpjson', default=False,
help='Simulate, quiet but print JSON information. See the "OUTPUT TEMPLATE" for a description of available keys.')
verbosity.add_option(
'-J', '--dump-single-json',
action='store_true', dest='dump_single_json', default=False,
help='Simulate, quiet but print JSON information for each command-line argument. If the URL refers to a playlist, dump the whole playlist information in a single line.')
verbosity.add_option(
'--print-json',
action='store_true', dest='print_json', default=False,
help='Be quiet and print the video information as JSON (video is still being downloaded).',
)
verbosity.add_option(
'--newline',
action='store_true', dest='progress_with_newline', default=False,
help='Output progress bar as new lines')
verbosity.add_option(
'--no-progress',
action='store_true', dest='noprogress', default=False,
help='Do not print progress bar')
verbosity.add_option(
'--console-title',
action='store_true', dest='consoletitle', default=False,
help='Display progress in console titlebar')
verbosity.add_option(
'-v', '--verbose',
action='store_true', dest='verbose', default=False,
help='Print various debugging information')
verbosity.add_option(
'--dump-pages', '--dump-intermediate-pages',
action='store_true', dest='dump_intermediate_pages', default=False,
help='Print downloaded pages encoded using base64 to debug problems (very verbose)')
verbosity.add_option(
'--write-pages',
action='store_true', dest='write_pages', default=False,
help='Write downloaded intermediary pages to files in the current directory to debug problems')
verbosity.add_option(
'--youtube-print-sig-code',
action='store_true', dest='youtube_print_sig_code', default=False,
help=optparse.SUPPRESS_HELP)
verbosity.add_option(
'--print-traffic', '--dump-headers',
dest='debug_printtraffic', action='store_true', default=False,
help='Display sent and read HTTP traffic')
verbosity.add_option(
'-C', '--call-home',
dest='call_home', action='store_true', default=False,
help='Contact the youtube-dl server for debugging')
verbosity.add_option(
'--no-call-home',
dest='call_home', action='store_false', default=False,
help='Do NOT contact the youtube-dl server for debugging')
filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
filesystem.add_option(
'-a', '--batch-file',
dest='batchfile', metavar='FILE',
help="File containing URLs to download ('-' for stdin), one URL per line. "
"Lines starting with '#', ';' or ']' are considered as comments and ignored.")
filesystem.add_option(
'--id', default=False,
action='store_true', dest='useid', help='Use only video ID in file name')
filesystem.add_option(
'-o', '--output',
dest='outtmpl', metavar='TEMPLATE',
help=('Output filename template, see the "OUTPUT TEMPLATE" for all the info'))
filesystem.add_option(
'--output-na-placeholder',
dest='outtmpl_na_placeholder', metavar='PLACEHOLDER', default='NA',
help=('Placeholder value for unavailable meta fields in output filename template (default is "%default")'))
filesystem.add_option(
'--autonumber-size',
dest='autonumber_size', metavar='NUMBER', type=int,
help=optparse.SUPPRESS_HELP)
filesystem.add_option(
'--autonumber-start',
dest='autonumber_start', metavar='NUMBER', default=1, type=int,
help='Specify the start value for %(autonumber)s (default is %default)')
filesystem.add_option(
'--restrict-filenames',
action='store_true', dest='restrictfilenames', default=False,
help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames')
filesystem.add_option(
'-A', '--auto-number',
action='store_true', dest='autonumber', default=False,
help=optparse.SUPPRESS_HELP)
filesystem.add_option(
'-t', '--title',
action='store_true', dest='usetitle', default=False,
help=optparse.SUPPRESS_HELP)
filesystem.add_option(
'-l', '--literal', default=False,
action='store_true', dest='usetitle',
help=optparse.SUPPRESS_HELP)
filesystem.add_option(
'-w', '--no-overwrites',
action='store_true', dest='nooverwrites', default=False,
help='Do not overwrite files')
filesystem.add_option(
'-c', '--continue',
action='store_true', dest='continue_dl', default=True,
help='Force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.')
filesystem.add_option(
'--no-continue',
action='store_false', dest='continue_dl',
help='Do not resume partially downloaded files (restart from beginning)')
filesystem.add_option(
'--no-part',
action='store_true', dest='nopart', default=False,
help='Do not use .part files - write directly into output file')
filesystem.add_option(
'--no-mtime',
action='store_false', dest='updatetime', default=True,
help='Do not use the Last-modified header to set the file modification time')
filesystem.add_option(
'--write-description',
action='store_true', dest='writedescription', default=False,
help='Write video description to a .description file')
filesystem.add_option(
'--write-info-json',
action='store_true', dest='writeinfojson', default=False,
help='Write video metadata to a .info.json file')
filesystem.add_option(
'--write-annotations',
action='store_true', dest='writeannotations', default=False,
help='Write video annotations to a .annotations.xml file')
filesystem.add_option(
'--load-info-json', '--load-info',
dest='load_info_filename', metavar='FILE',
help='JSON file containing the video information (created with the "--write-info-json" option)')
filesystem.add_option(
'--cookies',
dest='cookiefile', metavar='FILE',
help='File to read cookies from and dump cookie jar in')
filesystem.add_option(
'--cache-dir', dest='cachedir', default=None, metavar='DIR',
help='Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may change.')
filesystem.add_option(
'--no-cache-dir', action='store_const', const=False, dest='cachedir',
help='Disable filesystem caching')
filesystem.add_option(
'--rm-cache-dir',
action='store_true', dest='rm_cachedir',
help='Delete all filesystem cache files')
thumbnail = optparse.OptionGroup(parser, 'Thumbnail Options')
thumbnail.add_option(
'--write-thumbnail',
action='store_true', dest='writethumbnail', default=False,
help='Write thumbnail image to disk')
thumbnail.add_option(
'--write-all-thumbnails',
action='store_true', dest='write_all_thumbnails', default=False,
help='Write all thumbnail image formats to disk')
thumbnail.add_option(
'--list-thumbnails',
action='store_true', dest='list_thumbnails', default=False,
help='Simulate and list all available thumbnail formats')
postproc = optparse.OptionGroup(parser, 'Post-processing Options')
postproc.add_option(
'-x', '--extract-audio',
action='store_true', dest='extractaudio', default=False,
help='Convert video files to audio-only files (requires ffmpeg/avconv and ffprobe/avprobe)')
postproc.add_option(
'--audio-format', metavar='FORMAT', dest='audioformat', default='best',
help='Specify audio format: "best", "aac", "flac", "mp3", "m4a", "opus", "vorbis", or "wav"; "%default" by default; No effect without -x')
postproc.add_option(
'--audio-quality', metavar='QUALITY',
dest='audioquality', default='5',
help='Specify ffmpeg/avconv audio quality, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default %default)')
postproc.add_option(
'--recode-video',
metavar='FORMAT', dest='recodevideo', default=None,
help='Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm|mkv|avi)')
postproc.add_option(
'--postprocessor-args',
dest='postprocessor_args', metavar='ARGS',
help='Give these arguments to the postprocessor')
postproc.add_option(
'-k', '--keep-video',
action='store_true', dest='keepvideo', default=False,
help='Keep the video file on disk after the post-processing; the video is erased by default')
postproc.add_option(
'--no-post-overwrites',
action='store_true', dest='nopostoverwrites', default=False,
help='Do not overwrite post-processed files; the post-processed files are overwritten by default')
postproc.add_option(
'--embed-subs',
action='store_true', dest='embedsubtitles', default=False,
help='Embed subtitles in the video (only for mp4, webm and mkv videos)')
postproc.add_option(
'--embed-thumbnail',
action='store_true', dest='embedthumbnail', default=False,
help='Embed thumbnail in the audio as cover art')
postproc.add_option(
'--add-metadata',
action='store_true', dest='addmetadata', default=False,
help='Write metadata to the video file')
postproc.add_option(
'--metadata-from-title',
metavar='FORMAT', dest='metafromtitle',
help='Parse additional metadata like song title / artist from the video title. '
'The format syntax is the same as --output. Regular expression with '
'named capture groups may also be used. '
'The parsed parameters replace existing values. '
'Example: --metadata-from-title "%(artist)s - %(title)s" matches a title like '
'"Coldplay - Paradise". '
'Example (regex): --metadata-from-title "(?P<artist>.+?) - (?P<title>.+)"')
postproc.add_option(
'--xattrs',
action='store_true', dest='xattrs', default=False,
help='Write metadata to the video file\'s xattrs (using dublin core and xdg standards)')
postproc.add_option(
'--fixup',
metavar='POLICY', dest='fixup', default='detect_or_warn',
help='Automatically correct known faults of the file. '
'One of never (do nothing), warn (only emit a warning), '
'detect_or_warn (the default; fix file if we can, warn otherwise)')
postproc.add_option(
'--prefer-avconv',
action='store_false', dest='prefer_ffmpeg',
help='Prefer avconv over ffmpeg for running the postprocessors')
postproc.add_option(
'--prefer-ffmpeg',
action='store_true', dest='prefer_ffmpeg',
help='Prefer ffmpeg over avconv for running the postprocessors (default)')
postproc.add_option(
'--ffmpeg-location', '--avconv-location', metavar='PATH',
dest='ffmpeg_location',
help='Location of the ffmpeg/avconv binary; either the path to the binary or its containing directory.')
postproc.add_option(
'--exec',
metavar='CMD', dest='exec_cmd',
help='Execute a command on the file after downloading and post-processing, similar to find\'s -exec syntax. Example: --exec \'adb push {} /sdcard/Music/ && rm {}\'')
postproc.add_option(
'--convert-subs', '--convert-subtitles',
metavar='FORMAT', dest='convertsubtitles', default=None,
help='Convert the subtitles to other format (currently supported: srt|ass|vtt|lrc)')
parser.add_option_group(general)
parser.add_option_group(network)
parser.add_option_group(geo)
parser.add_option_group(selection)
parser.add_option_group(downloader)
parser.add_option_group(filesystem)
parser.add_option_group(thumbnail)
parser.add_option_group(verbosity)
parser.add_option_group(workarounds)
parser.add_option_group(video_format)
parser.add_option_group(subtitles)
parser.add_option_group(authentication)
parser.add_option_group(adobe_pass)
parser.add_option_group(postproc)
if overrideArguments is not None:
opts, args = parser.parse_args(overrideArguments)
if opts.verbose:
write_string('[debug] Override config: ' + repr(overrideArguments) + '\n')
else:
def compat_conf(conf):
if sys.version_info < (3,):
return [a.decode(preferredencoding(), 'replace') for a in conf]
return conf
command_line_conf = compat_conf(sys.argv[1:])
opts, args = parser.parse_args(command_line_conf)
system_conf = user_conf = custom_conf = []
if '--config-location' in command_line_conf:
location = compat_expanduser(opts.config_location)
if os.path.isdir(location):
location = os.path.join(location, 'youtube-dl.conf')
if not os.path.exists(location):
parser.error('config-location %s does not exist.' % location)
custom_conf = _readOptions(location)
elif '--ignore-config' in command_line_conf:
pass
else:
system_conf = _readOptions('/etc/youtube-dl.conf')
if '--ignore-config' not in system_conf:
user_conf = _readUserConf()
argv = system_conf + user_conf + custom_conf + command_line_conf
opts, args = parser.parse_args(argv)
if opts.verbose:
for conf_label, conf in (
('System config', system_conf),
('User config', user_conf),
('Custom config', custom_conf),
('Command-line args', command_line_conf)):
write_string('[debug] %s: %s\n' % (conf_label, repr(_hide_login_info(conf))))
return parser, opts, args
| def parseOpts(overrideArguments=None):
def _readOptions(filename_bytes, default=[]):
try:
optionf = open(filename_bytes)
except IOError:
return default # silently skip if file is not present
try:
# FIXME: https://github.com/ytdl-org/youtube-dl/commit/dfe5fa49aed02cf36ba9f743b11b0903554b5e56
contents = optionf.read()
if sys.version_info < (3,):
contents = contents.decode(preferredencoding())
res = compat_shlex_split(contents, comments=True)
finally:
optionf.close()
return res
def _readUserConf():
xdg_config_home = compat_getenv('XDG_CONFIG_HOME')
if xdg_config_home:
userConfFile = os.path.join(xdg_config_home, 'youtube-dl', 'config')
if not os.path.isfile(userConfFile):
userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf')
else:
userConfFile = os.path.join(compat_expanduser('~'), '.config', 'youtube-dl', 'config')
if not os.path.isfile(userConfFile):
userConfFile = os.path.join(compat_expanduser('~'), '.config', 'youtube-dl.conf')
userConf = _readOptions(userConfFile, None)
if userConf is None:
appdata_dir = compat_getenv('appdata')
if appdata_dir:
userConf = _readOptions(
os.path.join(appdata_dir, 'youtube-dl', 'config'),
default=None)
if userConf is None:
userConf = _readOptions(
os.path.join(appdata_dir, 'youtube-dl', 'config.txt'),
default=None)
if userConf is None:
userConf = _readOptions(
os.path.join(compat_expanduser('~'), 'youtube-dl.conf'),
default=None)
if userConf is None:
userConf = _readOptions(
os.path.join(compat_expanduser('~'), 'youtube-dl.conf.txt'),
default=None)
if userConf is None:
userConf = []
return userConf
def _format_option_string(option):
''' ('-o', '--option') -> -o, --format METAVAR'''
opts = []
if option._short_opts:
opts.append(option._short_opts[0])
if option._long_opts:
opts.append(option._long_opts[0])
if len(opts) > 1:
opts.insert(1, ', ')
if option.takes_value():
opts.append(' %s' % option.metavar)
return ''.join(opts)
def _comma_separated_values_options_callback(option, opt_str, value, parser):
setattr(parser.values, option.dest, value.split(','))
# No need to wrap help messages if we're on a wide console
columns = compat_get_terminal_size().columns
max_width = columns if columns else 80
max_help_position = 80
fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
fmt.format_option_strings = _format_option_string
kw = {
'version': __version__,
'formatter': fmt,
'usage': '%prog [OPTIONS] URL [URL...]',
'conflict_handler': 'resolve',
}
parser = optparse.OptionParser(**compat_kwargs(kw))
general = optparse.OptionGroup(parser, 'General Options')
general.add_option(
'-h', '--help',
action='help',
help='Print this help text and exit')
general.add_option(
'--version',
action='version',
help='Print program version and exit')
general.add_option(
'-U', '--update',
action='store_true', dest='update_self',
help='Update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)')
general.add_option(
'-i', '--ignore-errors',
action='store_true', dest='ignoreerrors', default=False,
help='Continue on download errors, for example to skip unavailable videos in a playlist')
general.add_option(
'--abort-on-error',
action='store_false', dest='ignoreerrors',
help='Abort downloading of further videos (in the playlist or the command line) if an error occurs')
general.add_option(
'--dump-user-agent',
action='store_true', dest='dump_user_agent', default=False,
help='Display the current browser identification')
general.add_option(
'--list-extractors',
action='store_true', dest='list_extractors', default=False,
help='List all supported extractors')
general.add_option(
'--extractor-descriptions',
action='store_true', dest='list_extractor_descriptions', default=False,
help='Output descriptions of all supported extractors')
general.add_option(
'--force-generic-extractor',
action='store_true', dest='force_generic_extractor', default=False,
help='Force extraction to use the generic extractor')
general.add_option(
'--default-search',
dest='default_search', metavar='PREFIX',
help='Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple". Use the value "auto" to let youtube-dl guess ("auto_warning" to emit a warning when guessing). "error" just throws an error. The default value "fixup_error" repairs broken URLs, but emits an error if this is not possible instead of searching.')
general.add_option(
'--ignore-config',
action='store_true',
help='Do not read configuration files. '
'When given in the global configuration file /etc/youtube-dl.conf: '
'Do not read the user configuration in ~/.config/youtube-dl/config '
'(%APPDATA%/youtube-dl/config.txt on Windows)')
general.add_option(
'--config-location',
dest='config_location', metavar='PATH',
help='Location of the configuration file; either the path to the config or its containing directory.')
general.add_option(
'--flat-playlist',
action='store_const', dest='extract_flat', const='in_playlist',
default=False,
help='Do not extract the videos of a playlist, only list them.')
general.add_option(
'--mark-watched',
action='store_true', dest='mark_watched', default=False,
help='Mark videos watched (if supported for site)')
general.add_option(
'--no-mark-watched',
action='store_false', dest='mark_watched', default=False,
help='Do not mark videos watched')
general.add_option(
'--no-color', '--no-colors',
action='store_true', dest='no_color',
default=False,
help='Do not emit color codes in output')
network = optparse.OptionGroup(parser, 'Network Options')
network.add_option(
'--proxy', dest='proxy',
default=None, metavar='URL',
help='Use the specified HTTP/HTTPS/SOCKS proxy. To enable '
'SOCKS proxy, specify a proper scheme. For example '
'socks5://127.0.0.1:1080/. Pass in an empty string (--proxy "") '
'for direct connection')
network.add_option(
'--socket-timeout',
dest='socket_timeout', type=float, default=None, metavar='SECONDS',
help='Time to wait before giving up, in seconds')
network.add_option(
'--source-address',
metavar='IP', dest='source_address', default=None,
help='Client-side IP address to bind to',
)
network.add_option(
'-4', '--force-ipv4',
action='store_const', const='0.0.0.0', dest='source_address',
help='Make all connections via IPv4',
)
network.add_option(
'-6', '--force-ipv6',
action='store_const', const='::', dest='source_address',
help='Make all connections via IPv6',
)
geo = optparse.OptionGroup(parser, 'Geo Restriction')
geo.add_option(
'--geo-verification-proxy',
dest='geo_verification_proxy', default=None, metavar='URL',
help='Use this proxy to verify the IP address for some geo-restricted sites. '
'The default proxy specified by --proxy (or none, if the option is not present) is used for the actual downloading.')
geo.add_option(
'--cn-verification-proxy',
dest='cn_verification_proxy', default=None, metavar='URL',
help=optparse.SUPPRESS_HELP)
geo.add_option(
'--geo-bypass',
action='store_true', dest='geo_bypass', default=True,
help='Bypass geographic restriction via faking X-Forwarded-For HTTP header')
geo.add_option(
'--no-geo-bypass',
action='store_false', dest='geo_bypass', default=True,
help='Do not bypass geographic restriction via faking X-Forwarded-For HTTP header')
geo.add_option(
'--geo-bypass-country', metavar='CODE',
dest='geo_bypass_country', default=None,
help='Force bypass geographic restriction with explicitly provided two-letter ISO 3166-2 country code')
geo.add_option(
'--geo-bypass-ip-block', metavar='IP_BLOCK',
dest='geo_bypass_ip_block', default=None,
help='Force bypass geographic restriction with explicitly provided IP block in CIDR notation')
selection = optparse.OptionGroup(parser, 'Video Selection')
selection.add_option(
'--playlist-start',
dest='playliststart', metavar='NUMBER', default=1, type=int,
help='Playlist video to start at (default is %default)')
selection.add_option(
'--playlist-end',
dest='playlistend', metavar='NUMBER', default=None, type=int,
help='Playlist video to end at (default is last)')
selection.add_option(
'--playlist-items',
dest='playlist_items', metavar='ITEM_SPEC', default=None,
help='Playlist video items to download. Specify indices of the videos in the playlist separated by commas like: "--playlist-items 1,2,5,8" if you want to download videos indexed 1, 2, 5, 8 in the playlist. You can specify range: "--playlist-items 1-3,7,10-13", it will download the videos at index 1, 2, 3, 7, 10, 11, 12 and 13.')
selection.add_option(
'--match-title',
dest='matchtitle', metavar='REGEX',
help='Download only matching titles (case-insensitive regex or alphanumeric sub-string)')
selection.add_option(
'--reject-title',
dest='rejecttitle', metavar='REGEX',
help='Skip download for matching titles (case-insensitive regex or alphanumeric sub-string)')
selection.add_option(
'--max-downloads',
dest='max_downloads', metavar='NUMBER', type=int, default=None,
help='Abort after downloading NUMBER files')
selection.add_option(
'--min-filesize',
metavar='SIZE', dest='min_filesize', default=None,
help='Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)')
selection.add_option(
'--max-filesize',
metavar='SIZE', dest='max_filesize', default=None,
help='Do not download any videos larger than SIZE (e.g. 50k or 44.6m)')
selection.add_option(
'--date',
metavar='DATE', dest='date', default=None,
help='Download only videos uploaded in this date')
selection.add_option(
'--datebefore',
metavar='DATE', dest='datebefore', default=None,
help='Download only videos uploaded on or before this date (i.e. inclusive)')
selection.add_option(
'--dateafter',
metavar='DATE', dest='dateafter', default=None,
help='Download only videos uploaded on or after this date (i.e. inclusive)')
selection.add_option(
'--min-views',
metavar='COUNT', dest='min_views', default=None, type=int,
help='Do not download any videos with less than COUNT views')
selection.add_option(
'--max-views',
metavar='COUNT', dest='max_views', default=None, type=int,
help='Do not download any videos with more than COUNT views')
selection.add_option(
'--match-filter',
metavar='FILTER', dest='match_filter', default=None,
help=(
'Generic video filter. '
'Specify any key (see the "OUTPUT TEMPLATE" for a list of available keys) to '
'match if the key is present, '
'!key to check if the key is not present, '
'key > NUMBER (like "comment_count > 12", also works with '
'>=, <, <=, !=, =) to compare against a number, '
'key = \'LITERAL\' (like "uploader = \'Mike Smith\'", also works with !=) '
'to match against a string literal '
'and & to require multiple matches. '
'Values which are not known are excluded unless you '
'put a question mark (?) after the operator. '
'For example, to only match videos that have been liked more than '
'100 times and disliked less than 50 times (or the dislike '
'functionality is not available at the given service), but who '
'also have a description, use --match-filter '
'"like_count > 100 & dislike_count <? 50 & description" .'
))
selection.add_option(
'--no-playlist',
action='store_true', dest='noplaylist', default=False,
help='Download only the video, if the URL refers to a video and a playlist.')
selection.add_option(
'--yes-playlist',
action='store_false', dest='noplaylist', default=False,
help='Download the playlist, if the URL refers to a video and a playlist.')
selection.add_option(
'--age-limit',
metavar='YEARS', dest='age_limit', default=None, type=int,
help='Download only videos suitable for the given age')
selection.add_option(
'--download-archive', metavar='FILE',
dest='download_archive',
help='Download only videos not listed in the archive file. Record the IDs of all downloaded videos in it.')
selection.add_option(
'--include-ads',
dest='include_ads', action='store_true',
help='Download advertisements as well (experimental)')
authentication = optparse.OptionGroup(parser, 'Authentication Options')
authentication.add_option(
'-u', '--username',
dest='username', metavar='USERNAME',
help='Login with this account ID')
authentication.add_option(
'-p', '--password',
dest='password', metavar='PASSWORD',
help='Account password. If this option is left out, youtube-dl will ask interactively.')
authentication.add_option(
'-2', '--twofactor',
dest='twofactor', metavar='TWOFACTOR',
help='Two-factor authentication code')
authentication.add_option(
'-n', '--netrc',
action='store_true', dest='usenetrc', default=False,
help='Use .netrc authentication data')
authentication.add_option(
'--video-password',
dest='videopassword', metavar='PASSWORD',
help='Video password (vimeo, youku)')
adobe_pass = optparse.OptionGroup(parser, 'Adobe Pass Options')
adobe_pass.add_option(
'--ap-mso',
dest='ap_mso', metavar='MSO',
help='Adobe Pass multiple-system operator (TV provider) identifier, use --ap-list-mso for a list of available MSOs')
adobe_pass.add_option(
'--ap-username',
dest='ap_username', metavar='USERNAME',
help='Multiple-system operator account login')
adobe_pass.add_option(
'--ap-password',
dest='ap_password', metavar='PASSWORD',
help='Multiple-system operator account password. If this option is left out, youtube-dl will ask interactively.')
adobe_pass.add_option(
'--ap-list-mso',
action='store_true', dest='ap_list_mso', default=False,
help='List all supported multiple-system operators')
video_format = optparse.OptionGroup(parser, 'Video Format Options')
video_format.add_option(
'-f', '--format',
action='store', dest='format', metavar='FORMAT', default=None,
help='Video format code, see the "FORMAT SELECTION" for all the info')
video_format.add_option(
'--all-formats',
action='store_const', dest='format', const='all',
help='Download all available video formats')
video_format.add_option(
'--prefer-free-formats',
action='store_true', dest='prefer_free_formats', default=False,
help='Prefer free video formats unless a specific one is requested')
video_format.add_option(
'-F', '--list-formats',
action='store_true', dest='listformats',
help='List all available formats of requested videos')
video_format.add_option(
'--youtube-include-dash-manifest',
action='store_true', dest='youtube_include_dash_manifest', default=True,
help=optparse.SUPPRESS_HELP)
video_format.add_option(
'--youtube-skip-dash-manifest',
action='store_false', dest='youtube_include_dash_manifest',
help='Do not download the DASH manifests and related data on YouTube videos')
video_format.add_option(
'--merge-output-format',
action='store', dest='merge_output_format', metavar='FORMAT', default=None,
help=(
'If a merge is required (e.g. bestvideo+bestaudio), '
'output to given container format. One of mkv, mp4, ogg, webm, flv. '
'Ignored if no merge is required'))
subtitles = optparse.OptionGroup(parser, 'Subtitle Options')
subtitles.add_option(
'--write-sub', '--write-srt',
action='store_true', dest='writesubtitles', default=False,
help='Write subtitle file')
subtitles.add_option(
'--write-auto-sub', '--write-automatic-sub',
action='store_true', dest='writeautomaticsub', default=False,
help='Write automatically generated subtitle file (YouTube only)')
subtitles.add_option(
'--all-subs',
action='store_true', dest='allsubtitles', default=False,
help='Download all the available subtitles of the video')
subtitles.add_option(
'--list-subs',
action='store_true', dest='listsubtitles', default=False,
help='List all available subtitles for the video')
subtitles.add_option(
'--sub-format',
action='store', dest='subtitlesformat', metavar='FORMAT', default='best',
help='Subtitle format, accepts formats preference, for example: "srt" or "ass/srt/best"')
subtitles.add_option(
'--sub-lang', '--sub-langs', '--srt-lang',
action='callback', dest='subtitleslangs', metavar='LANGS', type='str',
default=[], callback=_comma_separated_values_options_callback,
help='Languages of the subtitles to download (optional) separated by commas, use --list-subs for available language tags')
downloader = optparse.OptionGroup(parser, 'Download Options')
downloader.add_option(
'-r', '--limit-rate', '--rate-limit',
dest='ratelimit', metavar='RATE',
help='Maximum download rate in bytes per second (e.g. 50K or 4.2M)')
downloader.add_option(
'-R', '--retries',
dest='retries', metavar='RETRIES', default=10,
help='Number of retries (default is %default), or "infinite".')
downloader.add_option(
'--fragment-retries',
dest='fragment_retries', metavar='RETRIES', default=10,
help='Number of retries for a fragment (default is %default), or "infinite" (DASH, hlsnative and ISM)')
downloader.add_option(
'--skip-unavailable-fragments',
action='store_true', dest='skip_unavailable_fragments', default=True,
help='Skip unavailable fragments (DASH, hlsnative and ISM)')
downloader.add_option(
'--abort-on-unavailable-fragment',
action='store_false', dest='skip_unavailable_fragments',
help='Abort downloading when some fragment is not available')
downloader.add_option(
'--keep-fragments',
action='store_true', dest='keep_fragments', default=False,
help='Keep downloaded fragments on disk after downloading is finished; fragments are erased by default')
downloader.add_option(
'--buffer-size',
dest='buffersize', metavar='SIZE', default='1024',
help='Size of download buffer (e.g. 1024 or 16K) (default is %default)')
downloader.add_option(
'--no-resize-buffer',
action='store_true', dest='noresizebuffer', default=False,
help='Do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.')
downloader.add_option(
'--http-chunk-size',
dest='http_chunk_size', metavar='SIZE', default=None,
help='Size of a chunk for chunk-based HTTP downloading (e.g. 10485760 or 10M) (default is disabled). '
'May be useful for bypassing bandwidth throttling imposed by a webserver (experimental)')
downloader.add_option(
'--test',
action='store_true', dest='test', default=False,
help=optparse.SUPPRESS_HELP)
downloader.add_option(
'--playlist-reverse',
action='store_true',
help='Download playlist videos in reverse order')
downloader.add_option(
'--playlist-random',
action='store_true',
help='Download playlist videos in random order')
downloader.add_option(
'--xattr-set-filesize',
dest='xattr_set_filesize', action='store_true',
help='Set file xattribute ytdl.filesize with expected file size')
downloader.add_option(
'--hls-prefer-native',
dest='hls_prefer_native', action='store_true', default=None,
help='Use the native HLS downloader instead of ffmpeg')
downloader.add_option(
'--hls-prefer-ffmpeg',
dest='hls_prefer_native', action='store_false', default=None,
help='Use ffmpeg instead of the native HLS downloader')
downloader.add_option(
'--hls-use-mpegts',
dest='hls_use_mpegts', action='store_true',
help='Use the mpegts container for HLS videos, allowing to play the '
'video while downloading (some players may not be able to play it)')
downloader.add_option(
'--external-downloader',
dest='external_downloader', metavar='COMMAND',
help='Use the specified external downloader. '
'Currently supports %s' % ','.join(list_external_downloaders()))
downloader.add_option(
'--external-downloader-args',
dest='external_downloader_args', metavar='ARGS',
help='Give these arguments to the external downloader')
workarounds = optparse.OptionGroup(parser, 'Workarounds')
workarounds.add_option(
'--encoding',
dest='encoding', metavar='ENCODING',
help='Force the specified encoding (experimental)')
workarounds.add_option(
'--no-check-certificate',
action='store_true', dest='no_check_certificate', default=False,
help='Suppress HTTPS certificate validation')
workarounds.add_option(
'--prefer-insecure',
'--prefer-unsecure', action='store_true', dest='prefer_insecure',
help='Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)')
workarounds.add_option(
'--user-agent',
metavar='UA', dest='user_agent',
help='Specify a custom user agent')
workarounds.add_option(
'--referer',
metavar='URL', dest='referer', default=None,
help='Specify a custom referer, use if the video access is restricted to one domain',
)
workarounds.add_option(
'--add-header',
metavar='FIELD:VALUE', dest='headers', action='append',
help='Specify a custom HTTP header and its value, separated by a colon \':\'. You can use this option multiple times',
)
workarounds.add_option(
'--bidi-workaround',
dest='bidi_workaround', action='store_true',
help='Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH')
workarounds.add_option(
'--sleep-interval', '--min-sleep-interval', metavar='SECONDS',
dest='sleep_interval', type=float,
help=(
'Number of seconds to sleep before each download when used alone '
'or a lower bound of a range for randomized sleep before each download '
'(minimum possible number of seconds to sleep) when used along with '
'--max-sleep-interval.'))
workarounds.add_option(
'--max-sleep-interval', metavar='SECONDS',
dest='max_sleep_interval', type=float,
help=(
'Upper bound of a range for randomized sleep before each download '
'(maximum possible number of seconds to sleep). Must only be used '
'along with --min-sleep-interval.'))
verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
verbosity.add_option(
'-q', '--quiet',
action='store_true', dest='quiet', default=False,
help='Activate quiet mode')
verbosity.add_option(
'--no-warnings',
dest='no_warnings', action='store_true', default=False,
help='Ignore warnings')
verbosity.add_option(
'-s', '--simulate',
action='store_true', dest='simulate', default=False,
help='Do not download the video and do not write anything to disk')
verbosity.add_option(
'--skip-download',
action='store_true', dest='skip_download', default=False,
help='Do not download the video')
verbosity.add_option(
'-g', '--get-url',
action='store_true', dest='geturl', default=False,
help='Simulate, quiet but print URL')
verbosity.add_option(
'-e', '--get-title',
action='store_true', dest='gettitle', default=False,
help='Simulate, quiet but print title')
verbosity.add_option(
'--get-id',
action='store_true', dest='getid', default=False,
help='Simulate, quiet but print id')
verbosity.add_option(
'--get-thumbnail',
action='store_true', dest='getthumbnail', default=False,
help='Simulate, quiet but print thumbnail URL')
verbosity.add_option(
'--get-description',
action='store_true', dest='getdescription', default=False,
help='Simulate, quiet but print video description')
verbosity.add_option(
'--get-duration',
action='store_true', dest='getduration', default=False,
help='Simulate, quiet but print video length')
verbosity.add_option(
'--get-filename',
action='store_true', dest='getfilename', default=False,
help='Simulate, quiet but print output filename')
verbosity.add_option(
'--get-format',
action='store_true', dest='getformat', default=False,
help='Simulate, quiet but print output format')
verbosity.add_option(
'-j', '--dump-json',
action='store_true', dest='dumpjson', default=False,
help='Simulate, quiet but print JSON information. See the "OUTPUT TEMPLATE" for a description of available keys.')
verbosity.add_option(
'-J', '--dump-single-json',
action='store_true', dest='dump_single_json', default=False,
help='Simulate, quiet but print JSON information for each command-line argument. If the URL refers to a playlist, dump the whole playlist information in a single line.')
verbosity.add_option(
'--print-json',
action='store_true', dest='print_json', default=False,
help='Be quiet and print the video information as JSON (video is still being downloaded).',
)
verbosity.add_option(
'--newline',
action='store_true', dest='progress_with_newline', default=False,
help='Output progress bar as new lines')
verbosity.add_option(
'--no-progress',
action='store_true', dest='noprogress', default=False,
help='Do not print progress bar')
verbosity.add_option(
'--console-title',
action='store_true', dest='consoletitle', default=False,
help='Display progress in console titlebar')
verbosity.add_option(
'-v', '--verbose',
action='store_true', dest='verbose', default=False,
help='Print various debugging information')
verbosity.add_option(
'--dump-pages', '--dump-intermediate-pages',
action='store_true', dest='dump_intermediate_pages', default=False,
help='Print downloaded pages encoded using base64 to debug problems (very verbose)')
verbosity.add_option(
'--write-pages',
action='store_true', dest='write_pages', default=False,
help='Write downloaded intermediary pages to files in the current directory to debug problems')
verbosity.add_option(
'--youtube-print-sig-code',
action='store_true', dest='youtube_print_sig_code', default=False,
help=optparse.SUPPRESS_HELP)
verbosity.add_option(
'--print-traffic', '--dump-headers',
dest='debug_printtraffic', action='store_true', default=False,
help='Display sent and read HTTP traffic')
verbosity.add_option(
'-C', '--call-home',
dest='call_home', action='store_true', default=False,
help='Contact the youtube-dl server for debugging')
verbosity.add_option(
'--no-call-home',
dest='call_home', action='store_false', default=False,
help='Do NOT contact the youtube-dl server for debugging')
filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
filesystem.add_option(
'-a', '--batch-file',
dest='batchfile', metavar='FILE',
help="File containing URLs to download ('-' for stdin), one URL per line. "
"Lines starting with '#', ';' or ']' are considered as comments and ignored.")
filesystem.add_option(
'--id', default=False,
action='store_true', dest='useid', help='Use only video ID in file name')
filesystem.add_option(
'-o', '--output',
dest='outtmpl', metavar='TEMPLATE',
help=('Output filename template, see the "OUTPUT TEMPLATE" for all the info'))
filesystem.add_option(
'--output-na-placeholder',
dest='outtmpl_na_placeholder', metavar='PLACEHOLDER', default='NA',
help=('Placeholder value for unavailable meta fields in output filename template (default is "%default")'))
filesystem.add_option(
'--autonumber-size',
dest='autonumber_size', metavar='NUMBER', type=int,
help=optparse.SUPPRESS_HELP)
filesystem.add_option(
'--autonumber-start',
dest='autonumber_start', metavar='NUMBER', default=1, type=int,
help='Specify the start value for %(autonumber)s (default is %default)')
filesystem.add_option(
'--restrict-filenames',
action='store_true', dest='restrictfilenames', default=False,
help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames')
filesystem.add_option(
'-A', '--auto-number',
action='store_true', dest='autonumber', default=False,
help=optparse.SUPPRESS_HELP)
filesystem.add_option(
'-t', '--title',
action='store_true', dest='usetitle', default=False,
help=optparse.SUPPRESS_HELP)
filesystem.add_option(
'-l', '--literal', default=False,
action='store_true', dest='usetitle',
help=optparse.SUPPRESS_HELP)
filesystem.add_option(
'-w', '--no-overwrites',
action='store_true', dest='nooverwrites', default=False,
help='Do not overwrite files')
filesystem.add_option(
'-c', '--continue',
action='store_true', dest='continue_dl', default=True,
help='Force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.')
filesystem.add_option(
'--no-continue',
action='store_false', dest='continue_dl',
help='Do not resume partially downloaded files (restart from beginning)')
filesystem.add_option(
'--no-part',
action='store_true', dest='nopart', default=False,
help='Do not use .part files - write directly into output file')
filesystem.add_option(
'--no-mtime',
action='store_false', dest='updatetime', default=True,
help='Do not use the Last-modified header to set the file modification time')
filesystem.add_option(
'--write-description',
action='store_true', dest='writedescription', default=False,
help='Write video description to a .description file')
filesystem.add_option(
'--write-info-json',
action='store_true', dest='writeinfojson', default=False,
help='Write video metadata to a .info.json file')
filesystem.add_option(
'--write-annotations',
action='store_true', dest='writeannotations', default=False,
help='Write video annotations to a .annotations.xml file')
filesystem.add_option(
'--load-info-json', '--load-info',
dest='load_info_filename', metavar='FILE',
help='JSON file containing the video information (created with the "--write-info-json" option)')
filesystem.add_option(
'--cookies',
dest='cookiefile', metavar='FILE',
help='File to read cookies from and dump cookie jar in')
filesystem.add_option(
'--cache-dir', dest='cachedir', default=None, metavar='DIR',
help='Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may change.')
filesystem.add_option(
'--no-cache-dir', action='store_const', const=False, dest='cachedir',
help='Disable filesystem caching')
filesystem.add_option(
'--rm-cache-dir',
action='store_true', dest='rm_cachedir',
help='Delete all filesystem cache files')
thumbnail = optparse.OptionGroup(parser, 'Thumbnail Options')
thumbnail.add_option(
'--write-thumbnail',
action='store_true', dest='writethumbnail', default=False,
help='Write thumbnail image to disk')
thumbnail.add_option(
'--write-all-thumbnails',
action='store_true', dest='write_all_thumbnails', default=False,
help='Write all thumbnail image formats to disk')
thumbnail.add_option(
'--list-thumbnails',
action='store_true', dest='list_thumbnails', default=False,
help='Simulate and list all available thumbnail formats')
postproc = optparse.OptionGroup(parser, 'Post-processing Options')
postproc.add_option(
'-x', '--extract-audio',
action='store_true', dest='extractaudio', default=False,
help='Convert video files to audio-only files (requires ffmpeg/avconv and ffprobe/avprobe)')
postproc.add_option(
'--audio-format', metavar='FORMAT', dest='audioformat', default='best',
help='Specify audio format: "best", "aac", "flac", "mp3", "m4a", "opus", "vorbis", or "wav"; "%default" by default; No effect without -x')
postproc.add_option(
'--audio-quality', metavar='QUALITY',
dest='audioquality', default='5',
help='Specify ffmpeg/avconv audio quality, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default %default)')
postproc.add_option(
'--recode-video',
metavar='FORMAT', dest='recodevideo', default=None,
help='Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm|mkv|avi)')
postproc.add_option(
'--postprocessor-args',
dest='postprocessor_args', metavar='ARGS',
help='Give these arguments to the postprocessor')
postproc.add_option(
'-k', '--keep-video',
action='store_true', dest='keepvideo', default=False,
help='Keep the video file on disk after the post-processing; the video is erased by default')
postproc.add_option(
'--no-post-overwrites',
action='store_true', dest='nopostoverwrites', default=False,
help='Do not overwrite post-processed files; the post-processed files are overwritten by default')
postproc.add_option(
'--embed-subs',
action='store_true', dest='embedsubtitles', default=False,
help='Embed subtitles in the video (only for mp4, webm and mkv videos)')
postproc.add_option(
'--embed-thumbnail',
action='store_true', dest='embedthumbnail', default=False,
help='Embed thumbnail in the audio as cover art')
postproc.add_option(
'--add-metadata',
action='store_true', dest='addmetadata', default=False,
help='Write metadata to the video file')
postproc.add_option(
'--metadata-from-title',
metavar='FORMAT', dest='metafromtitle',
help='Parse additional metadata like song title / artist from the video title. '
'The format syntax is the same as --output. Regular expression with '
'named capture groups may also be used. '
'The parsed parameters replace existing values. '
'Example: --metadata-from-title "%(artist)s - %(title)s" matches a title like '
'"Coldplay - Paradise". '
'Example (regex): --metadata-from-title "(?P<artist>.+?) - (?P<title>.+)"')
postproc.add_option(
'--xattrs',
action='store_true', dest='xattrs', default=False,
help='Write metadata to the video file\'s xattrs (using dublin core and xdg standards)')
postproc.add_option(
'--fixup',
metavar='POLICY', dest='fixup', default='detect_or_warn',
help='Automatically correct known faults of the file. '
'One of never (do nothing), warn (only emit a warning), '
'detect_or_warn (the default; fix file if we can, warn otherwise)')
postproc.add_option(
'--prefer-avconv',
action='store_false', dest='prefer_ffmpeg',
help='Prefer avconv over ffmpeg for running the postprocessors')
postproc.add_option(
'--prefer-ffmpeg',
action='store_true', dest='prefer_ffmpeg',
help='Prefer ffmpeg over avconv for running the postprocessors (default)')
postproc.add_option(
'--ffmpeg-location', '--avconv-location', metavar='PATH',
dest='ffmpeg_location',
help='Location of the ffmpeg/avconv binary; either the path to the binary or its containing directory.')
postproc.add_option(
'--exec',
metavar='CMD', dest='exec_cmd',
help='Execute a command on the file after downloading and post-processing, similar to find\'s -exec syntax. Example: --exec \'adb push {} /sdcard/Music/ && rm {}\'')
postproc.add_option(
'--convert-subs', '--convert-subtitles',
metavar='FORMAT', dest='convertsubtitles', default=None,
help='Convert the subtitles to other format (currently supported: srt|ass|vtt|lrc)')
parser.add_option_group(general)
parser.add_option_group(network)
parser.add_option_group(geo)
parser.add_option_group(selection)
parser.add_option_group(downloader)
parser.add_option_group(filesystem)
parser.add_option_group(thumbnail)
parser.add_option_group(verbosity)
parser.add_option_group(workarounds)
parser.add_option_group(video_format)
parser.add_option_group(subtitles)
parser.add_option_group(authentication)
parser.add_option_group(adobe_pass)
parser.add_option_group(postproc)
if overrideArguments is not None:
opts, args = parser.parse_args(overrideArguments)
if opts.verbose:
write_string('[debug] Override config: ' + repr(overrideArguments) + '\n')
else:
def compat_conf(conf):
if sys.version_info < (3,):
return [a.decode(preferredencoding(), 'replace') for a in conf]
return conf
command_line_conf = compat_conf(sys.argv[1:])
opts, args = parser.parse_args(command_line_conf)
system_conf = user_conf = custom_conf = []
if '--config-location' in command_line_conf:
location = compat_expanduser(opts.config_location)
if os.path.isdir(location):
location = os.path.join(location, 'youtube-dl.conf')
if not os.path.exists(location):
parser.error('config-location %s does not exist.' % location)
custom_conf = _readOptions(location)
elif '--ignore-config' in command_line_conf:
pass
else:
system_conf = _readOptions('/etc/youtube-dl.conf')
if '--ignore-config' not in system_conf:
user_conf = _readUserConf()
argv = system_conf + user_conf + custom_conf + command_line_conf
opts, args = parser.parse_args(argv)
if opts.verbose:
for conf_label, conf in (
('System config', system_conf),
('User config', user_conf),
('Custom config', custom_conf),
('Command-line args', command_line_conf)):
write_string('[debug] %s: %s\n' % (conf_label, repr(_hide_login_info(conf))))
return parser, opts, args
|
41,998 | def _create_zmap(
x_values: np.ndarray,
y_values: np.ndarray,
z_values: List[Union[int, float]],
xi: np.ndarray,
yi: np.ndarray,
) -> Dict[complex, Union[int, float]]:
# creates z-map from trial values and params.
# since params were resampled either with linspace or logspace
# original params might not be on the x and y axes anymore
# so we are going with close approximations of trial value positions
zmap = dict()
for x, y, z in zip(x_values, y_values, z_values):
xindex = np.argmin(np.abs(xi - x))
yindex = np.argmin(np.abs(yi - y))
zmap[complex(xindex, yindex)] = z # type: ignore
return zmap
| def _create_zmap(
x_values: np.ndarray,
y_values: np.ndarray,
z_values: List[Union[int, float]],
xi: np.ndarray,
yi: np.ndarray,
) -> Dict[complex, float]:
# creates z-map from trial values and params.
# since params were resampled either with linspace or logspace
# original params might not be on the x and y axes anymore
# so we are going with close approximations of trial value positions
zmap = dict()
for x, y, z in zip(x_values, y_values, z_values):
xindex = np.argmin(np.abs(xi - x))
yindex = np.argmin(np.abs(yi - y))
zmap[complex(xindex, yindex)] = z # type: ignore
return zmap
|
14,488 | def _make_module_stub(module_name: str) -> None:
# a.b.c is treated a collection
if re.match(r"\w+\.\w+\.[\.\w]+$", module_name):
namespace, collection, module_file = module_name.split(".", 2)
path = f"{ options.project_dir }/.cache/collections/ansible_collections/{ namespace }/{ collection }/plugins/modules"
if "." in module_file:
parts = module_file.split(".")
path += "/" + "/".join(parts[0:-1])
module_file = parts[-1]
print(f"ERROR: {path} -> {module_file}")
os.makedirs(path, exist_ok=True)
_write_module_stub(
filename=f"{path}/{module_file}.py",
name=module_file,
namespace=namespace,
collection=collection,
)
elif "." in module_name:
_logger.error("Config error: %s is not a valid module name.", module_name)
sys.exit(INVALID_CONFIG_RC)
else:
os.makedirs(f"{options.project_dir}/.cache/modules", exist_ok=True)
_write_module_stub(
filename=f"{options.project_dir}/.cache/modules/{module_name}.py",
name=module_name,
)
| def _make_module_stub(module_name: str) -> None:
# a.b.c is treated a collection
if re.match(r"\w+\.\w+(\.\w+)+$", module_name):
namespace, collection, module_file = module_name.split(".", 2)
path = f"{ options.project_dir }/.cache/collections/ansible_collections/{ namespace }/{ collection }/plugins/modules"
if "." in module_file:
parts = module_file.split(".")
path += "/" + "/".join(parts[0:-1])
module_file = parts[-1]
print(f"ERROR: {path} -> {module_file}")
os.makedirs(path, exist_ok=True)
_write_module_stub(
filename=f"{path}/{module_file}.py",
name=module_file,
namespace=namespace,
collection=collection,
)
elif "." in module_name:
_logger.error("Config error: %s is not a valid module name.", module_name)
sys.exit(INVALID_CONFIG_RC)
else:
os.makedirs(f"{options.project_dir}/.cache/modules", exist_ok=True)
_write_module_stub(
filename=f"{options.project_dir}/.cache/modules/{module_name}.py",
name=module_name,
)
|
56,010 | def find_model_file(dest_dir): # this one better
model_files = list(Path(dest_dir).glob("*.npz"))
if not (len(model_files) == 1, model_files):
raise ValueError("Find more than one model file")
model_file = model_files[0]
return model_file
| def find_model_file(dest_dir): # this one better
model_files = list(Path(dest_dir).glob("*.npz"))
if len(model_files) != 1:
raise ValueError(f"Found more than one model file: {model_files}")
model_file = model_files[0]
return model_file
|
42,558 | def test_all_trade_types_in_db(database):
"""
Test that all trade_types in DB deserialize to a valid TradeType
"""
# Query for all locations
cursor = database.conn.cursor()
trade_types = cursor.execute("SELECT type, seq from trade_type")
# We deserialize, then serialize and compare the result
for trade_type, seq in trade_types:
deserialized_trade_type = deserialize_trade_type_from_db(trade_type)
assert deserialized_trade_type.value == seq
assert TradeType(seq).serialize_for_db() == trade_type
trade_type_name = deserialize_trade_type(str(deserialized_trade_type))
assert trade_type_name == deserialized_trade_type
| def test_all_trade_types_in_db(database):
"""
Test that all trade_types in DB deserialize to a valid TradeType
"""
# Query for all trades
cursor = database.conn.cursor()
trade_types = cursor.execute("SELECT type, seq from trade_type")
# We deserialize, then serialize and compare the result
for trade_type, seq in trade_types:
deserialized_trade_type = deserialize_trade_type_from_db(trade_type)
assert deserialized_trade_type.value == seq
assert TradeType(seq).serialize_for_db() == trade_type
trade_type_name = deserialize_trade_type(str(deserialized_trade_type))
assert trade_type_name == deserialized_trade_type
|
5,686 | def fni_input_validation(dist, tol, max_intervals):
if int(max_intervals) != max_intervals or max_intervals <= 1:
raise ValueError("`max_intervals' must be an integer greater than 1.")
tol = float(tol) # if there's an exception, raise it now
return dist, tol, max_intervals
| def _fni_input_validation(dist, tol, max_intervals):
if int(max_intervals) != max_intervals or max_intervals <= 1:
raise ValueError("`max_intervals' must be an integer greater than 1.")
tol = float(tol) # if there's an exception, raise it now
return dist, tol, max_intervals
|
2,248 | def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', Transf()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert pipe.predict(None)
# and transformer params should not be changed
assert pipe.named_steps['transf'].a is None
assert pipe.named_steps['transf'].b is None
# invalid parameters should raise an error message
msg = re.escape(r"fit() got an unexpected keyword argument 'bad'")
with pytest.raises(TypeError, match=msg):
pipe.fit(None, None, clf__bad=True)
| def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', Transf()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert pipe.predict(None)
# and transformer params should not be changed
assert pipe.named_steps['transf'].a is None
assert pipe.named_steps['transf'].b is None
# invalid parameters should raise an error message
msg = re.escape("fit() got an unexpected keyword argument 'bad'")
with pytest.raises(TypeError, match=msg):
pipe.fit(None, None, clf__bad=True)
|
29,777 | def load_heuristic(heuristic):
"""Load heuristic from the file, return the module
"""
if os.path.sep in heuristic or os.path.lexists(heuristic):
heuristic_file = op.realpath(heuristic)
path, fname = op.split(heuristic_file)
try:
old_syspath = sys.path[:]
sys.path.append(0, path)
mod = __import__(fname.split('.')[0])
mod.filename = heuristic_file
finally:
sys.path = old_syspath
else:
from importlib import import_module
try:
mod = import_module('heudiconv.heuristics.%s' % heuristic)
mod.filename = mod.__file__.rstrip('co') # remove c or o from pyc/pyo
except Exception as exc:
raise ImportError(
"Failed to import heuristic %s: %s"
% (heuristic, exc)
)
return mod
| def load_heuristic(heuristic):
"""Load heuristic from the file, return the module
"""
if os.path.sep in heuristic or os.path.lexists(heuristic):
heuristic_file = op.realpath(heuristic)
path, fname = op.split(heuristic_file)
try:
old_syspath = sys.path[:]
sys.path.insert(0, path)
mod = __import__(fname.split('.')[0])
mod.filename = heuristic_file
finally:
sys.path = old_syspath
else:
from importlib import import_module
try:
mod = import_module('heudiconv.heuristics.%s' % heuristic)
mod.filename = mod.__file__.rstrip('co') # remove c or o from pyc/pyo
except Exception as exc:
raise ImportError(
"Failed to import heuristic %s: %s"
% (heuristic, exc)
)
return mod
|
50,335 | def _RsyncFunc(cls, diff_to_apply, thread_state=None):
"""Worker function for performing the actual copy and remove operations."""
gsutil_api = GetCloudApiInstance(cls, thread_state=thread_state)
dst_url_str = diff_to_apply.dst_url_str
dst_url = StorageUrlFromString(dst_url_str)
posix_attrs = diff_to_apply.src_posix_attrs
if diff_to_apply.diff_action == DiffAction.REMOVE:
if cls.dryrun:
cls.logger.info('Would remove %s', dst_url)
else:
cls.logger.info('Removing %s', dst_url)
if dst_url.IsFileUrl():
os.unlink(dst_url.object_name)
else:
try:
gsutil_api.DeleteObject(dst_url.bucket_name,
dst_url.object_name,
generation=dst_url.generation,
provider=dst_url.scheme)
except NotFoundException:
# If the object happened to be deleted by an external process, this
# is fine because it moves us closer to the desired state.
pass
elif diff_to_apply.diff_action == DiffAction.COPY:
src_url_str = diff_to_apply.src_url_str
src_url = StorageUrlFromString(src_url_str)
if cls.dryrun:
if src_url.IsFileUrl():
# Try to open the local file to detect errors that would occur in
# non-dry-run mode.
try:
with open(src_url.object_name, 'rb') as _:
pass
except Exception as e: # pylint: disable=broad-except
cls.logger.info('Could not open %s' % src_url.object_name)
raise
cls.logger.info('Would copy %s to %s', src_url, dst_url)
else:
try:
src_obj_metadata = None
if src_url.IsCloudUrl():
src_generation = GenerationFromUrlAndString(src_url,
src_url.generation)
src_obj_metadata = gsutil_api.GetObjectMetadata(
src_url.bucket_name,
src_url.object_name,
generation=src_generation,
provider=src_url.scheme,
fields=cls.source_metadata_fields)
if ObjectIsGzipEncoded(src_obj_metadata):
cls.logger.info(
'%s has a compressed content-encoding, so it will be '
'decompressed upon download; future executions of gsutil rsync '
'with this source object will always download it. If you wish '
'to synchronize such an object efficiently, compress the '
'source objects in place before synchronizing them, rather '
'than (for example) using gsutil cp -Z to compress them '
'on-the-fly (which results in compressed content-encoding).' %
src_url)
else: # src_url.IsFileUrl()
src_obj_metadata = apitools_messages.Object()
# getmtime can return a float, so it needs to be converted to long.
if posix_attrs.mtime > long(time.time()) + SECONDS_PER_DAY:
WarnFutureTimestamp('mtime', src_url.url_string)
if src_obj_metadata.metadata:
custom_metadata = src_obj_metadata.metadata
else:
custom_metadata = apitools_messages.Object.MetadataValue(
additionalProperties=[])
SerializeFileAttributesToObjectMetadata(
posix_attrs,
custom_metadata,
preserve_posix=cls.preserve_posix_attrs)
tmp_obj_metadata = apitools_messages.Object()
tmp_obj_metadata.metadata = custom_metadata
CopyCustomMetadata(tmp_obj_metadata, src_obj_metadata, override=True)
copy_result = copy_helper.PerformCopy(
cls.logger,
src_url,
dst_url,
gsutil_api,
cls,
_RsyncExceptionHandler,
src_obj_metadata=src_obj_metadata,
headers=cls.headers,
is_rsync=True,
gzip_encoded=cls.gzip_encoded,
gzip_exts=cls.gzip_exts,
preserve_posix=cls.preserve_posix_attrs)
if copy_result is not None:
(_, bytes_transferred, _, _) = copy_result
with cls.stats_lock:
cls.total_bytes_transferred += bytes_transferred
except SkipUnsupportedObjectError as e:
cls.logger.info('Skipping item %s with unsupported object type %s',
src_url, e.unsupported_type)
elif diff_to_apply.diff_action == DiffAction.MTIME_SRC_TO_DST:
# If the destination is an object in a bucket, this will not blow away other
# metadata. This behavior is unlike if the file/object actually needed to be
# copied from the source to the destination.
dst_url = StorageUrlFromString(diff_to_apply.dst_url_str)
if cls.dryrun:
cls.logger.info('Would set mtime for %s', dst_url)
else:
cls.logger.info('Copying mtime from src to dst for %s',
dst_url.url_string)
mtime = posix_attrs.mtime
obj_metadata = apitools_messages.Object()
obj_metadata.metadata = CreateCustomMetadata({MTIME_ATTR: mtime})
if dst_url.IsCloudUrl():
dst_url = StorageUrlFromString(diff_to_apply.dst_url_str)
dst_generation = GenerationFromUrlAndString(dst_url, dst_url.generation)
try:
# Assume we have permission, and can patch the object.
gsutil_api.PatchObjectMetadata(dst_url.bucket_name,
dst_url.object_name,
obj_metadata,
provider=dst_url.scheme,
generation=dst_url.generation)
except:
# We don't have permission to patch apparently, so it must be copied.
cls.logger.info(
'Copying whole file/object for %s instead of patching'
' because you don\'t have patch permission on the '
'object.', dst_url.url_string)
_RsyncFunc(cls,
RsyncDiffToApply(diff_to_apply.src_url_str,
diff_to_apply.dst_url_str, posix_attrs,
DiffAction.COPY, diff_to_apply.copy_size),
thread_state=thread_state)
else:
ParseAndSetPOSIXAttributes(dst_url.object_name,
obj_metadata,
preserve_posix=cls.preserve_posix_attrs)
elif diff_to_apply.diff_action == DiffAction.POSIX_SRC_TO_DST:
# If the destination is an object in a bucket, this will not blow away other
# metadata. This behavior is unlike if the file/object actually needed to be
# copied from the source to the destination.
dst_url = StorageUrlFromString(diff_to_apply.dst_url_str)
if cls.dryrun:
cls.logger.info('Would set POSIX attributes for %s', dst_url)
else:
cls.logger.info('Copying POSIX attributes from src to dst for %s',
dst_url.url_string)
obj_metadata = apitools_messages.Object()
obj_metadata.metadata = apitools_messages.Object.MetadataValue(
additionalProperties=[])
SerializeFileAttributesToObjectMetadata(posix_attrs,
obj_metadata.metadata,
preserve_posix=True)
if dst_url.IsCloudUrl():
dst_generation = GenerationFromUrlAndString(dst_url, dst_url.generation)
dst_obj_metadata = gsutil_api.GetObjectMetadata(
dst_url.bucket_name,
dst_url.object_name,
generation=dst_generation,
provider=dst_url.scheme,
fields=['acl'])
try:
# Assume we have ownership, and can patch the object.
gsutil_api.PatchObjectMetadata(dst_url.bucket_name,
dst_url.object_name,
obj_metadata,
provider=dst_url.scheme,
generation=dst_url.generation)
except:
# Apparently we don't have object ownership, so it must be copied.
cls.logger.info(
'Copying whole file/object for %s instead of patching'
' because you don\'t have patch permission on the '
'object.', dst_url.url_string)
_RsyncFunc(cls,
RsyncDiffToApply(diff_to_apply.src_url_str,
diff_to_apply.dst_url_str, posix_attrs,
DiffAction.COPY, diff_to_apply.copy_size),
thread_state=thread_state)
else:
raise CommandException('Got unexpected DiffAction (%d)' %
diff_to_apply.diff_action)
| def _RsyncFunc(cls, diff_to_apply, thread_state=None):
"""Worker function for performing the actual copy and remove operations."""
gsutil_api = GetCloudApiInstance(cls, thread_state=thread_state)
dst_url_str = diff_to_apply.dst_url_str
dst_url = StorageUrlFromString(dst_url_str)
posix_attrs = diff_to_apply.src_posix_attrs
if diff_to_apply.diff_action == DiffAction.REMOVE:
if cls.dryrun:
cls.logger.info('Would remove %s', dst_url)
else:
cls.logger.info('Removing %s', dst_url)
if dst_url.IsFileUrl():
os.unlink(dst_url.object_name)
else:
try:
gsutil_api.DeleteObject(dst_url.bucket_name,
dst_url.object_name,
generation=dst_url.generation,
provider=dst_url.scheme)
except NotFoundException:
# If the object happened to be deleted by an external process, this
# is fine because it moves us closer to the desired state.
pass
elif diff_to_apply.diff_action == DiffAction.COPY:
src_url_str = diff_to_apply.src_url_str
src_url = StorageUrlFromString(src_url_str)
if cls.dryrun:
if src_url.IsFileUrl():
# Try to open the local file to detect errors that would occur in
# non-dry-run mode.
try:
with open(src_url.object_name, 'rb') as _:
pass
except Exception as e: # pylint: disable=broad-except
cls.logger.info('Could not open %s' % src_url.object_name)
raise
cls.logger.info('Would copy %s to %s', src_url, dst_url)
else:
try:
src_obj_metadata = None
if src_url.IsCloudUrl():
src_generation = GenerationFromUrlAndString(src_url,
src_url.generation)
src_obj_metadata = gsutil_api.GetObjectMetadata(
src_url.bucket_name,
src_url.object_name,
generation=src_generation,
provider=src_url.scheme,
fields=cls.source_metadata_fields)
if ObjectIsGzipEncoded(src_obj_metadata):
cls.logger.info(
'%s has a compressed content-encoding, so it will be '
'decompressed upon download; future executions of gsutil rsync '
'with this source object will always download it. If you wish '
'to synchronize such an object efficiently, compress the '
'source objects in place before synchronizing them, rather '
'than (for example) using gsutil cp -Z to compress them '
'on-the-fly (which results in compressed content-encoding).' %
src_url)
else: # src_url.IsFileUrl()
src_obj_metadata = apitools_messages.Object()
# getmtime can return a float, so it needs to be converted to long.
if posix_attrs.mtime > long(time.time()) + SECONDS_PER_DAY:
WarnFutureTimestamp('mtime', src_url.url_string)
if src_obj_metadata.metadata:
custom_metadata = src_obj_metadata.metadata
else:
custom_metadata = apitools_messages.Object.MetadataValue(
additionalProperties=[])
SerializeFileAttributesToObjectMetadata(
posix_attrs,
custom_metadata,
preserve_posix=cls.preserve_posix_attrs)
tmp_obj_metadata = apitools_messages.Object()
tmp_obj_metadata.metadata = custom_metadata
CopyCustomMetadata(tmp_obj_metadata, src_obj_metadata, override=True)
copy_result = copy_helper.PerformCopy(
cls.logger,
src_url,
dst_url,
gsutil_api,
cls,
_RsyncExceptionHandler,
src_obj_metadata=src_obj_metadata,
headers=cls.headers,
is_rsync=True,
gzip_encoded=cls.gzip_encoded,
gzip_exts=cls.gzip_exts,
preserve_posix=cls.preserve_posix_attrs)
if copy_result is not None:
(_, bytes_transferred, _, _) = copy_result
with cls.stats_lock:
cls.total_bytes_transferred += bytes_transferred
except SkipUnsupportedObjectError as e:
cls.logger.info('Skipping item %s with unsupported object type %s',
src_url, e.unsupported_type)
elif diff_to_apply.diff_action == DiffAction.MTIME_SRC_TO_DST:
# If the destination is an object in a bucket, this will not blow away other
# metadata. This behavior is unlike if the file/object actually needed to be
# copied from the source to the destination.
dst_url = StorageUrlFromString(diff_to_apply.dst_url_str)
if cls.dryrun:
cls.logger.info('Would set mtime for %s', dst_url)
else:
cls.logger.info('Copying mtime from src to dst for %s',
dst_url.url_string)
mtime = posix_attrs.mtime
obj_metadata = apitools_messages.Object()
obj_metadata.metadata = CreateCustomMetadata({MTIME_ATTR: mtime})
if dst_url.IsCloudUrl():
dst_url = StorageUrlFromString(diff_to_apply.dst_url_str)
dst_generation = GenerationFromUrlAndString(dst_url, dst_url.generation)
try:
# Assume we have permission, and can patch the object.
gsutil_api.PatchObjectMetadata(dst_url.bucket_name,
dst_url.object_name,
obj_metadata,
provider=dst_url.scheme,
generation=dst_url.generation)
except ServiceException as err:
cls.logger.debug('Error while trying to patch object: %s', err)
# We don't have permission to patch apparently, so it must be copied.
cls.logger.info(
'Copying whole file/object for %s instead of patching'
' because you don\'t have patch permission on the '
'object.', dst_url.url_string)
_RsyncFunc(cls,
RsyncDiffToApply(diff_to_apply.src_url_str,
diff_to_apply.dst_url_str, posix_attrs,
DiffAction.COPY, diff_to_apply.copy_size),
thread_state=thread_state)
else:
ParseAndSetPOSIXAttributes(dst_url.object_name,
obj_metadata,
preserve_posix=cls.preserve_posix_attrs)
elif diff_to_apply.diff_action == DiffAction.POSIX_SRC_TO_DST:
# If the destination is an object in a bucket, this will not blow away other
# metadata. This behavior is unlike if the file/object actually needed to be
# copied from the source to the destination.
dst_url = StorageUrlFromString(diff_to_apply.dst_url_str)
if cls.dryrun:
cls.logger.info('Would set POSIX attributes for %s', dst_url)
else:
cls.logger.info('Copying POSIX attributes from src to dst for %s',
dst_url.url_string)
obj_metadata = apitools_messages.Object()
obj_metadata.metadata = apitools_messages.Object.MetadataValue(
additionalProperties=[])
SerializeFileAttributesToObjectMetadata(posix_attrs,
obj_metadata.metadata,
preserve_posix=True)
if dst_url.IsCloudUrl():
dst_generation = GenerationFromUrlAndString(dst_url, dst_url.generation)
dst_obj_metadata = gsutil_api.GetObjectMetadata(
dst_url.bucket_name,
dst_url.object_name,
generation=dst_generation,
provider=dst_url.scheme,
fields=['acl'])
try:
# Assume we have ownership, and can patch the object.
gsutil_api.PatchObjectMetadata(dst_url.bucket_name,
dst_url.object_name,
obj_metadata,
provider=dst_url.scheme,
generation=dst_url.generation)
except:
# Apparently we don't have object ownership, so it must be copied.
cls.logger.info(
'Copying whole file/object for %s instead of patching'
' because you don\'t have patch permission on the '
'object.', dst_url.url_string)
_RsyncFunc(cls,
RsyncDiffToApply(diff_to_apply.src_url_str,
diff_to_apply.dst_url_str, posix_attrs,
DiffAction.COPY, diff_to_apply.copy_size),
thread_state=thread_state)
else:
raise CommandException('Got unexpected DiffAction (%d)' %
diff_to_apply.diff_action)
|
44,930 | def string_to_type(val: str) -> Union[bool, int, float, str]:
"""
Helper function for transforming string env var values into typed values.
Maps:
- "true" (any capitalization) to `True`
- "false" (any capitalization) to `False`
- integers to `int`
- floats to `float`
Arguments:
- val (str): the string value of an environment variable
Returns:
Union[bool, int, float, str]: the type-cast env var value
"""
# bool
if val.upper() == "TRUE":
return True
elif val.upper() == "FALSE":
return False
# dicts
try:
val_as_obj = literal_eval(val)
return val_as_obj
except Exception:
pass
# return string value
return val
| def string_to_type(val: str) -> Union[bool, int, float, str]:
"""
Helper function for transforming string env var values into typed values.
Maps:
- "true" (any capitalization) to `True`
- "false" (any capitalization) to `False`
- integers to `int`
- floats to `float`
Arguments:
- val (str): the string value of an environment variable
Returns:
Union[bool, int, float, str]: the type-cast env var value
"""
# bool
if val.upper() == "TRUE":
return True
elif val.upper() == "FALSE":
return False
# dicts, ints, floats, or any other literal Python syntax
try:
val_as_obj = literal_eval(val)
return val_as_obj
except Exception:
pass
# return string value
return val
|
25,157 | def _get_if_statement_ancestor(node: NodeNG) -> Optional[If]:
"""Return True if the given node is the child of a If node"""
for parent in node.node_ancestors():
if isinstance(parent, If):
return parent
return None
| def _get_if_statement_ancestor(node: NodeNG) -> Optional[If]:
"""Return the first parent node that is an If node (or None)"""
for parent in node.node_ancestors():
if isinstance(parent, If):
return parent
return None
|
6,830 | def get_frontmatter(string):
"""
Reference: https://github.com/jonbeebe/frontmatter
"""
fmatter = ""
body = ""
result = re.compile(r'^\s*(?:---|\+\+\+)(.*?)(?:---|\+\+\+)\s*(.+)$', re.S | re.M).search(string)
if result:
fmatter = result.group(1)
body = result.group(2)
return {
"attributes": yaml.load(fmatter),
"body": body,
}
| def get_frontmatter(string):
"""
Reference: https://github.com/jonbeebe/frontmatter
"""
fmatter = ""
body = ""
result = re.compile(r'^\s*(?:---|\+\+\+)(.*?)(?:---|\+\+\+)\s*(.+)$', re.S | re.M).search(string)
if result:
fmatter = result.group(1)
body = result.group(2)
return {
"attributes": yaml.safe_load(fmatter),
"body": body,
}
|
8,218 | def _transform_obstime(frame, obstime):
"""
Transform a frame to a new obstime using the appropriate loopback transformation.
If the new obstime is None, no transformation is performed.
If the frame's obstime is None, the frame is copied with the new obstime.
"""
# If obstime is None or the obstime matches, nothing needs to be done
if obstime is None or _times_are_equal(frame.obstime, obstime):
return frame
# Transform to the new obstime using the appropriate loopback transformation
new_frame = frame.replicate(obstime=obstime)
if frame.obstime is not None:
if _apply_diffrot:
from .metaframes import RotatedSunFrame # avoid a circular import
return RotatedSunFrame(base=frame, rotated_time=obstime).transform_to(new_frame)
else:
return frame.transform_to(new_frame)
else:
return new_frame
| def _transform_obstime(frame, obstime):
"""
Transform a frame to a new obstime using the appropriate loopback transformation.
If the new obstime is None, no transformation is performed.
If the frame's obstime is None, the frame is copied with the new obstime.
"""
# If obstime is None or the obstime matches, nothing needs to be done
if obstime is None or _times_are_equal(frame.obstime, obstime):
return frame
# Transform to the new obstime using the appropriate loopback transformation
new_frame = frame.replicate(obstime=obstime)
if frame.obstime is not None:
if _apply_diffrot:
from .metaframes import RotatedSunFrame # avoid a circular import
return RotatedSunFrame(base=frame, rotated_time=obstime).transform_to(new_frame)
return frame.transform_to(new_frame)
else:
return new_frame
|
53,780 | def aggregatelines(network, buses, interlines, line_length_factor=1.0, with_time=True):
#make sure all lines have same bus ordering
positive_order = interlines.bus0_s < interlines.bus1_s
interlines_p = interlines[positive_order]
interlines_n = interlines[~ positive_order].rename(columns={"bus0_s":"bus1_s", "bus1_s":"bus0_s"})
interlines_c = pd.concat((interlines_p,interlines_n), sort=False)
attrs = network.components["Line"]["attrs"]
columns = set(attrs.index[attrs.static & attrs.status.str.startswith('Input')]).difference(('name', 'bus0', 'bus1'))
consense = {
attr: _make_consense('Bus', attr)
for attr in (columns | {'sub_network'}
- {'r', 'x', 'g', 'b', 'terrain_factor', 's_nom',
's_nom_min', 's_nom_max', 's_nom_extendable',
'length', 'v_ang_min', 'v_ang_max'})
}
def aggregatelinegroup(l):
# l.name is a tuple of the groupby index (bus0_s, bus1_s)
length_s = haversine_pts(buses.loc[l.name[0], ['x', 'y']],
buses.loc[l.name[1], ['x', 'y']]) * line_length_factor
v_nom_s = buses.loc[list(l.name),'v_nom'].max()
voltage_factor = (np.asarray(network.buses.loc[l.bus0,'v_nom'])/v_nom_s)**2
length_factor = (length_s/l['length'])
data = dict(
r=1./(voltage_factor/(length_factor * l['r'])).sum(),
x=1./(voltage_factor/(length_factor * l['x'])).sum(),
g=(voltage_factor * length_factor * l['g']).sum(),
b=(voltage_factor * length_factor * l['b']).sum(),
terrain_factor=l['terrain_factor'].mean(),
s_max_pu=(l['s_max_pu'] * _normed(l['s_nom'])).sum(),
s_nom=l['s_nom'].sum(),
s_nom_min=l['s_nom_min'].sum(),
s_nom_max=l['s_nom_max'].sum(),
s_nom_extendable=l['s_nom_extendable'].any(),
num_parallel=l['num_parallel'].sum(),
capital_cost=(length_factor * _normed(l['s_nom']) * l['capital_cost']).sum(),
length=length_s,
sub_network=consense['sub_network'](l['sub_network']),
v_ang_min=l['v_ang_min'].max(),
v_ang_max=l['v_ang_max'].min()
)
data.update((f, consense[f](l[f])) for f in columns.difference(data))
return pd.Series(data, index=[f for f in l.columns if f in columns])
lines = interlines_c.groupby(['bus0_s', 'bus1_s']).apply(aggregatelinegroup)
lines['name'] = [str(i+1) for i in range(len(lines))]
linemap_p = interlines_p.join(lines['name'], on=['bus0_s', 'bus1_s'])['name']
linemap_n = interlines_n.join(lines['name'], on=['bus0_s', 'bus1_s'])['name']
linemap = pd.concat((linemap_p,linemap_n), sort=False)
lines_t = dict()
if with_time:
for attr, df in network.lines_t.items():
df.columns = df.columns.astype(str)
lines_agg_b = df.columns.to_series().map(linemap).dropna()
df_agg = df.loc[:, lines_agg_b.index]
if not df_agg.empty:
pnl_df = df_agg.mul(
network.lines.groupby(linemap).s_nom.apply(
lambda grp: grp/grp.sum()
)).groupby(linemap, axis=1).sum()
pnl_df.columns = _flatten_multiindex(pnl_df.columns).rename("name")
lines_t[attr] = pnl_df
return lines, linemap_p, linemap_n, linemap, lines_t
| def aggregatelines(network, buses, interlines, line_length_factor=1.0, with_time=True):
#make sure all lines have same bus ordering
positive_order = interlines.bus0_s < interlines.bus1_s
interlines_p = interlines[positive_order]
interlines_n = interlines[~ positive_order].rename(columns={"bus0_s":"bus1_s", "bus1_s":"bus0_s"})
interlines_c = pd.concat((interlines_p,interlines_n), sort=False)
attrs = network.components["Line"]["attrs"]
columns = set(attrs.index[attrs.static & attrs.status.str.startswith('Input')]).difference(('name', 'bus0', 'bus1'))
consense = {
attr: _make_consense('Bus', attr)
for attr in (columns | {'sub_network'}
- {'r', 'x', 'g', 'b', 'terrain_factor', 's_nom',
's_nom_min', 's_nom_max', 's_nom_extendable',
'length', 'v_ang_min', 'v_ang_max'})
}
def aggregatelinegroup(l):
# l.name is a tuple of the groupby index (bus0_s, bus1_s)
length_s = haversine_pts(buses.loc[l.name[0], ['x', 'y']],
buses.loc[l.name[1], ['x', 'y']]) * line_length_factor
v_nom_s = buses.loc[list(l.name),'v_nom'].max()
voltage_factor = (np.asarray(network.buses.loc[l.bus0,'v_nom'])/v_nom_s)**2
length_factor = (length_s/l['length'])
data = dict(
r=1./(voltage_factor/(length_factor * l['r'])).sum(),
x=1./(voltage_factor/(length_factor * l['x'])).sum(),
g=(voltage_factor * length_factor * l['g']).sum(),
b=(voltage_factor * length_factor * l['b']).sum(),
terrain_factor=l['terrain_factor'].mean(),
s_max_pu=(l['s_max_pu'] * _normed(l['s_nom'])).sum(),
s_nom=l['s_nom'].sum(),
s_nom_min=l['s_nom_min'].sum(),
s_nom_max=l['s_nom_max'].sum(),
s_nom_extendable=l['s_nom_extendable'].any(),
num_parallel=l['num_parallel'].sum(),
capital_cost=(length_factor * _normed(l['s_nom']) * l['capital_cost']).sum(),
length=length_s,
sub_network=consense['sub_network'](l['sub_network']),
v_ang_min=l['v_ang_min'].max(),
v_ang_max=l['v_ang_max'].min()
)
data.update((f, consense[f](l[f])) for f in columns.difference(data))
return pd.Series(data, index=[f for f in l.columns if f in columns])
lines = interlines_c.groupby(['bus0_s', 'bus1_s']).apply(aggregatelinegroup)
lines['name'] = [str(i+1) for i in range(len(lines))]
linemap_p = interlines_p.join(lines['name'], on=['bus0_s', 'bus1_s'])['name']
linemap_n = interlines_n.join(lines['name'], on=['bus0_s', 'bus1_s'])['name']
linemap = pd.concat((linemap_p,linemap_n), sort=False)
lines_t = dict()
if with_time:
for attr, df in network.lines_t.items():
lines_agg_b = df.columns.to_series().map(linemap).dropna()
df_agg = df.loc[:, lines_agg_b.index]
if not df_agg.empty:
if (attr == 's_max_pu') or (attr == "s_min_pu"):
weighting = network.lines.groupby(linemap).s_nom.transform(_normed)
df_agg = df_agg.multiply(weighting.loc[df_agg.columns], axis=1)
pnl_df = df_agg.groupby(linemap, axis=1).sum()
pnl_df.columns = _flatten_multiindex(pnl_df.columns).rename("name")
lines_t[attr] = pnl_df
return lines, linemap_p, linemap_n, linemap, lines_t
|
32,207 | def fetch_incidents_detection_alerts(client_obj, params: Dict[str, Any], start_time, end_time, time_window, max_fetch):
"""Fetch incidents of detection alert type.
:type client_obj: Client
:param client_obj: client object.
:type params: dict
:param params: configuration parameter of fetch incidents.
:type start_time: str
:param start_time: start time of request.
:type end_time: str
:param end_time: end time of request.
:type time_window: str
:param time_window: time delay for an event to appear in chronicle after generation
:type max_fetch: str
:param max_fetch: maximum number of incidents to fetch each time
:rtype: list
:return: list of incidents
"""
# list of detections that were pulled but not processed due to max_fetch.
detection_to_process: List[Dict[str, Any]] = []
# detections that are larger than max_fetch and had a next page token for fetch incident.
detection_to_pull: Dict[str, Any] = {}
# max_attempts track for 429 and 500 error
simple_backoff_rules: Dict[str, Any] = {}
# rule_id or version_id and alert_state for which detections are yet to be fetched.
pending_rule_or_version_id_with_alert_state: Dict[str, Any] = {}
detection_identifiers: List = []
rule_first_fetched_time = None
last_run = demisto.getLastRun()
incidents = []
if last_run and 'start_time' in last_run:
start_time = last_run.get('start_time') or start_time
detection_identifiers = last_run.get('detection_identifiers', detection_identifiers)
detection_to_process = last_run.get('detection_to_process', detection_to_process)
detection_to_pull = last_run.get('detection_to_pull', detection_to_pull)
simple_backoff_rules = last_run.get('simple_backoff_rules', simple_backoff_rules)
pending_rule_or_version_id_with_alert_state = last_run.get('pending_rule_or_version_id_with_alert_state',
pending_rule_or_version_id_with_alert_state)
end_time = last_run.get('rule_first_fetched_time') or end_time
if not last_run.get('rule_first_fetched_time'):
demisto.info(f"Starting new time window from START-TIME : {start_time} to END_TIME : {end_time}")
delayed_start_time = generate_delayed_start_time(time_window, start_time)
fetch_detection_by_alert_state = pending_rule_or_version_id_with_alert_state.get('alert_state', '')
fetch_detection_by_list_basis = pending_rule_or_version_id_with_alert_state.get('listBasis', 'CREATED_TIME')
# giving priority to comma separated detection ids over check box of fetch all live detections
if not pending_rule_or_version_id_with_alert_state.get("rule_id") and \
not detection_to_pull and not detection_to_process and not simple_backoff_rules:
fetch_detection_by_ids = params.get('fetch_detection_by_ids') or ""
if not fetch_detection_by_ids and params.get('fetch_all_detections', False):
fetch_detection_by_ids = '-'
fetch_detection_by_ids = get_unique_value_from_list(
[r_v_id.strip() for r_v_id in fetch_detection_by_ids.split(',')])
fetch_detection_by_alert_state = params.get('fetch_detection_by_alert_state',
fetch_detection_by_alert_state)
fetch_detection_by_list_basis = params.get('fetch_detection_by_list_basis', fetch_detection_by_list_basis)
# when 1st time fetch or when pending_rule_or_version_id got emptied in last sync.
# when detection_to_pull has some rule ids
pending_rule_or_version_id_with_alert_state.update({'rule_id': fetch_detection_by_ids,
'alert_state': fetch_detection_by_alert_state,
'listBasis': fetch_detection_by_list_basis})
events, detection_to_process, detection_to_pull, pending_rule_or_version_id, simple_backoff_rules \
= fetch_detections(client_obj, delayed_start_time, end_time, int(max_fetch), detection_to_process,
detection_to_pull, pending_rule_or_version_id_with_alert_state.get('rule_id', ''),
pending_rule_or_version_id_with_alert_state.get('alert_state', ''), simple_backoff_rules,
pending_rule_or_version_id_with_alert_state.get('listBasis'))
# The batch processing is in progress i.e. detections for pending rules are yet to be fetched
# so updating the end_time to the start time when considered for current batch
if pending_rule_or_version_id or detection_to_pull or simple_backoff_rules:
rule_first_fetched_time = end_time
end_time = start_time
else:
demisto.info(f"End of current time window from START-TIME : {start_time} to END_TIME : {end_time}")
pending_rule_or_version_id_with_alert_state.update({'rule_id': pending_rule_or_version_id,
'alert_state': fetch_detection_by_alert_state,
'listBasis': fetch_detection_by_list_basis})
detection_identifiers, unique_detections = deduplicate_detections(events, detection_identifiers)
if unique_detections:
incidents = convert_events_to_actionable_incidents(unique_detections)
demisto.setLastRun({
'start_time': end_time,
'detection_identifiers': detection_identifiers,
'rule_first_fetched_time': rule_first_fetched_time,
'detection_to_process': detection_to_process,
'detection_to_pull': detection_to_pull,
'simple_backoff_rules': simple_backoff_rules,
'pending_rule_or_version_id_with_alert_state': pending_rule_or_version_id_with_alert_state
})
return incidents
| def fetch_incidents_detection_alerts(client_obj, params: Dict[str, Any], start_time, end_time, time_window, max_fetch):
"""Fetch incidents of detection alert type.
:type client_obj: Client
:param client_obj: client object.
:type params: dict
:param params: configuration parameter of fetch incidents.
:type start_time: str
:param start_time: start time of request.
:type end_time: str
:param end_time: end time of request.
:type time_window: str
:param time_window: time delay for an event to appear in chronicle after generation
:type max_fetch: str
:param max_fetch: maximum number of incidents to fetch each time
:rtype: list
:return: list of incidents
"""
# list of detections that were pulled but not processed due to max_fetch.
detection_to_process: List[Dict[str, Any]] = []
# detections that are larger than max_fetch and had a next page token for fetch incident.
detection_to_pull: Dict[str, Any] = {}
# max_attempts track for 429 and 500 error
simple_backoff_rules: Dict[str, Any] = {}
# rule_id or version_id and alert_state for which detections are yet to be fetched.
pending_rule_or_version_id_with_alert_state: Dict[str, Any] = {}
detection_identifiers: List = []
rule_first_fetched_time = None
last_run = demisto.getLastRun()
incidents = []
if last_run and 'start_time' in last_run:
start_time = last_run.get('start_time') or start_time
detection_identifiers = last_run.get('detection_identifiers', detection_identifiers)
detection_to_process = last_run.get('detection_to_process', detection_to_process)
detection_to_pull = last_run.get('detection_to_pull', detection_to_pull)
simple_backoff_rules = last_run.get('simple_backoff_rules', simple_backoff_rules)
pending_rule_or_version_id_with_alert_state = last_run.get('pending_rule_or_version_id_with_alert_state',
pending_rule_or_version_id_with_alert_state)
end_time = last_run.get('rule_first_fetched_time') or end_time
if not last_run.get('rule_first_fetched_time'):
demisto.info(f"Starting new time window from START-TIME : {start_time} to END_TIME : {end_time}")
delayed_start_time = generate_delayed_start_time(time_window, start_time)
fetch_detection_by_alert_state = pending_rule_or_version_id_with_alert_state.get('alert_state', '')
fetch_detection_by_list_basis = pending_rule_or_version_id_with_alert_state.get('listBasis', 'CREATED_TIME')
# giving priority to comma separated detection ids over check box of fetch all live detections
if not pending_rule_or_version_id_with_alert_state.get("rule_id") and \
not detection_to_pull and not detection_to_process and not simple_backoff_rules:
fetch_detection_by_ids = params.get('fetch_detection_by_ids') or ""
if not fetch_detection_by_ids and argToBoolean(params.get('fetch_all_detections', False)):
fetch_detection_by_ids = '-'
fetch_detection_by_ids = get_unique_value_from_list(
[r_v_id.strip() for r_v_id in fetch_detection_by_ids.split(',')])
fetch_detection_by_alert_state = params.get('fetch_detection_by_alert_state',
fetch_detection_by_alert_state)
fetch_detection_by_list_basis = params.get('fetch_detection_by_list_basis', fetch_detection_by_list_basis)
# when 1st time fetch or when pending_rule_or_version_id got emptied in last sync.
# when detection_to_pull has some rule ids
pending_rule_or_version_id_with_alert_state.update({'rule_id': fetch_detection_by_ids,
'alert_state': fetch_detection_by_alert_state,
'listBasis': fetch_detection_by_list_basis})
events, detection_to_process, detection_to_pull, pending_rule_or_version_id, simple_backoff_rules \
= fetch_detections(client_obj, delayed_start_time, end_time, int(max_fetch), detection_to_process,
detection_to_pull, pending_rule_or_version_id_with_alert_state.get('rule_id', ''),
pending_rule_or_version_id_with_alert_state.get('alert_state', ''), simple_backoff_rules,
pending_rule_or_version_id_with_alert_state.get('listBasis'))
# The batch processing is in progress i.e. detections for pending rules are yet to be fetched
# so updating the end_time to the start time when considered for current batch
if pending_rule_or_version_id or detection_to_pull or simple_backoff_rules:
rule_first_fetched_time = end_time
end_time = start_time
else:
demisto.info(f"End of current time window from START-TIME : {start_time} to END_TIME : {end_time}")
pending_rule_or_version_id_with_alert_state.update({'rule_id': pending_rule_or_version_id,
'alert_state': fetch_detection_by_alert_state,
'listBasis': fetch_detection_by_list_basis})
detection_identifiers, unique_detections = deduplicate_detections(events, detection_identifiers)
if unique_detections:
incidents = convert_events_to_actionable_incidents(unique_detections)
demisto.setLastRun({
'start_time': end_time,
'detection_identifiers': detection_identifiers,
'rule_first_fetched_time': rule_first_fetched_time,
'detection_to_process': detection_to_process,
'detection_to_pull': detection_to_pull,
'simple_backoff_rules': simple_backoff_rules,
'pending_rule_or_version_id_with_alert_state': pending_rule_or_version_id_with_alert_state
})
return incidents
|
33,227 | def _update_from_standard_locations():
""" Check standard locations for config files and update settings if found.
Order is user's home dir, environment variable ($PYBIDS_CONFIG), and then
current directory--with later files taking precedence over earlier ones.
"""
locs = [
Path.home() / _config_name,
Path('.') / _config_name
]
if 'PYBIDS_CONFIG' in os.environ:
locs.insert(1, os.environ['PYBIDS_CONFIG'])
from_file(locs, False)
| def _update_from_standard_locations():
""" Check standard locations for config files and update settings if found.
Order is user's home dir, environment variable ($PYBIDS_CONFIG), and then
current directory--with later files taking precedence over earlier ones.
"""
locs = [
Path.home() / _config_name,
Path() / _config_name
]
if 'PYBIDS_CONFIG' in os.environ:
locs.insert(1, os.environ['PYBIDS_CONFIG'])
from_file(locs, False)
|
31,146 | def main():
SESSION.proxies = handle_proxy()
client = SixgillEnrichClient(
demisto.params()["client_id"], demisto.params()["client_secret"], CHANNEL_CODE, demisto, SESSION, VERIFY
)
command = demisto.command()
demisto.info(f"Command being called is {command}")
commands: Dict[str, Callable] = {
"test-module": test_module_command,
}
try:
if demisto.command() == "ip":
return_results(ip_reputation_command(client, demisto.args()))
elif demisto.command() == "domain":
return_results(domain_reputation_command(client, demisto.args()))
elif demisto.command() == "url":
return_results(url_reputation_command(client, demisto.args()))
elif demisto.command() == "file":
return_results(file_reputation_command(client, demisto.args()))
elif demisto.command() == "actor":
return_results(actor_reputation_command(client, demisto.args()))
elif demisto.command() == "post_id":
return_results(postid_reputation_command(client, demisto.args()))
else:
readable_output, outputs, raw_response = commands[command](client, demisto.args())
return_outputs(readable_output, outputs, raw_response)
except Exception as e:
demisto.error(traceback.format_exc())
return_error(f"Error failed to execute {demisto.command()}, error: [{e}]")
| def main():
SESSION.proxies = handle_proxy()
client = SixgillEnrichClient(
demisto.params()["client_id"], demisto.params()["client_secret"], CHANNEL_CODE, demisto, SESSION, VERIFY
)
command = demisto.command()
demisto.info(f"Command being called is {command}")
commands: Dict[str, Callable] = {
"test-module": test_module_command,
}
try:
if command == "ip":
return_results(ip_reputation_command(client, demisto.args()))
elif demisto.command() == "domain":
return_results(domain_reputation_command(client, demisto.args()))
elif demisto.command() == "url":
return_results(url_reputation_command(client, demisto.args()))
elif demisto.command() == "file":
return_results(file_reputation_command(client, demisto.args()))
elif demisto.command() == "actor":
return_results(actor_reputation_command(client, demisto.args()))
elif demisto.command() == "post_id":
return_results(postid_reputation_command(client, demisto.args()))
else:
readable_output, outputs, raw_response = commands[command](client, demisto.args())
return_outputs(readable_output, outputs, raw_response)
except Exception as e:
demisto.error(traceback.format_exc())
return_error(f"Error failed to execute {demisto.command()}, error: [{e}]")
|
53,390 | def _regexp_paths_csv_validator(_, name: str, value: str) -> List[Pattern[str]]:
patterns = []
for val in _csv_validator(_, name, value):
patterns.append(
re.compile(str(pathlib.PureWindowsPath(val)).replace("\\", "\\\\"))
)
patterns.append(re.compile(pathlib.PureWindowsPath(val).as_posix()))
return patterns
| def _regexp_paths_csv_validator(_, name: str, value: str) -> List[Pattern[str]]:
patterns = []
for val in _csv_validator(_, name, value):
patterns.append(
re.compile(
str(pathlib.PureWindowsPath(val)).replace("\\", "\\\\")
+ '|'
+ pathlib.PureWindowsPath(val).as_posix()
)
)
return patterns
|
5,240 | def load_facebook_model(path, encoding='utf-8'):
"""Load the input-hidden weight matrix from Facebook's native fasttext `.bin` output file.
Notes
------
Facebook provides both `.vec` and `.bin` files with their modules.
The former contains human-readable vectors.
The latter contains machine-readable vectors along with other model parameters.
This function requires you to **provide the full path to the .bin file**.
It effectively ignores the `.vec` output file, since it is redundant.
This function uses the smart_open library to open the path.
The path may be on a remote host (e.g. HTTP, S3, etc).
It may also be gzip or bz2 compressed.
For details, see `<https://github.com/RaRe-Technologies/smart_open>`__.
Parameters
----------
model_file : str
Path to the FastText output files.
FastText outputs two model files - `/path/to/model.vec` and `/path/to/model.bin`
Expected value for this example: `/path/to/model` or `/path/to/model.bin`,
as Gensim requires only `.bin` file to the load entire fastText model.
encoding : str, optional
Specifies the file encoding.
Examples
--------
Load, infer, continue training:
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> cap_path = datapath("crime-and-punishment.bin")
>>> fb_model = load_facebook_model(cap_path)
>>>
>>> 'landlord' in fb_model.wv.vocab # Word is out of vocabulary
False
>>> oov_term = fb_model.wv['landlord']
>>>
>>> 'landlady' in fb_model.wv.vocab # Word is in the vocabulary
True
>>> iv_term = fb_model.wv['landlady']
>>>
>>> new_sent = [['lord', 'of', 'the', 'rings'], ['lord', 'of', 'the', 'flies']]
>>> fb_model.build_vocab(new_sent, update=True)
>>> fb_model.train(sentences=new_sent, total_examples=len(new_sent), epochs=5)
Returns
-------
gensim.models.fasttext.FastText
The loaded model.
See Also
--------
:func:`~gensim.models.fasttext.load_facebook_vectors` loads
the word embeddings only. Its faster, but does not enable you to continue
training.
"""
return _load_fasttext_format(path, encoding=encoding, full_model=True)
| def load_facebook_model(path, encoding='utf-8'):
"""Load the input-hidden weight matrix from Facebook's native fasttext `.bin` output file.
Notes
------
Facebook provides both `.vec` and `.bin` files with their modules.
The former contains human-readable vectors.
The latter contains machine-readable vectors along with other model parameters.
This function requires you to **provide the full path to the .bin file**.
It effectively ignores the `.vec` output file, since it is redundant.
This function uses the smart_open library to open the path.
The path may be on a remote host (e.g. HTTP, S3, etc).
It may also be gzip or bz2 compressed (i.e. end in `.bin.gz` or `.bin.bz2`).
For details, see `<https://github.com/RaRe-Technologies/smart_open>`__.
Parameters
----------
model_file : str
Path to the FastText output files.
FastText outputs two model files - `/path/to/model.vec` and `/path/to/model.bin`
Expected value for this example: `/path/to/model` or `/path/to/model.bin`,
as Gensim requires only `.bin` file to the load entire fastText model.
encoding : str, optional
Specifies the file encoding.
Examples
--------
Load, infer, continue training:
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> cap_path = datapath("crime-and-punishment.bin")
>>> fb_model = load_facebook_model(cap_path)
>>>
>>> 'landlord' in fb_model.wv.vocab # Word is out of vocabulary
False
>>> oov_term = fb_model.wv['landlord']
>>>
>>> 'landlady' in fb_model.wv.vocab # Word is in the vocabulary
True
>>> iv_term = fb_model.wv['landlady']
>>>
>>> new_sent = [['lord', 'of', 'the', 'rings'], ['lord', 'of', 'the', 'flies']]
>>> fb_model.build_vocab(new_sent, update=True)
>>> fb_model.train(sentences=new_sent, total_examples=len(new_sent), epochs=5)
Returns
-------
gensim.models.fasttext.FastText
The loaded model.
See Also
--------
:func:`~gensim.models.fasttext.load_facebook_vectors` loads
the word embeddings only. Its faster, but does not enable you to continue
training.
"""
return _load_fasttext_format(path, encoding=encoding, full_model=True)
|
26,013 | def cf_share_service(cli_ctx, kwargs):
client_kwargs = prepare_client_kwargs_track2(cli_ctx)
client_kwargs = _config_location_mode(kwargs, client_kwargs)
t_share_service = get_sdk(cli_ctx, ResourceType.DATA_STORAGE_FILESHARE, '_share_service_client#ShareServiceClient')
connection_string = kwargs.pop('connection_string', None)
account_key = kwargs.pop('account_key', None)
token_credential = kwargs.pop('token_credential', None)
sas_token = kwargs.pop('sas_token', None)
account_name = kwargs.pop('account_name', None)
account_url = kwargs.pop('account_url', None)
if connection_string:
return t_share_service.from_connection_string(conn_str=connection_string, **client_kwargs)
if not account_url:
account_url = get_account_url(cli_ctx, account_name=account_name, service='file')
credential = account_key or sas_token or token_credential
if account_url and credential:
return t_share_service(account_url=account_url, credential=credential, **client_kwargs)
return None
| def cf_share_service(cli_ctx, kwargs):
client_kwargs = prepare_client_kwargs_track2(cli_ctx)
client_kwargs = _config_location_mode(kwargs, client_kwargs)
t_share_service = get_sdk(cli_ctx, ResourceType.DATA_STORAGE_FILESHARE, '_share_service_client#ShareServiceClient')
connection_string = kwargs.pop('connection_string', None)
account_key = kwargs.pop('account_key', None)
token_credential = kwargs.pop('token_credential', None)
sas_token = kwargs.pop('sas_token', None)
account_name = kwargs.pop('account_name', None)
account_url = kwargs.pop('account_url', None)
if connection_string:
return t_share_service.from_connection_string(conn_str=connection_string, **client_kwargs)
if not account_url:
account_url = get_account_url(cli_ctx, account_name=account_name, service='file')
credential = account_key or sas_token or token_credential
return t_share_service(account_url=account_url, credential=credential, **client_kwargs)
|
47,882 | def CreateArgparse():
parser = argparse.ArgumentParser(description='Smart Library Sample')
parser.add_argument('-fr', type = str,
dest = 'fRec', required=True,
help = 'Required. \
Type of recognizer. \
Available DNN face recognizer - DNNfr')
parser.add_argument('-m_rd', type = str,
dest = 'rdModel', required=True,
help = 'Required. Path to .xml file')
parser.add_argument('-fd', type = str,
dest = 'fdDet', required=True,
help = 'Required. \
Type of detector. \
Available DNN face detector - DNNfd')
parser.add_argument('-m_fd', type = str,
dest = 'fdModel', required=True,
help = 'Required. Path to .xml file')
parser.add_argument('-lm', type = str, dest = 'lmDet', required=True,
help = 'Required. \
Type of detector. \
Available DNN landmarks regression - DNNlm')
parser.add_argument('-m_lm', type = str,
dest = 'lmModel', required=True,
help = 'Required. \
Path to .xml file')
parser.add_argument('-w_rd', type = int, default = 128,
dest = 'rdWidth',
help = 'Optional. Image width to resize')
parser.add_argument('-h_rd', type = int, default = 128,
dest = 'rdHeight',
help = 'Optional. Image height to resize')
parser.add_argument('-t_rd', type = float, default = 0.8,
dest = 'rdThreshold',
help = 'Optional. \
Probability threshold for face detections.')
parser.add_argument('-w_fd', type = int, default = 300,
dest = 'fdWidth',
help = 'Optional. Image width to resize')
parser.add_argument('-h_fd', type = int, default = 300,
dest = 'fdHeight',
help = 'Optional. Image height to resize')
parser.add_argument('-t_fd', type = float, default = 0.9,
dest = 'fdThreshold',
help = 'Optional. \
Probability threshold for face detections.')
parser.add_argument('-w_lm', type = int, default = 48,
dest = 'lmWidth',
help = 'Optional. Image width to resize')
parser.add_argument('-h_lm', type = int, default = 48,
dest = 'lmHeight',
help = 'Optional. Image height to resize')
parser.add_argument('-br', type = str, default = 'QR',
dest = 'br',
help = 'Optional. Type - QR')
parser.add_argument('-lib', type = str, default = 'library.json',
dest = 'lib',
help = 'Optional. Path to library.')
parser.add_argument('-w', type = int, default = 0,
dest = 'web',
help = 'Optional.\
Specify index of web-camera to open. Default is 0')
args = parser.parse_args()
return args
| def CreateArgparse():
parser = argparse.ArgumentParser(description='Smart Library Sample')
parser.add_argument('-fr', type = str,
required=True,
help = 'Required. \
Type of recognizer. \
Available DNN face recognizer - DNNfr')
parser.add_argument('-m_rd', type = str,
dest = 'rdModel', required=True,
help = 'Required. Path to .xml file')
parser.add_argument('-fd', type = str,
dest = 'fdDet', required=True,
help = 'Required. \
Type of detector. \
Available DNN face detector - DNNfd')
parser.add_argument('-m_fd', type = str,
dest = 'fdModel', required=True,
help = 'Required. Path to .xml file')
parser.add_argument('-lm', type = str, dest = 'lmDet', required=True,
help = 'Required. \
Type of detector. \
Available DNN landmarks regression - DNNlm')
parser.add_argument('-m_lm', type = str,
dest = 'lmModel', required=True,
help = 'Required. \
Path to .xml file')
parser.add_argument('-w_rd', type = int, default = 128,
dest = 'rdWidth',
help = 'Optional. Image width to resize')
parser.add_argument('-h_rd', type = int, default = 128,
dest = 'rdHeight',
help = 'Optional. Image height to resize')
parser.add_argument('-t_rd', type = float, default = 0.8,
dest = 'rdThreshold',
help = 'Optional. \
Probability threshold for face detections.')
parser.add_argument('-w_fd', type = int, default = 300,
dest = 'fdWidth',
help = 'Optional. Image width to resize')
parser.add_argument('-h_fd', type = int, default = 300,
dest = 'fdHeight',
help = 'Optional. Image height to resize')
parser.add_argument('-t_fd', type = float, default = 0.9,
dest = 'fdThreshold',
help = 'Optional. \
Probability threshold for face detections.')
parser.add_argument('-w_lm', type = int, default = 48,
dest = 'lmWidth',
help = 'Optional. Image width to resize')
parser.add_argument('-h_lm', type = int, default = 48,
dest = 'lmHeight',
help = 'Optional. Image height to resize')
parser.add_argument('-br', type = str, default = 'QR',
dest = 'br',
help = 'Optional. Type - QR')
parser.add_argument('-lib', type = str, default = 'library.json',
dest = 'lib',
help = 'Optional. Path to library.')
parser.add_argument('-w', type = int, default = 0,
dest = 'web',
help = 'Optional.\
Specify index of web-camera to open. Default is 0')
args = parser.parse_args()
return args
|
58,843 | def kronsum(A, B, format=None):
"""Kronecker sum of sparse matrices A and B.
Kronecker sum is matrix sum defined as sum of two Kronecker products
kron(I_n, A) + kron(B, I_m), where I_n and I_m are identity matrices
Args:
A (cupyx.scipy.sparse.spmatrix): a sparse matrix.
B (cupyx.scipy.sparse.spmatrix): a sparse matrix.
format (str): the format of the returned sparse matrix.
Returns:
cupyx.scipy.sparse.spmatrix:
Generated sparse matrix with the specified ``format``.
.. seealso:: :func:`scipy.sparse.kronsum`
"""
A = coo.coo_matrix(A)
B = coo.coo_matrix(B)
if A.shape[0] != A.shape[1]:
raise ValueError('A is not square matrix')
if B.shape[0] != B.shape[1]:
raise ValueError('B is not square matrix')
dtype = sputils.upcast(A.dtype, B.dtype)
L = kron(eye(B.shape[0], dtype=dtype), A, format=format)
R = kron(B, eye(A.shape[0], dtype=dtype), format=format)
return (L + R).asformat(format)
| def kronsum(A, B, format=None):
"""Kronecker sum of sparse matrices A and B.
Kronecker sum is the sum of two Kronecker products
``kron(I_n, A) + kron(B, I_m)``, where ``I_n`` and ``I_m`` are identity matrices.
Args:
A (cupyx.scipy.sparse.spmatrix): a sparse matrix.
B (cupyx.scipy.sparse.spmatrix): a sparse matrix.
format (str): the format of the returned sparse matrix.
Returns:
cupyx.scipy.sparse.spmatrix:
Generated sparse matrix with the specified ``format``.
.. seealso:: :func:`scipy.sparse.kronsum`
"""
A = coo.coo_matrix(A)
B = coo.coo_matrix(B)
if A.shape[0] != A.shape[1]:
raise ValueError('A is not square matrix')
if B.shape[0] != B.shape[1]:
raise ValueError('B is not square matrix')
dtype = sputils.upcast(A.dtype, B.dtype)
L = kron(eye(B.shape[0], dtype=dtype), A, format=format)
R = kron(B, eye(A.shape[0], dtype=dtype), format=format)
return (L + R).asformat(format)
|
6,938 | def setup_group_by(data):
'''Add columns for aggregated values e.g. count(name)'''
if data.group_by and data.aggregate_function:
if data.aggregate_function.lower() not in ('count', 'sum', 'avg'):
frappe.throw(_('Invalid aggregate function'))
if frappe.db.has_column(data.aggregate_on_doctype, data.aggregate_on_field):
data.fields.append('{aggregate_function}(`tab{aggregate_on_doctype}`.`{aggregate_on_field}`) AS _aggregate_column'.format(**data))
if data.aggregate_on_field:
data.fields.append("`tab{aggregate_on_doctype}`.`{aggregate_on_field}`".format(**data))
else:
raise_invalid_field(data.aggregate_on_field)
data.pop('aggregate_on_doctype')
data.pop('aggregate_on_field')
data.pop('aggregate_function')
| def setup_group_by(data):
'''Add columns for aggregated values e.g. count(name)'''
if data.group_by and data.aggregate_function:
if data.aggregate_function.lower() not in ('count', 'sum', 'avg'):
frappe.throw(_('Invalid aggregate function'))
if frappe.db.has_column(data.aggregate_on_doctype, data.aggregate_on_field):
data.fields.append('{aggregate_function}(`tab{aggregate_on_doctype}`.`{aggregate_on_field}`) AS _aggregate_column'.format(**data))
if data.aggregate_on_field:
data.fields.append(f"`tab{data.aggregate_on_doctype}`.`{data.aggregate_on_field}`")
else:
raise_invalid_field(data.aggregate_on_field)
data.pop('aggregate_on_doctype')
data.pop('aggregate_on_field')
data.pop('aggregate_function')
|
2,260 | def _check_valid_document(file_name, allowed_extensions, ignored_extensions):
"""
Checks if the file with file_name should be loaded in for load_files
Parameters
----------
file_name: str
The name of the file to check
allowed_extensions : list or set of str,
List of desired file extensions
ignored_extensions : list or set of str,
List of file extensions to exclude
Returns
-------
data : Boolean
Indicates whether or not the file should be
loaded in load_files
"""
if not allowed_extensions and not ignored_extensions:
return True
extension = os.path.splitext(file_name)[1]
if allowed_extensions:
return extension in allowed_extensions
else:
return extension not in ignored_extensions
| def _check_valid_document(file_name, allowed_extensions, ignored_extensions):
"""
Checks if the file with file_name should be loaded in for load_files
Parameters
----------
file_name: str
The name of the file to check
allowed_extensions : set of str or None
List of desired file extensions
ignored_extensions : list or set of str,
List of file extensions to exclude
Returns
-------
data : Boolean
Indicates whether or not the file should be
loaded in load_files
"""
if not allowed_extensions and not ignored_extensions:
return True
extension = os.path.splitext(file_name)[1]
if allowed_extensions:
return extension in allowed_extensions
else:
return extension not in ignored_extensions
|
43,043 | def duschinsky(
Li: np.ndarray, Lf: np.ndarray, ri: np.ndarray, rf: np.ndarray, wf: np.ndarray, m: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
r"""Generate the Duschinsky rotation matrix and displacement vector.
**Example usage:**
>>> Li = np.array([[-0.08727946], [ 0.00000000], [ 0.00000000],
>>> [ 0.95342606], [-0.00000000], [-0.00000000]])
>>> Lf = np.array([[-0.08727946], [ 0.00000000], [ 0.00000000],
>>> [ 0.95342606], [-0.00000000], [-0.00000000]])
>>> ri = np.array([-0.0236542994, 0.0000000000, 0.0000000000,
>>> 1.2236542994, 0.0000000000, 0.0000000000])
>>> rf = np.array([ 0.0000000000, 0.0000000000, 0.0000000000,
>>> 1.4397000000, 0.0000000000, 0.0000000000])
>>> wf = np.array([1363.210])
>>> m = np.array([11.00931] * 3 + [1.00782] * 3)
>>> Ud, delta = duschinsky(Li, Lf, ri, rf, wf, m)
(array([[0.99999546]]), array([-1.1755024]))
Args:
Li (array): normal modes of the initial electronic state
Lf (array): normal modes of the final electronic state
ri (array): equilibrium molecular geometry of the initial electronic state
rf (array): equilibrium molecular geometry of the final electronic state
wf (array): normal mode frequencies of the final electronic state in units of
:math:`\mbox{cm}^{-1}`
m (array): atomic masses in units of unified atomic mass unit
Returns:
tuple[array, array]: Duschinsky rotation matrix :math:`Ud`, Duschinsky displacement vector
:math:`\delta`
"""
Ud = (Lf.T * m ** 0.5) @ (Li.T * m ** 0.5).T
d = (ri - rf) @ (Lf.T * m).T
l0 = np.diag((h / (wf * 100.0 * c)) ** 0.5 / 2.0 / pi) * 1.0e10 / m_u ** 0.5
delta = np.array(d @ np.linalg.inv(l0))
return Ud, delta
| def duschinsky(
Li: np.ndarray, Lf: np.ndarray, ri: np.ndarray, rf: np.ndarray, wf: np.ndarray, m: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
r"""Generate the Duschinsky rotation matrix and displacement vector.
**Example usage:**
>>> Li = np.array([[-0.08727946], [ 0.00000000], [ 0.00000000],
>>> [ 0.95342606], [-0.00000000], [-0.00000000]])
>>> Lf = np.array([[-0.08727946], [ 0.00000000], [ 0.00000000],
>>> [ 0.95342606], [-0.00000000], [-0.00000000]])
>>> ri = np.array([-0.0236542994, 0.0000000000, 0.0000000000,
>>> 1.2236542994, 0.0000000000, 0.0000000000])
>>> rf = np.array([ 0.0000000000, 0.0000000000, 0.0000000000,
>>> 1.4397000000, 0.0000000000, 0.0000000000])
>>> wf = np.array([1363.210])
>>> m = np.array([11.00931] * 3 + [1.00782] * 3)
>>> Ud, delta = duschinsky(Li, Lf, ri, rf, wf, m)
(array([[0.99999546]]), array([-1.1755024]))
Args:
Li (array): normal modes of the initial electronic state
Lf (array): normal modes of the final electronic state
ri (array): equilibrium molecular geometry of the initial electronic state
rf (array): equilibrium molecular geometry of the final electronic state
wf (array): normal mode frequencies of the final electronic state in units of
:math:`\mbox{cm}^{-1}`
m (array): atomic masses in unified atomic mass units
Returns:
tuple[array, array]: Duschinsky rotation matrix :math:`Ud`, Duschinsky displacement vector
:math:`\delta`
"""
Ud = (Lf.T * m ** 0.5) @ (Li.T * m ** 0.5).T
d = (ri - rf) @ (Lf.T * m).T
l0 = np.diag((h / (wf * 100.0 * c)) ** 0.5 / 2.0 / pi) * 1.0e10 / m_u ** 0.5
delta = np.array(d @ np.linalg.inv(l0))
return Ud, delta
|
33,126 | def unbias_var(w=None, N_eff=None, avoid_pathological=False):
"""Compute unbias-ing factor for variance estimation.
Parameters
----------
w: ndarray, optional
Weight of something. Sum(w) = 1.
Only one of w and N_eff can be None. Default: None
N_eff: float, optional
Tolerance of the distance between w and one.
Only one of w and N_eff can be None. Default: None
avoid_pathological: bool, optional
Avoid weight collapse. Default: False
Returns
-------
ub: float
factor used to unbiasing variance
See Also
--------
[Wikipedia](https://wikipedia.org/wiki/Weighted_arithmetic_mean#Reliability_weights)
"""
if N_eff is None:
N_eff = 1/(w@w)
if avoid_pathological and weight_degeneracy(w):
ub = 1 # Don't do in case of weights collapse
else:
ub = 1/(1 - 1/N_eff) # =N/(N-1) if w==ones(N)/N.
return ub
| def unbias_var(w=None, N_eff=None, avoid_pathological=False):
"""Compute unbias-ing factor for variance estimation.
Parameters
----------
w: ndarray, optional
Weight of something. Sum(w) = 1.
Only one of w and N_eff can be None. Default: None
N_eff: float, optional
Tolerance of the distance between w and one.
Only one of w and N_eff can be None. Default: None
avoid_pathological: bool, optional
Avoid weight collapse. Default: False
Returns
-------
ub: float
factor used to unbiasing variance
Reference
--------
[Wikipedia](https://wikipedia.org/wiki/Weighted_arithmetic_mean#Reliability_weights)
"""
if N_eff is None:
N_eff = 1/(w@w)
if avoid_pathological and weight_degeneracy(w):
ub = 1 # Don't do in case of weights collapse
else:
ub = 1/(1 - 1/N_eff) # =N/(N-1) if w==ones(N)/N.
return ub
|
8,346 | def unfence_node(
env: LibraryEnvironment,
node: str,
original_devices: Iterable[str],
updated_devices: Iterable[str],
) -> None:
"""
Unfence scsi devices on a node by calling fence_scsi agent script. Only
newly added devices will be unfenced (set(updated_devices) -
set(original_devices)). Before unfencing, original devices are be checked
if any of them are not fenced. If there is a fenced device, unfencing will
be skipped.
env -- provides communication with externals
node -- node name on wich is unfencing performed
original_devices -- list of devices defined before update
updated_devices -- list of devices defined after update
"""
_unfence_node_devices(
env, node, original_devices, updated_devices, "fence_scsi"
)
| def unfence_node(
env: LibraryEnvironment,
node: str,
original_devices: Iterable[str],
updated_devices: Iterable[str],
) -> None:
"""
Unfence scsi devices on a node by calling fence_scsi agent script. Only
newly added devices will be unfenced (set(updated_devices) -
set(original_devices)). Before unfencing, original devices are checked
if any of them are not fenced. If there is a fenced device, unfencing will
be skipped.
env -- provides communication with externals
node -- node name on wich is unfencing performed
original_devices -- list of devices defined before update
updated_devices -- list of devices defined after update
"""
_unfence_node_devices(
env, node, original_devices, updated_devices, "fence_scsi"
)
|
2,074 | def plot_det_curve(
estimator,
X,
y,
*,
sample_weight=None,
response_method="auto",
name=None,
ax=None,
pos_label=None,
**kwargs
):
"""Plot detection error tradeoff (DET) curve.
Extra keyword arguments will be passed to matplotlib's `plot`.
Read more in the :ref:`User Guide <visualizations>`.
.. versionadded:: 0.24
Parameters
----------
estimator : estimator instance
Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`
in which the last estimator is a classifier.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input values.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
response_method : {'predict_proba', 'decision_function', 'auto'} \
default='auto'
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. If set to 'auto',
:term:`predict_proba` is tried first and if it does not exist
:term:`decision_function` is tried next.
name : str, default=None
Name of ROC Curve for labeling. If `None`, use the name of the
estimator.
ax : matplotlib axes, default=None
Axes object to plot on. If `None`, a new figure and axes is created.
pos_label : str or int, default=None
The label of the positive class.
When `pos_label=None`, if `y_true` is in {-1, 1} or {0, 1},
`pos_label` is set to 1, otherwise an error will be raised.
Returns
-------
display : :class:`~sklearn.metrics.DetCurveDisplay`
Object that stores computed values.
See Also
--------
roc_auc_score : Compute the area under the ROC curve
roc_curve : Compute Receiver operating characteristic (ROC) curve
Examples
--------
"""
check_matplotlib_support('plot_det_curve')
y_pred, pos_label = _get_response(
X, estimator, response_method, pos_label=pos_label
)
fpr, fnr, _ = det_curve(
y, y_pred, pos_label=pos_label, sample_weight=sample_weight,
)
name = estimator.__class__.__name__ if name is None else name
viz = DetCurveDisplay(
fpr=fpr,
fnr=fnr,
estimator_name=name,
pos_label=pos_label
)
return viz.plot(ax=ax, name=name, **kwargs)
| def plot_det_curve(
estimator,
X,
y,
*,
sample_weight=None,
response_method="auto",
name=None,
ax=None,
pos_label=None,
**kwargs
):
"""Plot detection error tradeoff (DET) curve.
Extra keyword arguments will be passed to matplotlib's `plot`.
Read more in the :ref:`User Guide <visualizations>`.
.. versionadded:: 0.24
Parameters
----------
estimator : estimator instance
Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`
in which the last estimator is a classifier.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input values.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
response_method : {'predict_proba', 'decision_function', 'auto'} \
default='auto'
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. If set to 'auto',
:term:`predict_proba` is tried first and if it does not exist
:term:`decision_function` is tried next.
name : str, default=None
Name of DET curve for labeling. If `None`, use the name of the
estimator.
ax : matplotlib axes, default=None
Axes object to plot on. If `None`, a new figure and axes is created.
pos_label : str or int, default=None
The label of the positive class.
When `pos_label=None`, if `y_true` is in {-1, 1} or {0, 1},
`pos_label` is set to 1, otherwise an error will be raised.
Returns
-------
display : :class:`~sklearn.metrics.DetCurveDisplay`
Object that stores computed values.
See Also
--------
roc_auc_score : Compute the area under the ROC curve
roc_curve : Compute Receiver operating characteristic (ROC) curve
Examples
--------
"""
check_matplotlib_support('plot_det_curve')
y_pred, pos_label = _get_response(
X, estimator, response_method, pos_label=pos_label
)
fpr, fnr, _ = det_curve(
y, y_pred, pos_label=pos_label, sample_weight=sample_weight,
)
name = estimator.__class__.__name__ if name is None else name
viz = DetCurveDisplay(
fpr=fpr,
fnr=fnr,
estimator_name=name,
pos_label=pos_label
)
return viz.plot(ax=ax, name=name, **kwargs)
|
19,995 | def find_color_card(img, threshold='adaptgauss', threshvalue=125, blurry=False, background='dark'):
"""Automatically detects a color card and output info to use in create_color_card_mask function
Inputs:
img = Input RGB image data containing a color card.
threshold = Threshold method, either 'normal', 'otsu', or 'adaptgauss', optional (default 'adaptgauss)
thresh_value = Thresholding value, optional (default 125)
blurry = Bool (default False) if True then image sharpening applied
background = Type of image background either 'dark' or 'light (default 'dark'); if 'light' then histogram
expansion applied to better detect edges, but histogram expansion will be hindered if there
is a dark background
Returns:
df = Dataframe containing information about the filtered contours
start_coord = Two element tuple of starting coordinates, location of the top left pixel detected
spacing = Two element tuple of spacing between centers of chips
:param img: numpy.ndarray
:param threshold: str
:param threshvalue: int
:param blurry: bool
:param background: str
:return df: pandas.core.frame.DataFrame
:return start_coord: tuple
:return spacing: tuple
"""
# Imports
import skimage
import pandas as pd
from scipy.spatial.distance import squareform, pdist
# Get image attributes
height, width, channels = img.shape
totalpx = float(height * width)
# Minimum and maximum square size based upon 12 MP image
minarea = 1000. / 12000000. * totalpx
maxarea = 8000000. / 12000000. * totalpx
# Create gray image for further processing
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Laplacian Fourier Transform detection of blurriness
blurfactor = cv2.Laplacian(gray_img, cv2.CV_64F).var()
# If image is blurry then try to deblur using kernel
if blurry:
# from https://www.packtpub.com/mapt/book/Application+Development/9781785283932/2/ch02lvl1sec22/Sharpening
kernel = np.array([[-1, -1, -1, -1, -1],
[-1, 2, 2, 2, -1],
[-1, 2, 8, 2, -1],
[-1, 2, 2, 2, -1],
[-1, -1, -1, -1, -1]]) / 8.0
# Store result back out for further processing
gray_img = cv2.filter2D(gray_img, -1, kernel)
# In darker samples, the expansion of the histogram hinders finding the squares due to problems with the otsu
# thresholding. If your image has a bright background then apply
if background == 'light':
clahe = cv2.createCLAHE(clipLimit=3.25, tileGridSize=(4, 4))
# apply CLAHE histogram expansion to find squares better with canny edge detection
gray_img = clahe.apply(gray_img)
elif background != 'dark':
fatal_error('Background parameter ' + str(background) + ' is not "light" or "dark"!')
# Thresholding
if threshold == "otsu":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0)
ret, threshold = cv2.threshold(gaussian, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
elif threshold == "normal":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0)
ret, threshold = cv2.threshold(gaussian, threshvalue, 255, cv2.THRESH_BINARY)
elif threshold == "adaptgauss":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (11, 11), 0)
threshold = cv2.adaptiveThreshold(gaussian, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY_INV, 51, 2)
else:
fatal_error('Threshold ' + str(threshold) + ' is not "otsu", "normal", or "adaptgauss"!')
# Apply automatic Canny edge detection using the computed median
edges = skimage.feature.canny(threshold)
edges.dtype = 'uint8'
# Compute contours to find the squares of the card
_, contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Variable of which contour is which
mindex = []
# Variable to store moments
mu = []
# Variable to x,y coordinates in tuples
mc = []
# Variable to x coordinate as integer
mx = []
# Variable to y coordinate as integer
my = []
# Variable to store area
marea = []
# Variable to store whether something is a square (1) or not (0)
msquare = []
# Variable to store square approximation coordinates
msquarecoords = []
# Variable to store child hierarchy element
mchild = []
# Fitted rectangle height
mheight = []
# Fitted rectangle width
mwidth = []
# Ratio of height/width
mwhratio = []
# Extract moments from contour image
for x in range(0, len(contours)):
mu.append(cv2.moments(contours[x]))
marea.append(cv2.contourArea(contours[x]))
mchild.append(int(hierarchy[0][x][2]))
mindex.append(x)
# Cycle through moment data and compute location for each moment
for m in mu:
if m['m00'] != 0: # This is the area term for a moment
mc.append((int(m['m10'] / m['m00']), int(m['m01']) / m['m00']))
mx.append(int(m['m10'] / m['m00']))
my.append(int(m['m01'] / m['m00']))
else:
mc.append((0, 0))
mx.append((0))
my.append((0))
# Loop over our contours and extract data about them
for index, c in enumerate(contours):
# Area isn't 0, but greater than min-area and less than max-area
if marea[index] != 0 and minarea < marea[index] < maxarea:
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.15 * peri, True)
center, wh, angle = cv2.minAreaRect(c) # Rotated rectangle
mwidth.append(wh[0])
mheight.append(wh[1])
mwhratio.append(wh[0] / wh[1])
msquare.append(len(approx))
# If the approx contour has 4 points then we can assume we have 4-sided objects
if len(approx) == 4 or 5:
msquarecoords.append(approx)
else: # It's not square
msquare.append(0)
msquarecoords.append(0)
else: # Contour has area of 0, not interesting
msquare.append(0)
msquarecoords.append(0)
mwidth.append(0)
mheight.append(0)
mwhratio.append(0)
# Make a pandas df from data for filtering out junk
locarea = {'index': mindex, 'X': mx, 'Y': my, 'width': mwidth, 'height': mheight, 'WHratio': mwhratio,
'Area': marea, 'square': msquare, 'child': mchild}
df = pd.DataFrame(locarea)
# Add calculated blur factor to output
df['blurriness'] = blurfactor
# Filter df for attributes that would isolate squares of reasonable size
df = df[(df['Area'] > minarea) & (df['Area'] < maxarea) & (df['child'] != -1) &
(df['square'].isin([4, 5])) & (df['WHratio'] < 1.2) & (df['WHratio'] > 0.85)]
# Filter nested squares from dataframe, was having issues with median being towards smaller nested squares
df = df[~(df['index'].isin(df['index'] + 1))]
# Count up squares that are within a given radius, more squares = more likelihood of them being the card
# Median width of square time 2.5 gives proximity radius for searching for similar squares
median_sq_width_px = df["width"].median()
# Squares that are within 6 widths of the current square
pixeldist = median_sq_width_px * 6
# Computes euclidean distance matrix for the x and y contour centroids
distmatrix = pd.DataFrame(squareform(pdist(df[['X', 'Y']])))
# Add up distances that are less than ones have distance less than pixeldist pixels
distmatrixflat = distmatrix.apply(lambda dist: dist[dist <= pixeldist].count() - 1, axis=1)
# Append distprox summary to dataframe
df = df.assign(distprox=distmatrixflat.values)
# Compute how similar in area the squares are. lots of similar values indicates card
# isolate area measurements
filtered_area = df['Area']
# Create empty matrix for storing comparisons
sizecomp = np.zeros((len(filtered_area), len(filtered_area)))
# Double loop through all areas to compare to each other
for p in range(0, len(filtered_area)):
for o in range(0, len(filtered_area)):
big = max(filtered_area.iloc[p], filtered_area.iloc[o])
small = min(filtered_area.iloc[p], filtered_area.iloc[o])
pct = 100. * (small / big)
sizecomp[p][o] = pct
# How many comparisons given 90% square similarity
sizematrix = pd.DataFrame(sizecomp).apply(lambda sim: sim[sim >= 90].count() - 1, axis=1)
# Append sizeprox summary to dataframe
df = df.assign(sizeprox=sizematrix.values)
# Reorder dataframe for better printing
df = df[['index', 'X', 'Y', 'width', 'height', 'WHratio', 'Area', 'square', 'child',
'blurriness', 'distprox', 'sizeprox']]
# Loosely filter for size and distance (relative size to median)
minsqwidth = median_sq_width_px * 0.80
maxsqwidth = median_sq_width_px * 1.2
df = df[(df['distprox'] >= 5) & (df['sizeprox'] >= 5) & (df['width'] > minsqwidth) &
(df['width'] < maxsqwidth)]
# Filter for proximity again to root out stragglers
# Find and count up squares that are within given radius,
# more squares = more likelihood of them being the card
# Median width of square time 2.5 gives proximity radius for searching for similar squares
median_sq_width_px = df["width"].median()
# Squares that are within 6 widths of the current square
pixeldist = median_sq_width_px * 5
# Computes euclidean distance matrix for the x and y contour centroids
distmatrix = pd.DataFrame(squareform(pdist(df[['X', 'Y']])))
# Add up distances that are less than ones have distance less than pixeldist pixels
distmatrixflat = distmatrix.apply(lambda dist: dist[dist <= pixeldist].count() - 1, axis=1)
# Append distprox summary to dataframe
df = df.assign(distprox=distmatrixflat.values)
# Filter results for distance proximity to other squares
df = df[(df['distprox'] >= 4)]
# Extract the starting coordinate
start_coord = (int(df['X'].min()), int(df['Y'].min()))
# Calculate the range
spacingx_short = (df['X'].max() - df['X'].min()) / 3
spacingy_short = (df['Y'].max() - df['Y'].min()) / 3
spacingx_long = (df['X'].max() - df['X'].min()) / 5
spacingy_long = (df['Y'].max() - df['Y'].min()) / 5
# Chip spacing since 4x6 card assumed
spacing_short = min(spacingx_short, spacingy_short)
spacing_long = max(spacingx_long, spacingy_long)
# Smaller spacing measurement might have a chip missing
spacing = int(max(spacing_short, spacing_long))
spacing = (spacing, spacing)
return df, start_coord, spacing
| def find_color_card(img, threshold='adaptgauss', threshvalue=125, blurry=False, background='dark'):
"""Automatically detects a color card and output info to use in create_color_card_mask function
Inputs:
img = Input RGB image data containing a color card.
threshold = Threshold method, either 'normal', 'otsu', or 'adaptgauss', optional (default 'adaptgauss)
thresh_value = Thresholding value, optional (default 125)
blurry = Bool (default False) if True then image sharpening applied
background = Type of image background either 'dark' or 'light (default 'dark'); if 'light' then histogram
expansion applied to better detect edges, but histogram expansion will be hindered if there
is a dark background
Returns:
df = Dataframe containing information about the filtered contours
start_coord = Two element tuple of starting coordinates, location of the top left pixel detected
spacing = Two element tuple of spacing between centers of chips
:param img: numpy.ndarray
:param threshold: str
:param threshvalue: int
:param blurry: bool
:param background: str
:return df: pandas.core.frame.DataFrame
:return start_coord: tuple
:return spacing: tuple
"""
# Imports
import skimage
import pandas as pd
from scipy.spatial.distance import squareform, pdist
# Get image attributes
height, width, channels = img.shape
totalpx = float(height * width)
# Minimum and maximum square size based upon 12 MP image
minarea = 1000. / 12000000. * totalpx
maxarea = 8000000. / 12000000. * totalpx
# Create gray image for further processing
gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
# Laplacian Fourier Transform detection of blurriness
blurfactor = cv2.Laplacian(gray_img, cv2.CV_64F).var()
# If image is blurry then try to deblur using kernel
if blurry:
# from https://www.packtpub.com/mapt/book/Application+Development/9781785283932/2/ch02lvl1sec22/Sharpening
kernel = np.array([[-1, -1, -1, -1, -1],
[-1, 2, 2, 2, -1],
[-1, 2, 8, 2, -1],
[-1, 2, 2, 2, -1],
[-1, -1, -1, -1, -1]]) / 8.0
# Store result back out for further processing
gray_img = cv2.filter2D(gray_img, -1, kernel)
# In darker samples, the expansion of the histogram hinders finding the squares due to problems with the otsu
# thresholding. If your image has a bright background then apply
if background == 'light':
clahe = cv2.createCLAHE(clipLimit=3.25, tileGridSize=(4, 4))
# apply CLAHE histogram expansion to find squares better with canny edge detection
gray_img = clahe.apply(gray_img)
elif background != 'dark':
fatal_error('Background parameter ' + str(background) + ' is not "light" or "dark"!')
# Thresholding
if threshold == "otsu":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0)
ret, threshold = cv2.threshold(gaussian, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
elif threshold == "normal":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0)
ret, threshold = cv2.threshold(gaussian, threshvalue, 255, cv2.THRESH_BINARY)
elif threshold == "adaptgauss":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (11, 11), 0)
threshold = cv2.adaptiveThreshold(gaussian, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY_INV, 51, 2)
else:
fatal_error('Threshold ' + str(threshold) + ' is not "otsu", "normal", or "adaptgauss"!')
# Apply automatic Canny edge detection using the computed median
edges = skimage.feature.canny(threshold)
edges.dtype = 'uint8'
# Compute contours to find the squares of the card
_, contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Variable of which contour is which
mindex = []
# Variable to store moments
mu = []
# Variable to x,y coordinates in tuples
mc = []
# Variable to x coordinate as integer
mx = []
# Variable to y coordinate as integer
my = []
# Variable to store area
marea = []
# Variable to store whether something is a square (1) or not (0)
msquare = []
# Variable to store square approximation coordinates
msquarecoords = []
# Variable to store child hierarchy element
mchild = []
# Fitted rectangle height
mheight = []
# Fitted rectangle width
mwidth = []
# Ratio of height/width
mwhratio = []
# Extract moments from contour image
for x in range(0, len(contours)):
mu.append(cv2.moments(contours[x]))
marea.append(cv2.contourArea(contours[x]))
mchild.append(int(hierarchy[0][x][2]))
mindex.append(x)
# Cycle through moment data and compute location for each moment
for m in mu:
if m['m00'] != 0: # This is the area term for a moment
mc.append((int(m['m10'] / m['m00']), int(m['m01']) / m['m00']))
mx.append(int(m['m10'] / m['m00']))
my.append(int(m['m01'] / m['m00']))
else:
mc.append((0, 0))
mx.append((0))
my.append((0))
# Loop over our contours and extract data about them
for index, c in enumerate(contours):
# Area isn't 0, but greater than min-area and less than max-area
if marea[index] != 0 and minarea < marea[index] < maxarea:
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.15 * peri, True)
center, wh, angle = cv2.minAreaRect(c) # Rotated rectangle
mwidth.append(wh[0])
mheight.append(wh[1])
mwhratio.append(wh[0] / wh[1])
msquare.append(len(approx))
# If the approx contour has 4 points then we can assume we have 4-sided objects
if len(approx) == 4 or 5:
msquarecoords.append(approx)
else: # It's not square
msquare.append(0)
msquarecoords.append(0)
else: # Contour has area of 0, not interesting
msquare.append(0)
msquarecoords.append(0)
mwidth.append(0)
mheight.append(0)
mwhratio.append(0)
# Make a pandas df from data for filtering out junk
locarea = {'index': mindex, 'X': mx, 'Y': my, 'width': mwidth, 'height': mheight, 'WHratio': mwhratio,
'Area': marea, 'square': msquare, 'child': mchild}
df = pd.DataFrame(locarea)
# Add calculated blur factor to output
df['blurriness'] = blurfactor
# Filter df for attributes that would isolate squares of reasonable size
df = df[(df['Area'] > minarea) & (df['Area'] < maxarea) & (df['child'] != -1) &
(df['square'].isin([4, 5])) & (df['WHratio'] < 1.2) & (df['WHratio'] > 0.85)]
# Filter nested squares from dataframe, was having issues with median being towards smaller nested squares
df = df[~(df['index'].isin(df['index'] + 1))]
# Count up squares that are within a given radius, more squares = more likelihood of them being the card
# Median width of square time 2.5 gives proximity radius for searching for similar squares
median_sq_width_px = df["width"].median()
# Squares that are within 6 widths of the current square
pixeldist = median_sq_width_px * 6
# Computes euclidean distance matrix for the x and y contour centroids
distmatrix = pd.DataFrame(squareform(pdist(df[['X', 'Y']])))
# Add up distances that are less than ones have distance less than pixeldist pixels
distmatrixflat = distmatrix.apply(lambda dist: dist[dist <= pixeldist].count() - 1, axis=1)
# Append distprox summary to dataframe
df = df.assign(distprox=distmatrixflat.values)
# Compute how similar in area the squares are. lots of similar values indicates card
# isolate area measurements
filtered_area = df['Area']
# Create empty matrix for storing comparisons
sizecomp = np.zeros((len(filtered_area), len(filtered_area)))
# Double loop through all areas to compare to each other
for p in range(0, len(filtered_area)):
for o in range(0, len(filtered_area)):
big = max(filtered_area.iloc[p], filtered_area.iloc[o])
small = min(filtered_area.iloc[p], filtered_area.iloc[o])
pct = 100. * (small / big)
sizecomp[p][o] = pct
# How many comparisons given 90% square similarity
sizematrix = pd.DataFrame(sizecomp).apply(lambda sim: sim[sim >= 90].count() - 1, axis=1)
# Append sizeprox summary to dataframe
df = df.assign(sizeprox=sizematrix.values)
# Reorder dataframe for better printing
df = df[['index', 'X', 'Y', 'width', 'height', 'WHratio', 'Area', 'square', 'child',
'blurriness', 'distprox', 'sizeprox']]
# Loosely filter for size and distance (relative size to median)
minsqwidth = median_sq_width_px * 0.80
maxsqwidth = median_sq_width_px * 1.2
df = df[(df['distprox'] >= 5) & (df['sizeprox'] >= 5) & (df['width'] > minsqwidth) &
(df['width'] < maxsqwidth)]
# Filter for proximity again to root out stragglers
# Find and count up squares that are within given radius,
# more squares = more likelihood of them being the card
# Median width of square time 2.5 gives proximity radius for searching for similar squares
median_sq_width_px = df["width"].median()
# Squares that are within 6 widths of the current square
pixeldist = median_sq_width_px * 5
# Computes euclidean distance matrix for the x and y contour centroids
distmatrix = pd.DataFrame(squareform(pdist(df[['X', 'Y']])))
# Add up distances that are less than ones have distance less than pixeldist pixels
distmatrixflat = distmatrix.apply(lambda dist: dist[dist <= pixeldist].count() - 1, axis=1)
# Append distprox summary to dataframe
df = df.assign(distprox=distmatrixflat.values)
# Filter results for distance proximity to other squares
df = df[(df['distprox'] >= 4)]
# Extract the starting coordinate
start_coord = (int(df['X'].min()), int(df['Y'].min()))
# Calculate the range
spacingx_short = (df['X'].max() - df['X'].min()) / 3
spacingy_short = (df['Y'].max() - df['Y'].min()) / 3
spacingx_long = (df['X'].max() - df['X'].min()) / 5
spacingy_long = (df['Y'].max() - df['Y'].min()) / 5
# Chip spacing since 4x6 card assumed
spacing_short = min(spacingx_short, spacingy_short)
spacing_long = max(spacingx_long, spacingy_long)
# Smaller spacing measurement might have a chip missing
spacing = int(max(spacing_short, spacing_long))
spacing = (spacing, spacing)
return df, start_coord, spacing
|
2,557 | def precision_recall_fscore_support(
y_true,
y_pred,
*,
beta=1.0,
labels=None,
pos_label=1,
average=None,
warn_for=("precision", "recall", "f-score"),
sample_weight=None,
zero_division="warn",
):
"""Compute precision, recall, F-measure and support for each class.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, default=1.0
The strength of recall versus precision in the F-score.
labels : array-like, default=None
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, default=1
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : {'binary', 'micro', 'macro', 'samples','weighted'}, \
default=None
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
zero_division : "warn", 0 or 1, default="warn"
Sets the value to return when there is a zero division:
- recall: when there are no positive labels
- precision: when there are no positive predictions
- f-score: both
If set to "warn", this acts as 0, but warnings are also raised.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Estimated precision of data.
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Estimated recall of data.
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Estimated fbeta_score of data.
support : None (if average is not None) or array of int, shape =\
[n_unique_labels]
The number of occurrences of each label in ``y_true``.
Notes
-----
When ``true positive + false positive == 0``, precision is undefined.
When ``true positive + false negative == 0``, recall is undefined.
In such cases, by default the metric will be set to 0, as will f-score,
and ``UndefinedMetricWarning`` will be raised. This behavior can be
modified with ``zero_division``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<https://en.wikipedia.org/wiki/Precision_and_recall>`_.
.. [2] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_.
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
<http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`_.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
(array([0. , 0. , 0.66...]),
array([0., 0., 1.]), array([0. , 0. , 0.8]),
array([2, 2, 2]))
"""
_check_zero_division(zero_division)
if beta < 0:
raise ValueError("beta should be >=0 in the F-beta score")
labels = _check_set_wise_labels(y_true, y_pred, average, labels, pos_label)
# Calculate tp_sum, pred_sum, true_sum ###
samplewise = average == "samples"
MCM = multilabel_confusion_matrix(
y_true,
y_pred,
sample_weight=sample_weight,
labels=labels,
samplewise=samplewise,
)
tp_sum = MCM[:, 1, 1]
pred_sum = tp_sum + MCM[:, 0, 1]
true_sum = tp_sum + MCM[:, 1, 0]
if average == "micro":
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
# Finally, we have all our sufficient statistics. Divide! #
beta2 = beta ** 2
# Divide, and on zero-division, set scores and/or warn according to
# zero_division:
precision = _prf_divide(
tp_sum, pred_sum, "precision", "predicted", average, warn_for, zero_division
)
recall = _prf_divide(
tp_sum, true_sum, "recall", "true", average, warn_for, zero_division
)
# warn for f-score only if zero_division is warn, it is in warn_for
# and BOTH prec and rec are ill-defined
if zero_division == "warn" and ("f-score",) == warn_for:
if (pred_sum[true_sum == 0] == 0).any():
_warn_prf(average, "true nor predicted", "F-score is", len(true_sum))
# if tp == 0 F will be 1 only if all predictions are zero, all labels are
# zero, and zero_division=1. In all other case, 0
if np.isposinf(beta):
f_score = recall
else:
denom = beta2 * precision + recall
denom[denom == 0.0] = 1 # avoid division by 0
f_score = (1 + beta2) * precision * recall / denom
# Average the results
if average == "weighted":
weights = true_sum
if weights.sum() == 0:
zero_division_value = np.float64(1.0)
if zero_division in ["warn", 0]:
zero_division_value = np.float64(0.0)
# precision is zero_division if there are no positive predictions
# recall is zero_division if there are no positive labels
# fscore is zero_division if all labels AND predictions are
# negative
if pred_sum.sum() == 0:
return (
zero_division_value,
zero_division_value,
zero_division_value,
None,
)
else:
return (np.float64(0.0), zero_division_value, np.float64(0.0), None)
elif average == "samples":
weights = sample_weight
else:
weights = None
if average is not None:
assert average != "binary" or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
| def precision_recall_fscore_support(
y_true,
y_pred,
*,
beta=1.0,
labels=None,
pos_label=1,
average=None,
warn_for=("precision", "recall", "f-score"),
sample_weight=None,
zero_division="warn",
):
"""Compute precision, recall, F-measure and support for each class.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, default=1.0
The strength of recall versus precision in the F-score.
labels : array-like, default=None
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, default=1
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : {'binary', 'micro', 'macro', 'samples','weighted'}, \
default=None
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
zero_division : "warn", 0 or 1, default="warn"
Sets the value to return when there is a zero division:
- recall: when there are no positive labels
- precision: when there are no positive predictions
- f-score: both
If set to "warn", this acts as 0, but warnings are also raised.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Estimated precision of data.
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Estimated recall of data.
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score.
support : None (if average is not None) or array of int, shape =\
[n_unique_labels]
The number of occurrences of each label in ``y_true``.
Notes
-----
When ``true positive + false positive == 0``, precision is undefined.
When ``true positive + false negative == 0``, recall is undefined.
In such cases, by default the metric will be set to 0, as will f-score,
and ``UndefinedMetricWarning`` will be raised. This behavior can be
modified with ``zero_division``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<https://en.wikipedia.org/wiki/Precision_and_recall>`_.
.. [2] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_.
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
<http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`_.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
(array([0. , 0. , 0.66...]),
array([0., 0., 1.]), array([0. , 0. , 0.8]),
array([2, 2, 2]))
"""
_check_zero_division(zero_division)
if beta < 0:
raise ValueError("beta should be >=0 in the F-beta score")
labels = _check_set_wise_labels(y_true, y_pred, average, labels, pos_label)
# Calculate tp_sum, pred_sum, true_sum ###
samplewise = average == "samples"
MCM = multilabel_confusion_matrix(
y_true,
y_pred,
sample_weight=sample_weight,
labels=labels,
samplewise=samplewise,
)
tp_sum = MCM[:, 1, 1]
pred_sum = tp_sum + MCM[:, 0, 1]
true_sum = tp_sum + MCM[:, 1, 0]
if average == "micro":
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
# Finally, we have all our sufficient statistics. Divide! #
beta2 = beta ** 2
# Divide, and on zero-division, set scores and/or warn according to
# zero_division:
precision = _prf_divide(
tp_sum, pred_sum, "precision", "predicted", average, warn_for, zero_division
)
recall = _prf_divide(
tp_sum, true_sum, "recall", "true", average, warn_for, zero_division
)
# warn for f-score only if zero_division is warn, it is in warn_for
# and BOTH prec and rec are ill-defined
if zero_division == "warn" and ("f-score",) == warn_for:
if (pred_sum[true_sum == 0] == 0).any():
_warn_prf(average, "true nor predicted", "F-score is", len(true_sum))
# if tp == 0 F will be 1 only if all predictions are zero, all labels are
# zero, and zero_division=1. In all other case, 0
if np.isposinf(beta):
f_score = recall
else:
denom = beta2 * precision + recall
denom[denom == 0.0] = 1 # avoid division by 0
f_score = (1 + beta2) * precision * recall / denom
# Average the results
if average == "weighted":
weights = true_sum
if weights.sum() == 0:
zero_division_value = np.float64(1.0)
if zero_division in ["warn", 0]:
zero_division_value = np.float64(0.0)
# precision is zero_division if there are no positive predictions
# recall is zero_division if there are no positive labels
# fscore is zero_division if all labels AND predictions are
# negative
if pred_sum.sum() == 0:
return (
zero_division_value,
zero_division_value,
zero_division_value,
None,
)
else:
return (np.float64(0.0), zero_division_value, np.float64(0.0), None)
elif average == "samples":
weights = sample_weight
else:
weights = None
if average is not None:
assert average != "binary" or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
|
26,616 | def _trigger_dag(
dag_id: str,
dag_bag: DagBag,
dag_run: DagModel,
run_id: Optional[str],
conf: Optional[Union[dict, str]],
execution_date: Optional[datetime],
replace_microseconds: bool,
) -> List[DagRun]: # pylint: disable=too-many-arguments
"""Triggers DAG run.
:param dag_id: DAG ID
:param dag_bag: DAG Bag model
:param dag_run: DAG Run model
:param run_id: ID of the dag_run
:param conf: configuration
:param execution_date: date of execution
:param replace_microseconds: whether microseconds should be zeroed
:return: list of triggered dags
"""
if dag_id not in dag_bag.dags:
raise DagNotFound("Dag id {} not found".format(dag_id))
dag = dag_bag.get_dag(dag_id)
execution_date = execution_date if execution_date else timezone.utcnow()
if not timezone.is_localized(execution_date):
raise ValueError("The execution_date shoudl be localized")
if replace_microseconds:
execution_date = execution_date.replace(microsecond=0)
if not run_id:
run_id = "manual__{0}".format(execution_date.isoformat())
dag_run_id = dag_run.find(dag_id=dag_id, run_id=run_id)
if dag_run_id:
raise DagRunAlreadyExists("Run id {} already exists for dag id {}".format(
run_id,
dag_id
))
run_conf = None
if conf:
if isinstance(conf, dict):
run_conf = conf
else:
run_conf = json.loads(conf)
triggers = []
dags_to_trigger = []
dags_to_trigger.append(dag)
while dags_to_trigger:
dag = dags_to_trigger.pop()
trigger = dag.create_dagrun(
run_id=run_id,
execution_date=execution_date,
state=State.RUNNING,
conf=run_conf,
external_trigger=True,
)
triggers.append(trigger)
if dag.subdags:
dags_to_trigger.extend(dag.subdags)
return triggers
| def _trigger_dag(
dag_id: str,
dag_bag: DagBag,
dag_run: DagModel,
run_id: Optional[str],
conf: Optional[Union[dict, str]],
execution_date: Optional[datetime],
replace_microseconds: bool,
) -> List[DagRun]: # pylint: disable=too-many-arguments
"""Triggers DAG run.
:param dag_id: DAG ID
:param dag_bag: DAG Bag model
:param dag_run: DAG Run model
:param run_id: ID of the dag_run
:param conf: configuration
:param execution_date: date of execution
:param replace_microseconds: whether microseconds should be zeroed
:return: list of triggered dags
"""
if dag_id not in dag_bag.dags:
raise DagNotFound("Dag id {} not found".format(dag_id))
dag = dag_bag.get_dag(dag_id)
execution_date = execution_date if execution_date else timezone.utcnow()
if not timezone.is_localized(execution_date):
raise ValueError("The execution_date should be localized")
if replace_microseconds:
execution_date = execution_date.replace(microsecond=0)
if not run_id:
run_id = "manual__{0}".format(execution_date.isoformat())
dag_run_id = dag_run.find(dag_id=dag_id, run_id=run_id)
if dag_run_id:
raise DagRunAlreadyExists("Run id {} already exists for dag id {}".format(
run_id,
dag_id
))
run_conf = None
if conf:
if isinstance(conf, dict):
run_conf = conf
else:
run_conf = json.loads(conf)
triggers = []
dags_to_trigger = []
dags_to_trigger.append(dag)
while dags_to_trigger:
dag = dags_to_trigger.pop()
trigger = dag.create_dagrun(
run_id=run_id,
execution_date=execution_date,
state=State.RUNNING,
conf=run_conf,
external_trigger=True,
)
triggers.append(trigger)
if dag.subdags:
dags_to_trigger.extend(dag.subdags)
return triggers
|
21,871 | def main() -> None:
"""
Entrypoint for the forking launcher.
"""
# First argument is the path to the database config
db_config_path = sys.argv[1]
# Split up the subsequent arguments into each workers' arguments;
# `--` is our delimiter of choice.
args = sys.argv[2:]
args_by_worker: List[List[str]] = [
list(args)
for cond, args in itertools.groupby(args, lambda ele: ele != "--")
if cond and args
]
# Prevent Twisted from installing a shared reactor that all the workers will
# inherit when we fork(), by installing our own beforehand.
proxy_reactor = ProxiedReactor()
installReactor(proxy_reactor)
# Import the entrypoints for all the workers.
worker_functions = []
for worker_args in args_by_worker:
worker_module = importlib.import_module(worker_args[0])
worker_functions.append(worker_module.main)
# At this point, we've imported all the main entrypoints for all the workers.
# Now we basically just fork() out to create the workers we need.
# Because we're using fork(), all the workers get a clone of this launcher's
# memory space and don't need to repeat the work of loading the code!
# Instead of using fork() directly, we use the multiprocessing library,#
# which uses fork() on Unix platforms.
# We need to prepare the database first as otherwise all the workers will
# try to create a schema version table and some will crash out.
from synapse._scripts import update_synapse_database
update_proc = multiprocessing.Process(
target=_worker_entrypoint,
args=(
update_synapse_database.main,
proxy_reactor,
[
"update_synapse_database",
"--database-config",
db_config_path,
"--run-background-updates",
],
),
)
print("===== PREPARING DATABASE =====", file=sys.stderr)
update_proc.start()
update_proc.join()
print("===== PREPARED DATABASE =====", file=sys.stderr)
processes = []
for (func, worker_args) in zip(worker_functions, args_by_worker):
process = multiprocessing.Process(
target=_worker_entrypoint, args=(func, proxy_reactor, worker_args)
)
process.start()
processes.append(process)
# Be a good parent and wait for our children to die before exiting.
for process in processes:
process.join()
| def main() -> None:
"""
Entrypoint for the forking launcher.
"""
# First argument is the path to the database config
db_config_path = sys.argv[1]
# Split up the subsequent arguments into each workers' arguments;
# `--` is our delimiter of choice.
args = sys.argv[2:]
args_by_worker: List[List[str]] = [
list(args)
for cond, args in itertools.groupby(args, lambda ele: ele != "--")
if cond and args
]
# Prevent Twisted from installing a shared reactor that all the workers will
# inherit when we fork(), by installing our own beforehand.
proxy_reactor = ProxiedReactor()
installReactor(proxy_reactor)
# Import the entrypoints for all the workers.
worker_functions = []
for worker_args in args_by_worker:
worker_module = importlib.import_module(worker_args[0])
worker_functions.append(worker_module.main)
# At this point, we've imported all the main entrypoints for all the workers.
# Now we basically just fork() out to create the workers we need.
# Because we're using fork(), all the workers get a clone of this launcher's
# memory space and don't need to repeat the work of loading the code!
# Instead of using fork() directly, we use the multiprocessing library,
# which uses fork() on Unix platforms.
# We need to prepare the database first as otherwise all the workers will
# try to create a schema version table and some will crash out.
from synapse._scripts import update_synapse_database
update_proc = multiprocessing.Process(
target=_worker_entrypoint,
args=(
update_synapse_database.main,
proxy_reactor,
[
"update_synapse_database",
"--database-config",
db_config_path,
"--run-background-updates",
],
),
)
print("===== PREPARING DATABASE =====", file=sys.stderr)
update_proc.start()
update_proc.join()
print("===== PREPARED DATABASE =====", file=sys.stderr)
processes = []
for (func, worker_args) in zip(worker_functions, args_by_worker):
process = multiprocessing.Process(
target=_worker_entrypoint, args=(func, proxy_reactor, worker_args)
)
process.start()
processes.append(process)
# Be a good parent and wait for our children to die before exiting.
for process in processes:
process.join()
|
52,199 | def create_landing_page(page_title, page_slug, parent_path=None, \
has_email_signup=False, email_gd_code="USCFPB_000"):
# create a new page and set it as the child of an existing page
# return list of route paths
# get the root of the current site
site_model = apps.get_model('wagtailcore', 'Site')
site = site_model.objects.get(is_default_site=True)
root = site.root_page
# since parent was not provided, make root
parent = root
# if a parent path is provided, use that as parent
if parent_path:
path_components = \
[component for component in parent_path.split('/') if component]
try:
route = root.route(None, path_components)
except Http404:
print("skipping page creation")
parent = route.page
# create page, add it as a child of parent, save, and publish
new_page = LandingPage(title=page_title, slug=page_slug)
# update sidefoot streamfield if required
if has_email_signup:
new_page.sidefoot=json.dumps([
{'type':'email_signup', 'value':{'gd_code': email_gd_code}}
])
try:
parent.add_child(instance=new_page)
new_page.save_revision().publish()
except ValidationError:
print("skipping page creation")
# return path
return new_page.get_url(None, site)
| def create_landing_page(page_title, page_slug, parent_path=None, \
has_email_signup=False, email_gd_code="USCFPB_000"):
# create a new page and set it as the child of an existing page
# return list of route paths
# get the root of the current site
site_model = apps.get_model('wagtailcore', 'Site')
site = site_model.objects.get(is_default_site=True)
root = site.root_page
# since parent was not provided, make root
parent = root
# if a parent path is provided, use that as parent
if parent_path:
path_components = [
component for component in parent_path.split('/') if component
]
try:
route = root.route(None, path_components)
except Http404:
print("skipping page creation")
parent = route.page
# create page, add it as a child of parent, save, and publish
new_page = LandingPage(title=page_title, slug=page_slug)
# update sidefoot streamfield if required
if has_email_signup:
new_page.sidefoot=json.dumps([
{'type':'email_signup', 'value':{'gd_code': email_gd_code}}
])
try:
parent.add_child(instance=new_page)
new_page.save_revision().publish()
except ValidationError:
print("skipping page creation")
# return path
return new_page.get_url(None, site)
|
39,053 | def ipvXmessage(version=None):
if version == 6:
ip_repr = "%s://[%s]:%d"
elif version == 4:
ip_repr = "%s://%s:%d"
message = f"Uvicorn running on {ip_repr} (Press CTRL+C to quit)"
color_message = (
"Uvicorn running on "
+ click.style(ip_repr, bold=True)
+ " (Press CTRL+C to quit)"
)
return message, color_message
| def _get_server_start_message(host_ip_version: int = 4) -> Tuple[str, str]:
if version == 6:
ip_repr = "%s://[%s]:%d"
elif version == 4:
ip_repr = "%s://%s:%d"
message = f"Uvicorn running on {ip_repr} (Press CTRL+C to quit)"
color_message = (
"Uvicorn running on "
+ click.style(ip_repr, bold=True)
+ " (Press CTRL+C to quit)"
)
return message, color_message
|
339 | def sample_smc(
draws=2000,
kernel=IMH,
*,
start=None,
model=None,
random_seed=-1,
chains=None,
cores=None,
compute_convergence_checks=True,
return_inferencedata=True,
idata_kwargs=None,
progressbar=True,
**kernel_kwargs,
):
r"""
Sequential Monte Carlo based sampling.
Parameters
----------
draws: int
The number of samples to draw from the posterior (i.e. last stage). And also the number of
independent chains. Defaults to 2000.
kernel: SMC Kernel used. Defaults to pm.smc.IMH (Independent Metropolis Hastings)
start: dict, or array of dict
Starting point in parameter space. It should be a list of dict with length `chains`.
When None (default) the starting point is sampled from the prior distribution.
model: Model (optional if in ``with`` context)).
random_seed: int
random seed
chains : int
The number of chains to sample. Running independent chains is important for some
convergence statistics. If ``None`` (default), then set to either ``cores`` or 2, whichever
is larger.
cores : int
The number of chains to run in parallel. If ``None``, set to the number of CPUs in the
system.
compute_convergence_checks : bool
Whether to compute sampler statistics like Gelman-Rubin and ``effective_n``.
Defaults to ``True``.
return_inferencedata : bool, default=True
Whether to return the trace as an :class:`arviz:arviz.InferenceData` (True) object or a `MultiTrace` (False)
Defaults to ``True``.
idata_kwargs : dict, optional
Keyword arguments for :func:`pymc3.to_inference_data`
progressbar : bool, optional default=True
Whether or not to display a progress bar in the command line.
**kernel_kwargs: keyword arguments passed to the SMC kernel.
The default IMH kernel takes the following keywords:
threshold: float
Determines the change of beta from stage to stage, i.e.indirectly the number of stages,
the higher the value of `threshold` the higher the number of stages. Defaults to 0.5.
It should be between 0 and 1.
n_steps: int
The number of steps of each Markov Chain. If ``tune_steps == True`` ``n_steps`` will be used
for the first stage and for the others it will be determined automatically based on the
acceptance rate and `p_acc_rate`, the max number of steps is ``n_steps``.
tune_steps: bool
Whether to compute the number of steps automatically or not. Defaults to True
p_acc_rate: float
Used to compute ``n_steps`` when ``tune_steps == True``. The higher the value of
``p_acc_rate`` the higher the number of steps computed automatically. Defaults to 0.85.
It should be between 0 and 1.
For other kernels one should check their specific docstrings for details on specific keword arguments.
Notes
-----
SMC works by moving through successive stages. At each stage the inverse temperature
:math:`\beta` is increased a little bit (starting from 0 up to 1). When :math:`\beta` = 0
we have the prior distribution and when :math:`\beta` =1 we have the posterior distribution.
So in more general terms we are always computing samples from a tempered posterior that we can
write as:
.. math::
p(\theta \mid y)_{\beta} = p(y \mid \theta)^{\beta} p(\theta)
A summary of the algorithm is:
1. Initialize :math:`\beta` at zero and stage at zero.
2. Generate N samples :math:`S_{\beta}` from the prior (because when :math `\beta = 0` the
tempered posterior is the prior).
3. Increase :math:`\beta` in order to make the effective sample size equals some predefined
value (we use :math:`Nt`, where :math:`t` is 0.5 by default).
4. Compute a set of N importance weights W. The weights are computed as the ratio of the
likelihoods of a sample at stage i+1 and stage i.
5. Obtain :math:`S_{w}` by re-sampling according to W.
6. Use W to compute the mean and covariance for the proposal distribution, a MVNormal.
7. For stages other than 0 use the acceptance rate from the previous stage to estimate
`n_steps`.
8. Run N independent Metropolis-Hastings (IMH) chains (each one of length `n_steps`),
starting each one from a different sample in :math:`S_{w}`. Samples are IMH as the proposal
mean is the of the previous posterior stage and not the current point in parameter space.
9. Repeat from step 3 until :math:`\beta \ge 1`.
10. The final result is a collection of N samples from the posterior.
References
----------
.. [Minson2013] Minson, S. E. and Simons, M. and Beck, J. L., (2013),
Bayesian inversion for finite fault earthquake source models I- Theory and algorithm.
Geophysical Journal International, 2013, 194(3), pp.1701-1726,
`link <https://gji.oxfordjournals.org/content/194/3/1701.full>`__
.. [Ching2007] Ching, J. and Chen, Y. (2007).
Transitional Markov Chain Monte Carlo Method for Bayesian Model Updating, Model Class
Selection, and Model Averaging. J. Eng. Mech., 10.1061/(ASCE)0733-9399(2007)133:7(816),
816-832. `link <http://ascelibrary.org/doi/abs/10.1061/%28ASCE%290733-9399
%282007%29133:7%28816%29>`__
"""
if isinstance(kernel, str) and kernel.lower() in ("abc", "metropolis"):
warnings.warn(
f'The kernel string argument "{kernel}" in sample_smc has been deprecated. '
f"It is no longer needed to distinguish between `abc` and `metropolis`",
DeprecationWarning,
stacklevel=2,
)
kernel = IMH
if kernel_kwargs.pop("save_sim_data", None) is not None:
warnings.warn(
"save_sim_data has been deprecated. Use pm.sample_posterior_predictive "
"to obtain the same type of samples.",
DeprecationWarning,
stacklevel=2,
)
if kernel_kwargs.pop("save_log_pseudolikelihood", None) is not None:
warnings.warn(
"save_log_pseudolikelihood has been deprecated. This information is "
"now saved as log_likelihood in models with Simulator distributions.",
DeprecationWarning,
stacklevel=2,
)
parallel = kernel_kwargs.pop("parallel", None)
if parallel is not None:
warnings.warn(
"The argument parallel is deprecated, use the argument cores instead.",
DeprecationWarning,
stacklevel=2,
)
if parallel is False:
cores = 1
_log = logging.getLogger("pymc3")
_log.info("Initializing SMC sampler...")
model = modelcontext(model)
if model.name:
raise NotImplementedError(
"The SMC implementation currently does not support named models. "
"See https://github.com/pymc-devs/pymc3/pull/4365."
)
if cores is None:
cores = _cpu_count()
if chains is None:
chains = max(2, cores)
else:
cores = min(chains, cores)
_log.info(
f"Sampling {chains} chain{'s' if chains > 1 else ''} "
f"in {cores} job{'s' if cores > 1 else ''}"
)
if random_seed == -1:
random_seed = None
if chains == 1 and isinstance(random_seed, int):
random_seed = [random_seed]
if random_seed is None or isinstance(random_seed, int):
if random_seed is not None:
np.random.seed(random_seed)
random_seed = [np.random.randint(2 ** 30) for _ in range(chains)]
if not isinstance(random_seed, Iterable):
raise TypeError("Invalid value for `random_seed`. Must be tuple, list or int")
params = (
draws,
kernel,
start,
model,
)
t1 = time.time()
if cores > 1:
pbar = progress_bar((), total=100, display=progressbar)
pbar.update(0)
pbars = [pbar] + [None] * (chains - 1)
pool = mp.Pool(cores)
# "manually" (de)serialize params before/after multiprocessing
params = tuple(cloudpickle.dumps(p) for p in params)
kernel_kwargs = {key: cloudpickle.dumps(value) for key, value in kernel_kwargs.items()}
results = _starmap_with_kwargs(
pool,
_sample_smc_int,
[(*params, random_seed[chain], chain, pbars[chain]) for chain in range(chains)],
repeat(kernel_kwargs),
)
results = tuple(cloudpickle.loads(r) for r in results)
pool.close()
pool.join()
else:
results = []
pbar = progress_bar((), total=100 * chains, display=progressbar)
pbar.update(0)
for chain in range(chains):
pbar.offset = 100 * chain
pbar.base_comment = f"Chain: {chain+1}/{chains}"
results.append(
_sample_smc_int(*params, random_seed[chain], chain, pbar, **kernel_kwargs)
)
(
traces,
sample_stats,
sample_settings,
) = zip(*results)
trace = MultiTrace(traces)
idata = None
# Save sample_stats
_t_sampling = time.time() - t1
sample_settings_dict = sample_settings[0]
sample_settings_dict["_t_sampling"] = _t_sampling
sample_stats_dict = sample_stats[0]
if chains > 1:
# Collect the stat values from each chain in a single list
for stat in sample_stats[0].keys():
value_list = []
for chain_sample_stats in sample_stats:
value_list.append(chain_sample_stats[stat])
sample_stats_dict[stat] = value_list
if not return_inferencedata:
for stat, value in sample_stats_dict.items():
setattr(trace.report, stat, value)
for stat, value in sample_settings_dict.items():
setattr(trace.report, stat, value)
else:
for stat, value in sample_stats_dict.items():
if chains > 1:
# Different chains might have more iteration steps, leading to a
# non-square `sample_stats` dataset, we cast as `object` to avoid
# numpy ragged array deprecation warning
sample_stats_dict[stat] = np.array(value, dtype=object)
else:
sample_stats_dict[stat] = np.array(value)
sample_stats = dict_to_dataset(
sample_stats_dict,
attrs=sample_settings_dict,
library=pymc3,
)
ikwargs = dict(model=model)
if idata_kwargs is not None:
ikwargs.update(idata_kwargs)
idata = to_inference_data(trace, **ikwargs)
idata = InferenceData(**idata, sample_stats=sample_stats)
if compute_convergence_checks:
if draws < 100:
warnings.warn(
"The number of samples is too small to check convergence reliably.",
stacklevel=2,
)
else:
if idata is None:
idata = to_inference_data(trace, log_likelihood=False)
trace.report._run_convergence_checks(idata, model)
trace.report._log_summary()
return idata if return_inferencedata else trace
| def sample_smc(
draws=2000,
kernel=IMH,
*,
start=None,
model=None,
random_seed=-1,
chains=None,
cores=None,
compute_convergence_checks=True,
return_inferencedata=True,
idata_kwargs=None,
progressbar=True,
**kernel_kwargs,
):
r"""
Sequential Monte Carlo based sampling.
Parameters
----------
draws: int
The number of samples to draw from the posterior (i.e. last stage). And also the number of
independent chains. Defaults to 2000.
kernel: SMC Kernel used. Defaults to pm.smc.IMH (Independent Metropolis Hastings)
start: dict, or array of dict
Starting point in parameter space. It should be a list of dict with length `chains`.
When None (default) the starting point is sampled from the prior distribution.
model: Model (optional if in ``with`` context)).
random_seed: int
random seed
chains : int
The number of chains to sample. Running independent chains is important for some
convergence statistics. If ``None`` (default), then set to either ``cores`` or 2, whichever
is larger.
cores : int
The number of chains to run in parallel. If ``None``, set to the number of CPUs in the
system.
compute_convergence_checks : bool
Whether to compute sampler statistics like Gelman-Rubin and ``effective_n``.
Defaults to ``True``.
return_inferencedata : bool, default=True
Whether to return the trace as an :class:`arviz:arviz.InferenceData` (True) object or a `MultiTrace` (False)
Defaults to ``True``.
idata_kwargs : dict, optional
Keyword arguments for :func:`pymc3.to_inference_data`
progressbar : bool, optional default=True
Whether or not to display a progress bar in the command line.
**kernel_kwargs: keyword arguments passed to the SMC kernel.
The default IMH kernel takes the following keywords:
threshold: float
Determines the change of beta from stage to stage, i.e. indirectly the number of stages,
the higher the value of `threshold` the higher the number of stages. Defaults to 0.5.
It should be between 0 and 1.
n_steps: int
The number of steps of each Markov Chain. If ``tune_steps == True`` ``n_steps`` will be used
for the first stage and for the others it will be determined automatically based on the
acceptance rate and `p_acc_rate`, the max number of steps is ``n_steps``.
tune_steps: bool
Whether to compute the number of steps automatically or not. Defaults to True
p_acc_rate: float
Used to compute ``n_steps`` when ``tune_steps == True``. The higher the value of
``p_acc_rate`` the higher the number of steps computed automatically. Defaults to 0.85.
It should be between 0 and 1.
For other kernels one should check their specific docstrings for details on specific keword arguments.
Notes
-----
SMC works by moving through successive stages. At each stage the inverse temperature
:math:`\beta` is increased a little bit (starting from 0 up to 1). When :math:`\beta` = 0
we have the prior distribution and when :math:`\beta` =1 we have the posterior distribution.
So in more general terms we are always computing samples from a tempered posterior that we can
write as:
.. math::
p(\theta \mid y)_{\beta} = p(y \mid \theta)^{\beta} p(\theta)
A summary of the algorithm is:
1. Initialize :math:`\beta` at zero and stage at zero.
2. Generate N samples :math:`S_{\beta}` from the prior (because when :math `\beta = 0` the
tempered posterior is the prior).
3. Increase :math:`\beta` in order to make the effective sample size equals some predefined
value (we use :math:`Nt`, where :math:`t` is 0.5 by default).
4. Compute a set of N importance weights W. The weights are computed as the ratio of the
likelihoods of a sample at stage i+1 and stage i.
5. Obtain :math:`S_{w}` by re-sampling according to W.
6. Use W to compute the mean and covariance for the proposal distribution, a MVNormal.
7. For stages other than 0 use the acceptance rate from the previous stage to estimate
`n_steps`.
8. Run N independent Metropolis-Hastings (IMH) chains (each one of length `n_steps`),
starting each one from a different sample in :math:`S_{w}`. Samples are IMH as the proposal
mean is the of the previous posterior stage and not the current point in parameter space.
9. Repeat from step 3 until :math:`\beta \ge 1`.
10. The final result is a collection of N samples from the posterior.
References
----------
.. [Minson2013] Minson, S. E. and Simons, M. and Beck, J. L., (2013),
Bayesian inversion for finite fault earthquake source models I- Theory and algorithm.
Geophysical Journal International, 2013, 194(3), pp.1701-1726,
`link <https://gji.oxfordjournals.org/content/194/3/1701.full>`__
.. [Ching2007] Ching, J. and Chen, Y. (2007).
Transitional Markov Chain Monte Carlo Method for Bayesian Model Updating, Model Class
Selection, and Model Averaging. J. Eng. Mech., 10.1061/(ASCE)0733-9399(2007)133:7(816),
816-832. `link <http://ascelibrary.org/doi/abs/10.1061/%28ASCE%290733-9399
%282007%29133:7%28816%29>`__
"""
if isinstance(kernel, str) and kernel.lower() in ("abc", "metropolis"):
warnings.warn(
f'The kernel string argument "{kernel}" in sample_smc has been deprecated. '
f"It is no longer needed to distinguish between `abc` and `metropolis`",
DeprecationWarning,
stacklevel=2,
)
kernel = IMH
if kernel_kwargs.pop("save_sim_data", None) is not None:
warnings.warn(
"save_sim_data has been deprecated. Use pm.sample_posterior_predictive "
"to obtain the same type of samples.",
DeprecationWarning,
stacklevel=2,
)
if kernel_kwargs.pop("save_log_pseudolikelihood", None) is not None:
warnings.warn(
"save_log_pseudolikelihood has been deprecated. This information is "
"now saved as log_likelihood in models with Simulator distributions.",
DeprecationWarning,
stacklevel=2,
)
parallel = kernel_kwargs.pop("parallel", None)
if parallel is not None:
warnings.warn(
"The argument parallel is deprecated, use the argument cores instead.",
DeprecationWarning,
stacklevel=2,
)
if parallel is False:
cores = 1
_log = logging.getLogger("pymc3")
_log.info("Initializing SMC sampler...")
model = modelcontext(model)
if model.name:
raise NotImplementedError(
"The SMC implementation currently does not support named models. "
"See https://github.com/pymc-devs/pymc3/pull/4365."
)
if cores is None:
cores = _cpu_count()
if chains is None:
chains = max(2, cores)
else:
cores = min(chains, cores)
_log.info(
f"Sampling {chains} chain{'s' if chains > 1 else ''} "
f"in {cores} job{'s' if cores > 1 else ''}"
)
if random_seed == -1:
random_seed = None
if chains == 1 and isinstance(random_seed, int):
random_seed = [random_seed]
if random_seed is None or isinstance(random_seed, int):
if random_seed is not None:
np.random.seed(random_seed)
random_seed = [np.random.randint(2 ** 30) for _ in range(chains)]
if not isinstance(random_seed, Iterable):
raise TypeError("Invalid value for `random_seed`. Must be tuple, list or int")
params = (
draws,
kernel,
start,
model,
)
t1 = time.time()
if cores > 1:
pbar = progress_bar((), total=100, display=progressbar)
pbar.update(0)
pbars = [pbar] + [None] * (chains - 1)
pool = mp.Pool(cores)
# "manually" (de)serialize params before/after multiprocessing
params = tuple(cloudpickle.dumps(p) for p in params)
kernel_kwargs = {key: cloudpickle.dumps(value) for key, value in kernel_kwargs.items()}
results = _starmap_with_kwargs(
pool,
_sample_smc_int,
[(*params, random_seed[chain], chain, pbars[chain]) for chain in range(chains)],
repeat(kernel_kwargs),
)
results = tuple(cloudpickle.loads(r) for r in results)
pool.close()
pool.join()
else:
results = []
pbar = progress_bar((), total=100 * chains, display=progressbar)
pbar.update(0)
for chain in range(chains):
pbar.offset = 100 * chain
pbar.base_comment = f"Chain: {chain+1}/{chains}"
results.append(
_sample_smc_int(*params, random_seed[chain], chain, pbar, **kernel_kwargs)
)
(
traces,
sample_stats,
sample_settings,
) = zip(*results)
trace = MultiTrace(traces)
idata = None
# Save sample_stats
_t_sampling = time.time() - t1
sample_settings_dict = sample_settings[0]
sample_settings_dict["_t_sampling"] = _t_sampling
sample_stats_dict = sample_stats[0]
if chains > 1:
# Collect the stat values from each chain in a single list
for stat in sample_stats[0].keys():
value_list = []
for chain_sample_stats in sample_stats:
value_list.append(chain_sample_stats[stat])
sample_stats_dict[stat] = value_list
if not return_inferencedata:
for stat, value in sample_stats_dict.items():
setattr(trace.report, stat, value)
for stat, value in sample_settings_dict.items():
setattr(trace.report, stat, value)
else:
for stat, value in sample_stats_dict.items():
if chains > 1:
# Different chains might have more iteration steps, leading to a
# non-square `sample_stats` dataset, we cast as `object` to avoid
# numpy ragged array deprecation warning
sample_stats_dict[stat] = np.array(value, dtype=object)
else:
sample_stats_dict[stat] = np.array(value)
sample_stats = dict_to_dataset(
sample_stats_dict,
attrs=sample_settings_dict,
library=pymc3,
)
ikwargs = dict(model=model)
if idata_kwargs is not None:
ikwargs.update(idata_kwargs)
idata = to_inference_data(trace, **ikwargs)
idata = InferenceData(**idata, sample_stats=sample_stats)
if compute_convergence_checks:
if draws < 100:
warnings.warn(
"The number of samples is too small to check convergence reliably.",
stacklevel=2,
)
else:
if idata is None:
idata = to_inference_data(trace, log_likelihood=False)
trace.report._run_convergence_checks(idata, model)
trace.report._log_summary()
return idata if return_inferencedata else trace
|
3,889 | def greedy_modularity_communities(G, weight=None, resolution=1, n_communities=1):
r"""Find communities in G using greedy modularity maximization.
This function uses Clauset-Newman-Moore greedy modularity maximization [2]_.
This method currently supports the Graph class.
Greedy modularity maximization begins with each node in its own community
and joins the pair of communities that most increases modularity until no
such pair exists or until desired number of communities is reached.
This function maximizes the generalized modularity, where `resolution`
is the resolution parameter, often expressed as $\gamma$.
See :func:`~networkx.algorithms.community.quality.modularity`.
Parameters
----------
G : NetworkX graph
weight : string or None, optional (default=None)
The name of an edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights adjacent to the node.
n_communities: int
desired number of communities, defaults to 1 and falls back to 1 if
the given number is larger than the initial amount of communities
Returns
-------
list
A list of sets of nodes, one for each community.
Sorted by length with largest communities first.
Examples
--------
>>> from networkx.algorithms.community import greedy_modularity_communities
>>> G = nx.karate_club_graph()
>>> c = list(greedy_modularity_communities(G))
>>> sorted(c[0])
[8, 14, 15, 18, 20, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33]
See Also
--------
modularity
References
----------
.. [1] M. E. J Newman "Networks: An Introduction", page 224
Oxford University Press 2011.
.. [2] Clauset, A., Newman, M. E., & Moore, C.
"Finding community structure in very large networks."
Physical Review E 70(6), 2004.
.. [3] Reichardt and Bornholdt "Statistical Mechanics of Community
Detection" Phys. Rev. E74, 2006.
"""
# Count nodes and edges
N = len(G.nodes())
m = sum([d.get("weight", 1) for u, v, d in G.edges(data=True)])
q0 = 1.0 / (2.0 * m)
if n_communities > N:
warnings.warn(
"Input n_communities is greater than number of nodes of G."
"Falling back to default n_communities = 1."
)
n_communities = 1
# Map node labels to contiguous integers
label_for_node = {i: v for i, v in enumerate(G.nodes())}
node_for_label = {label_for_node[i]: i for i in range(N)}
# Calculate degrees
k_for_label = G.degree(G.nodes(), weight=weight)
k = [k_for_label[label_for_node[i]] for i in range(N)]
# Initialize community and merge lists
communities = {i: frozenset([i]) for i in range(N)}
merges = []
# Initial modularity
partition = [[label_for_node[x] for x in c] for c in communities.values()]
q_cnm = modularity(G, partition, resolution=resolution)
# Initialize data structures
# CNM Eq 8-9 (Eq 8 was missing a factor of 2 (from A_ij + A_ji)
# a[i]: fraction of edges within community i
# dq_dict[i][j]: dQ for merging community i, j
# dq_heap[i][n] : (-dq, i, j) for communitiy i nth largest dQ
# H[n]: (-dq, i, j) for community with nth largest max_j(dQ_ij)
a = [k[i] * q0 for i in range(N)]
dq_dict = {
i: {
j: 2 * q0 * G.get_edge_data(i, j).get(weight, 1.0)
- 2 * resolution * k[i] * k[j] * q0 * q0
for j in [node_for_label[u] for u in G.neighbors(label_for_node[i])]
if j != i
}
for i in range(N)
}
dq_heap = [
MappedQueue([(-dq, i, j) for j, dq in dq_dict[i].items()]) for i in range(N)
]
H = MappedQueue([dq_heap[i].h[0] for i in range(N) if len(dq_heap[i]) > 0])
# Merge communities until we can't improve modularity or until desired number of
# communities (n_communities) is reached.
while len(H) > n_communities:
# Find best merge
# Remove from heap of row maxes
# Ties will be broken by choosing the pair with lowest min community id
try:
dq, i, j = H.pop()
except IndexError:
break
dq = -dq
# Remove best merge from row i heap
dq_heap[i].pop()
# Push new row max onto H
if len(dq_heap[i]) > 0:
H.push(dq_heap[i].h[0])
# If this element was also at the root of row j, we need to remove the
# duplicate entry from H
if dq_heap[j].h[0] == (-dq, j, i):
H.remove((-dq, j, i))
# Remove best merge from row j heap
dq_heap[j].remove((-dq, j, i))
# Push new row max onto H
if len(dq_heap[j]) > 0:
H.push(dq_heap[j].h[0])
else:
# Duplicate wasn't in H, just remove from row j heap
dq_heap[j].remove((-dq, j, i))
# Stop when change is non-positive
if dq <= 0:
break
# Perform merge
communities[j] = frozenset(communities[i] | communities[j])
del communities[i]
merges.append((i, j, dq))
# New modularity
q_cnm += dq
# Get list of communities connected to merged communities
i_set = set(dq_dict[i].keys())
j_set = set(dq_dict[j].keys())
all_set = (i_set | j_set) - {i, j}
both_set = i_set & j_set
# Merge i into j and update dQ
for k in all_set:
# Calculate new dq value
if k in both_set:
dq_jk = dq_dict[j][k] + dq_dict[i][k]
elif k in j_set:
dq_jk = dq_dict[j][k] - 2.0 * resolution * a[i] * a[k]
else:
# k in i_set
dq_jk = dq_dict[i][k] - 2.0 * resolution * a[j] * a[k]
# Update rows j and k
for row, col in [(j, k), (k, j)]:
# Save old value for finding heap index
if k in j_set:
d_old = (-dq_dict[row][col], row, col)
else:
d_old = None
# Update dict for j,k only (i is removed below)
dq_dict[row][col] = dq_jk
# Save old max of per-row heap
if len(dq_heap[row]) > 0:
d_oldmax = dq_heap[row].h[0]
else:
d_oldmax = None
# Add/update heaps
d = (-dq_jk, row, col)
if d_old is None:
# We're creating a new nonzero element, add to heap
dq_heap[row].push(d)
else:
# Update existing element in per-row heap
dq_heap[row].update(d_old, d)
# Update heap of row maxes if necessary
if d_oldmax is None:
# No entries previously in this row, push new max
H.push(d)
else:
# We've updated an entry in this row, has the max changed?
if dq_heap[row].h[0] != d_oldmax:
H.update(d_oldmax, dq_heap[row].h[0])
# Remove row/col i from matrix
i_neighbors = dq_dict[i].keys()
for k in i_neighbors:
# Remove from dict
dq_old = dq_dict[k][i]
del dq_dict[k][i]
# Remove from heaps if we haven't already
if k != j:
# Remove both row and column
for row, col in [(k, i), (i, k)]:
# Check if replaced dq is row max
d_old = (-dq_old, row, col)
if dq_heap[row].h[0] == d_old:
# Update per-row heap and heap of row maxes
dq_heap[row].remove(d_old)
H.remove(d_old)
# Update row max
if len(dq_heap[row]) > 0:
H.push(dq_heap[row].h[0])
else:
# Only update per-row heap
dq_heap[row].remove(d_old)
del dq_dict[i]
# Mark row i as deleted, but keep placeholder
dq_heap[i] = MappedQueue()
# Merge i into j and update a
a[j] += a[i]
a[i] = 0
communities = [
frozenset([label_for_node[i] for i in c]) for c in communities.values()
]
return sorted(communities, key=len, reverse=True)
| def greedy_modularity_communities(G, weight=None, resolution=1, n_communities=1):
r"""Find communities in G using greedy modularity maximization.
This function uses Clauset-Newman-Moore greedy modularity maximization [2]_.
This method currently supports the Graph class.
Greedy modularity maximization begins with each node in its own community
and joins the pair of communities that most increases modularity until no
such pair exists or until `n_communities` number of communities is reached.
This function maximizes the generalized modularity, where `resolution`
is the resolution parameter, often expressed as $\gamma$.
See :func:`~networkx.algorithms.community.quality.modularity`.
Parameters
----------
G : NetworkX graph
weight : string or None, optional (default=None)
The name of an edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights adjacent to the node.
n_communities: int
desired number of communities, defaults to 1 and falls back to 1 if
the given number is larger than the initial amount of communities
Returns
-------
list
A list of sets of nodes, one for each community.
Sorted by length with largest communities first.
Examples
--------
>>> from networkx.algorithms.community import greedy_modularity_communities
>>> G = nx.karate_club_graph()
>>> c = list(greedy_modularity_communities(G))
>>> sorted(c[0])
[8, 14, 15, 18, 20, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33]
See Also
--------
modularity
References
----------
.. [1] M. E. J Newman "Networks: An Introduction", page 224
Oxford University Press 2011.
.. [2] Clauset, A., Newman, M. E., & Moore, C.
"Finding community structure in very large networks."
Physical Review E 70(6), 2004.
.. [3] Reichardt and Bornholdt "Statistical Mechanics of Community
Detection" Phys. Rev. E74, 2006.
"""
# Count nodes and edges
N = len(G.nodes())
m = sum([d.get("weight", 1) for u, v, d in G.edges(data=True)])
q0 = 1.0 / (2.0 * m)
if n_communities > N:
warnings.warn(
"Input n_communities is greater than number of nodes of G."
"Falling back to default n_communities = 1."
)
n_communities = 1
# Map node labels to contiguous integers
label_for_node = {i: v for i, v in enumerate(G.nodes())}
node_for_label = {label_for_node[i]: i for i in range(N)}
# Calculate degrees
k_for_label = G.degree(G.nodes(), weight=weight)
k = [k_for_label[label_for_node[i]] for i in range(N)]
# Initialize community and merge lists
communities = {i: frozenset([i]) for i in range(N)}
merges = []
# Initial modularity
partition = [[label_for_node[x] for x in c] for c in communities.values()]
q_cnm = modularity(G, partition, resolution=resolution)
# Initialize data structures
# CNM Eq 8-9 (Eq 8 was missing a factor of 2 (from A_ij + A_ji)
# a[i]: fraction of edges within community i
# dq_dict[i][j]: dQ for merging community i, j
# dq_heap[i][n] : (-dq, i, j) for communitiy i nth largest dQ
# H[n]: (-dq, i, j) for community with nth largest max_j(dQ_ij)
a = [k[i] * q0 for i in range(N)]
dq_dict = {
i: {
j: 2 * q0 * G.get_edge_data(i, j).get(weight, 1.0)
- 2 * resolution * k[i] * k[j] * q0 * q0
for j in [node_for_label[u] for u in G.neighbors(label_for_node[i])]
if j != i
}
for i in range(N)
}
dq_heap = [
MappedQueue([(-dq, i, j) for j, dq in dq_dict[i].items()]) for i in range(N)
]
H = MappedQueue([dq_heap[i].h[0] for i in range(N) if len(dq_heap[i]) > 0])
# Merge communities until we can't improve modularity or until desired number of
# communities (n_communities) is reached.
while len(H) > n_communities:
# Find best merge
# Remove from heap of row maxes
# Ties will be broken by choosing the pair with lowest min community id
try:
dq, i, j = H.pop()
except IndexError:
break
dq = -dq
# Remove best merge from row i heap
dq_heap[i].pop()
# Push new row max onto H
if len(dq_heap[i]) > 0:
H.push(dq_heap[i].h[0])
# If this element was also at the root of row j, we need to remove the
# duplicate entry from H
if dq_heap[j].h[0] == (-dq, j, i):
H.remove((-dq, j, i))
# Remove best merge from row j heap
dq_heap[j].remove((-dq, j, i))
# Push new row max onto H
if len(dq_heap[j]) > 0:
H.push(dq_heap[j].h[0])
else:
# Duplicate wasn't in H, just remove from row j heap
dq_heap[j].remove((-dq, j, i))
# Stop when change is non-positive
if dq <= 0:
break
# Perform merge
communities[j] = frozenset(communities[i] | communities[j])
del communities[i]
merges.append((i, j, dq))
# New modularity
q_cnm += dq
# Get list of communities connected to merged communities
i_set = set(dq_dict[i].keys())
j_set = set(dq_dict[j].keys())
all_set = (i_set | j_set) - {i, j}
both_set = i_set & j_set
# Merge i into j and update dQ
for k in all_set:
# Calculate new dq value
if k in both_set:
dq_jk = dq_dict[j][k] + dq_dict[i][k]
elif k in j_set:
dq_jk = dq_dict[j][k] - 2.0 * resolution * a[i] * a[k]
else:
# k in i_set
dq_jk = dq_dict[i][k] - 2.0 * resolution * a[j] * a[k]
# Update rows j and k
for row, col in [(j, k), (k, j)]:
# Save old value for finding heap index
if k in j_set:
d_old = (-dq_dict[row][col], row, col)
else:
d_old = None
# Update dict for j,k only (i is removed below)
dq_dict[row][col] = dq_jk
# Save old max of per-row heap
if len(dq_heap[row]) > 0:
d_oldmax = dq_heap[row].h[0]
else:
d_oldmax = None
# Add/update heaps
d = (-dq_jk, row, col)
if d_old is None:
# We're creating a new nonzero element, add to heap
dq_heap[row].push(d)
else:
# Update existing element in per-row heap
dq_heap[row].update(d_old, d)
# Update heap of row maxes if necessary
if d_oldmax is None:
# No entries previously in this row, push new max
H.push(d)
else:
# We've updated an entry in this row, has the max changed?
if dq_heap[row].h[0] != d_oldmax:
H.update(d_oldmax, dq_heap[row].h[0])
# Remove row/col i from matrix
i_neighbors = dq_dict[i].keys()
for k in i_neighbors:
# Remove from dict
dq_old = dq_dict[k][i]
del dq_dict[k][i]
# Remove from heaps if we haven't already
if k != j:
# Remove both row and column
for row, col in [(k, i), (i, k)]:
# Check if replaced dq is row max
d_old = (-dq_old, row, col)
if dq_heap[row].h[0] == d_old:
# Update per-row heap and heap of row maxes
dq_heap[row].remove(d_old)
H.remove(d_old)
# Update row max
if len(dq_heap[row]) > 0:
H.push(dq_heap[row].h[0])
else:
# Only update per-row heap
dq_heap[row].remove(d_old)
del dq_dict[i]
# Mark row i as deleted, but keep placeholder
dq_heap[i] = MappedQueue()
# Merge i into j and update a
a[j] += a[i]
a[i] = 0
communities = [
frozenset([label_for_node[i] for i in c]) for c in communities.values()
]
return sorted(communities, key=len, reverse=True)
|
5,743 | def kulczynski1(u, v, w=None):
"""
Compute the Kulczynski 1 dissimilarity between two boolean 1-D arrays.
The Kulczynski 1 dissimilarity between two boolean 1-D arrays `u` and `v`,
is defined as
.. math::
\\frac{c_{TT}}
{c_{FT} + c_{TF}}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n`.
Parameters
----------
u : (N,) array_like, bool
Input array.
v : (N,) array_like, bool
Input array.
w : (N,) array_like, optional
The weights for each value in `u` and `v`. Default is None,
which gives each value a weight of 1.0
Returns
-------
kulczynski1 : double
The Kulczynski 1 distance between vectors `u` and `v`.
Examples
--------
>>> from scipy.spatial import distance
>>> distance.kulczynski1([1, 0, 0], [0, 1, 0])
0.0
>>> distance.kulczynski1([1, 0, 0], [1, 1, 0])
1.0
>>> distance.kulczynski1([1, 0, 0], [3, 1, 0])
-3.0
"""
u = _validate_vector(u)
v = _validate_vector(v)
if w is not None:
w = _validate_weights(w)
(_, nft, ntf, ntt) = _nbool_correspond_all(u, v, w=w)
return ntt / (ntf + nft)
| def kulczynski1(u, v, w=None):
"""
Compute the Kulczynski 1 dissimilarity between two boolean 1-D arrays.
The Kulczynski 1 dissimilarity between two boolean 1-D arrays `u` and `v`,
is defined as
.. math::
\\frac{c_{TT}}
{c_{FT} + c_{TF}}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n`.
Parameters
----------
u : (N,) array_like, bool
Input array.
v : (N,) array_like, bool
Input array.
w : (N,) array_like, optional
The weights for each value in `u` and `v`. Default is None,
which gives each value a weight of 1.0
Returns
-------
kulczynski1 : float
The Kulczynski 1 distance between vectors `u` and `v`.
Examples
--------
>>> from scipy.spatial import distance
>>> distance.kulczynski1([1, 0, 0], [0, 1, 0])
0.0
>>> distance.kulczynski1([1, 0, 0], [1, 1, 0])
1.0
>>> distance.kulczynski1([1, 0, 0], [3, 1, 0])
-3.0
"""
u = _validate_vector(u)
v = _validate_vector(v)
if w is not None:
w = _validate_weights(w)
(_, nft, ntf, ntt) = _nbool_correspond_all(u, v, w=w)
return ntt / (ntf + nft)
|
59,806 | def normalize_reply(text: str, version=1) -> str:
"""
Standardize the capitalization and punctuation spacing of the input text.
Version 1: Fix sentence start casing, and punctuation.
Version 2: Add trailing period, if missing.
"""
switch_list = [(' .', '.'), (' ,', ','), (' ?', '?'), (' !', '!'), (" ' ", "'")]
# add spaces so that words and punctuation can be seaprated
new_text = text.lower()
# normalize in case of human:
for new, old in switch_list:
new_text = new_text.replace(old, new).replace(' ', ' ')
# split on punctuation to find sentence boundaries
# capitalize stuff
tokens = new_text.split(' ')
for i in range(len(tokens)):
if i == 0:
tokens[i] = uppercase(tokens[i])
elif tokens[i] in ('i', "i'm", "i've", "i'll", "i'd"):
tokens[i] = uppercase(tokens[i])
elif tokens[i] in '?.!' and i < len(tokens) - 1:
tokens[i + 1] = uppercase(tokens[i + 1])
new_text = ' '.join(tokens)
new_text = ' ' + new_text + ' '
for tup in switch_list:
new_text = new_text.replace(tup[0], tup[1])
# get rid of surrounding whitespace
new_text = new_text.strip()
new_text = new_text.replace(' ', ' ')
if version > 1 and (new_text[-1] not in ['!', '.', '?', ')']):
new_text += '.'
return new_text
| def normalize_reply(text: str, version=1) -> str:
"""
Standardize the capitalization and punctuation spacing of the input text.
- Version 1: Fix sentence start casing, and punctuation.
- Version 2: Add trailing period, if missing.
"""
switch_list = [(' .', '.'), (' ,', ','), (' ?', '?'), (' !', '!'), (" ' ", "'")]
# add spaces so that words and punctuation can be seaprated
new_text = text.lower()
# normalize in case of human:
for new, old in switch_list:
new_text = new_text.replace(old, new).replace(' ', ' ')
# split on punctuation to find sentence boundaries
# capitalize stuff
tokens = new_text.split(' ')
for i in range(len(tokens)):
if i == 0:
tokens[i] = uppercase(tokens[i])
elif tokens[i] in ('i', "i'm", "i've", "i'll", "i'd"):
tokens[i] = uppercase(tokens[i])
elif tokens[i] in '?.!' and i < len(tokens) - 1:
tokens[i + 1] = uppercase(tokens[i + 1])
new_text = ' '.join(tokens)
new_text = ' ' + new_text + ' '
for tup in switch_list:
new_text = new_text.replace(tup[0], tup[1])
# get rid of surrounding whitespace
new_text = new_text.strip()
new_text = new_text.replace(' ', ' ')
if version > 1 and (new_text[-1] not in ['!', '.', '?', ')']):
new_text += '.'
return new_text
|
3,199 | def convert_dict_case(obj, converter):
"""
Recursively converts the keys of a dictionary using the provided converter
param.
"""
if not isinstance(obj, dict):
return obj
obj = obj.copy()
for key in list(six.iterkeys(obj)):
converted_key = converter(key)
obj[converted_key] = convert_dict_case(obj.pop(key), converter)
return obj
| def convert_dict_key_case(obj, converter):
"""
Recursively converts the keys of a dictionary using the provided converter
param.
"""
if not isinstance(obj, dict):
return obj
obj = obj.copy()
for key in list(six.iterkeys(obj)):
converted_key = converter(key)
obj[converted_key] = convert_dict_case(obj.pop(key), converter)
return obj
|
17,930 | def test_preprocessing():
"""Tests the result of parameters preprocessing."""
parameters = tax_benefit_system.parameters
assert set(parameters.cotsoc.cotisations_employeur.children.keys()) == set([
'prive_cadre',
'prive_non_cadre',
'public_non_titulaire',
'public_titulaire_etat',
'public_titulaire_hospitaliere',
'public_titulaire_militaire',
'public_titulaire_territoriale',
]), "Les barèmes de cotisations employeur de certaines catégories de salariés sont manquants"
assert set(parameters.cotsoc.cotisations_salarie.children.keys()) == set([
'prive_cadre',
'prive_non_cadre',
'public_non_titulaire',
'public_titulaire_etat',
'public_titulaire_hospitaliere',
# 'public_titulaire_militaire', FIXME Il y en a sûrement mais pas actuellement
'public_titulaire_territoriale',
]), "Les barèmes de cotisations salarié de certaines catégories instant_sde salariés sont manquants"
categorie_salaries = [
"prive_cadre",
]
for categorie_salarie in categorie_salaries:
test = parameters.cotsoc.cotisations_salarie.children["prive_cadre"].children.keys()
target = cotisations_salarie_by_categorie_salarie[categorie_salarie]
assert set(test) == set(target), "Les barèmes de cotisations salarié {} ne sont pas les bons".format(
categorie_salarie)
cotisations_salaries = set(sum(
(cotisations_salarie_by_categorie_salarie[categorie_salarie] for categorie_salarie in categorie_salaries),
[],
))
for cotisation_salarie in sorted(cotisations_salaries):
bareme = parameters.cotsoc.cotisations_salarie.children["prive_cadre"].children[cotisation_salarie]
final_null_date = cotisations_salarie_by_name[cotisation_salarie].get("final_null_date")
if final_null_date:
thresholds = [
dict(
(parameter_at_instant.instant_str, parameter_at_instant.value)
for parameter_at_instant in bracket.threshold.values_list
)
for bracket in bareme.brackets
]
final_thresholds_by_instant_str = OrderedDict(sorted(threshold.items(), reverse = True)[0] for threshold in thresholds)
assert all([final_threshold is None for final_threshold in final_thresholds_by_instant_str.values()]), "Barème salarié {} ne s'éteint pas (il devrait en {})".format(
cotisation_salarie,
final_null_date,
)
assert max(final_thresholds_by_instant_str.keys()) == final_null_date, "Barème salarié {} ne s'éteint pas en {}".format(
cotisation_salarie,
final_null_date,
)
start_non_null_date = cotisations_salarie_by_name[cotisation_salarie].get("start_non_null_date")
if start_non_null_date:
thresholds = [
dict(
(parameter_at_instant.instant_str, parameter_at_instant.value)
for parameter_at_instant in bracket.threshold.values_list
)
for bracket in bareme.brackets
]
start_thresholds_by_instant_str = OrderedDict(sorted(threshold.items(), reverse = False)[0] for threshold in thresholds)
assert all([start_threshold is not None for start_threshold in start_thresholds_by_instant_str.values()]), "Barème salarié {} ne s'éteint pas (il devrait en {})".format(
cotisation_salarie,
start_non_null_date,
)
assert min(start_thresholds_by_instant_str.keys()) == start_non_null_date, "Barème salarié {} ne s'éteint pas en {}".format(
cotisation_salarie,
start_non_null_date,
) | def test_preprocessing():
"""Tests the result of parameters preprocessing."""
parameters = tax_benefit_system.parameters
assert set(parameters.cotsoc.cotisations_employeur.children.keys()) == set([
'prive_cadre',
'prive_non_cadre',
'public_non_titulaire',
'public_titulaire_etat',
'public_titulaire_hospitaliere',
'public_titulaire_militaire',
'public_titulaire_territoriale',
]), "Les barèmes de cotisations employeur de certaines catégories de salariés sont manquants"
assert set(parameters.cotsoc.cotisations_salarie.children.keys()) == set([
'prive_cadre',
'prive_non_cadre',
'public_non_titulaire',
'public_titulaire_etat',
'public_titulaire_hospitaliere',
# 'public_titulaire_militaire', FIXME Il y en a sûrement mais pas actuellement
'public_titulaire_territoriale',
]), "Les barèmes de cotisations salarié de certaines catégories instant_sde salariés sont manquants"
categorie_salaries = [
"prive_cadre",
]
for categorie_salarie in categorie_salaries:
test = parameters.cotsoc.cotisations_salarie.children["prive_cadre"].children.keys()
target = cotisations_salarie_by_categorie_salarie[categorie_salarie]
assert set(test) == set(target), "Les barèmes de cotisations salarié {} ne sont pas les bons".format(
categorie_salarie)
cotisations_salaries = set(sum(
(cotisations_salarie_by_categorie_salarie[categorie_salarie] for categorie_salarie in categorie_salaries),
[],
))
for cotisation_salarie in sorted(cotisations_salaries):
bareme = parameters.cotsoc.cotisations_salarie.children["prive_cadre"].children[cotisation_salarie]
final_null_date = cotisations_salarie_by_name[cotisation_salarie].get("final_null_date")
if final_null_date:
thresholds = [
dict(
(parameter_at_instant.instant_str, parameter_at_instant.value)
for parameter_at_instant in bracket.threshold.values_list
)
for bracket in bareme.brackets
]
final_thresholds_by_instant_str = OrderedDict(sorted(threshold.items(), reverse = True)[0] for threshold in thresholds)
assert all([final_threshold is None for final_threshold in final_thresholds_by_instant_str.values()]), "Barème salarié {} ne s'éteint pas (il devrait en {})".format(
cotisation_salarie,
final_null_date,
)
assert max(final_thresholds_by_instant_str.keys()) == final_null_date, "Barème salarié {} ne s'éteint pas en {}".format(
cotisation_salarie,
final_null_date,
)
start_non_null_date = cotisations_salarie_by_name[cotisation_salarie].get("start_non_null_date")
if start_non_null_date:
thresholds = [
dict(
(parameter_at_instant.instant_str, parameter_at_instant.value)
for parameter_at_instant in bracket.threshold.values_list
)
for bracket in bareme.brackets
]
start_thresholds_by_instant_str = OrderedDict(sorted(threshold.items())[0] for threshold in thresholds)
assert all([start_threshold is not None for start_threshold in start_thresholds_by_instant_str.values()]), "Barème salarié {} ne s'éteint pas (il devrait en {})".format(
cotisation_salarie,
start_non_null_date,
)
assert min(start_thresholds_by_instant_str.keys()) == start_non_null_date, "Barème salarié {} ne s'éteint pas en {}".format(
cotisation_salarie,
start_non_null_date,
) |
38,670 | def main():
# Setup command line options
argparser = argparse.ArgumentParser()
output_options = argparser.add_argument_group(
'Options controlling ReFrame output'
)
locate_options = argparser.add_argument_group(
'Options for discovering checks'
)
select_options = argparser.add_argument_group(
'Options for selecting checks'
)
action_options = argparser.add_argument_group(
'Options controlling actions'
)
run_options = argparser.add_argument_group(
'Options controlling the execution of checks'
)
env_options = argparser.add_argument_group(
'Options controlling the ReFrame environment'
)
misc_options = argparser.add_argument_group('Miscellaneous options')
# Output directory options
output_options.add_argument(
'--prefix', action='store', metavar='DIR',
help='Set general directory prefix to DIR',
envvar='RFM_PREFIX', configvar='systems/prefix'
)
output_options.add_argument(
'-o', '--output', action='store', metavar='DIR',
help='Set output directory prefix to DIR',
envvar='RFM_OUTPUT_DIR', configvar='systems/outputdir'
)
output_options.add_argument(
'-s', '--stage', action='store', metavar='DIR',
help='Set stage directory prefix to DIR',
envvar='RFM_STAGE_DIR', configvar='systems/stagedir'
)
output_options.add_argument(
'--timestamp', action='store', nargs='?', const='', metavar='TIMEFMT',
help=('Append a timestamp to the output and stage directory prefixes '
'(default: "%%FT%%T")'),
envvar='RFM_TIMESTAMP_DIRS', configvar='general/timestamp_dirs'
)
output_options.add_argument(
'--perflogdir', action='store', metavar='DIR',
help=('Set performance log data directory prefix '
'(relevant only to the filelog log handler)'),
envvar='RFM_PERFLOG_DIR',
configvar='logging/handlers_perflog/filelog_basedir'
)
output_options.add_argument(
'--keep-stage-files', action='store_true',
help='Keep stage directories even for successful checks',
envvar='RFM_KEEP_STAGE_FILES', configvar='general/keep_stage_files'
)
output_options.add_argument(
'--dont-restage', action='store_false', dest='clean_stagedir',
help='Reuse the test stage directory',
envvar='RFM_CLEAN_STAGEDIR', configvar='general/clean_stagedir'
)
output_options.add_argument(
'--save-log-files', action='store_true', default=False,
help='Save ReFrame log files to the output directory',
envvar='RFM_SAVE_LOG_FILES', configvar='general/save_log_files'
)
output_options.add_argument(
'--report-file', action='store', metavar='FILE',
help="Store JSON run report in FILE",
envvar='RFM_REPORT_FILE',
configvar='general/report_file'
)
# Check discovery options
locate_options.add_argument(
'-c', '--checkpath', action='append', metavar='PATH',
help="Add PATH to the check search path list",
envvar='RFM_CHECK_SEARCH_PATH :', configvar='general/check_search_path'
)
locate_options.add_argument(
'-R', '--recursive', action='store_true',
help='Search for checks in the search path recursively',
envvar='RFM_CHECK_SEARCH_RECURSIVE',
configvar='general/check_search_recursive'
)
locate_options.add_argument(
'--ignore-check-conflicts', action='store_true',
help='Skip checks with conflicting names',
envvar='RFM_IGNORE_CHECK_CONFLICTS',
configvar='general/ignore_check_conflicts'
)
# Select options
select_options.add_argument(
'-t', '--tag', action='append', dest='tags', metavar='PATTERN',
default=[],
help='Select checks with at least one tag matching PATTERN'
)
select_options.add_argument(
'-n', '--name', action='append', dest='names', default=[],
metavar='PATTERN', help='Select checks whose name matches PATTERN'
)
select_options.add_argument(
'-x', '--exclude', action='append', dest='exclude_names',
metavar='PATTERN', default=[],
help='Exclude checks whose name matches PATTERN'
)
select_options.add_argument(
'-p', '--prgenv', action='append', default=[r'.*'], metavar='PATTERN',
help=('Select checks with at least one '
'programming environment matching PATTERN')
)
select_options.add_argument(
'--gpu-only', action='store_true',
help='Select only GPU checks'
)
select_options.add_argument(
'--cpu-only', action='store_true',
help='Select only CPU checks'
)
# Action options
action_options.add_argument(
'-l', '--list', action='store_true',
help='List the selected checks'
)
action_options.add_argument(
'-L', '--list-detailed', action='store_true',
help='List the selected checks providing details for each test'
)
action_options.add_argument(
'-r', '--run', action='store_true',
help='Run the selected checks'
)
# Run options
run_options.add_argument(
'-J', '--job-option', action='append', metavar='OPT',
dest='job_options', default=[],
help='Pass option OPT to job scheduler'
)
run_options.add_argument(
'--force-local', action='store_true',
help='Force local execution of checks'
)
run_options.add_argument(
'--skip-sanity-check', action='store_true',
help='Skip sanity checking'
)
run_options.add_argument(
'--skip-performance-check', action='store_true',
help='Skip performance checking'
)
run_options.add_argument(
'--strict', action='store_true',
help='Enforce strict performance checking'
)
run_options.add_argument(
'--skip-system-check', action='store_true',
help='Skip system check'
)
run_options.add_argument(
'--skip-prgenv-check', action='store_true',
help='Skip programming environment check'
)
run_options.add_argument(
'--exec-policy', metavar='POLICY', action='store',
choices=['async', 'serial'], default='async',
help='Set the execution policy of ReFrame (default: "async")'
)
run_options.add_argument(
'--mode', action='store', help='Execution mode to use'
)
run_options.add_argument(
'--max-retries', metavar='NUM', action='store', default=0,
help='Set the maximum number of times a failed regression test '
'may be retried (default: 0)'
)
run_options.add_argument(
'--retry-failed', metavar='NUM', action='store', default=None,
help='Retry failed tests in a given runreport'
)
run_options.add_argument(
'--flex-alloc-nodes', action='store',
dest='flex_alloc_nodes', metavar='{all|STATE|NUM}', default=None,
help='Set strategy for the flexible node allocation (default: "idle").'
)
run_options.add_argument(
'--disable-hook', action='append', metavar='NAME', dest='hooks',
default=[], help='Disable a pipeline hook for this run'
)
env_options.add_argument(
'-M', '--map-module', action='append', metavar='MAPPING',
dest='module_mappings', default=[],
help='Add a module mapping',
envvar='RFM_MODULE_MAPPINGS ,', configvar='general/module_mappings'
)
env_options.add_argument(
'-m', '--module', action='append', default=[],
metavar='MOD', dest='user_modules',
help='Load module MOD before running any regression check',
envvar='RFM_USER_MODULES ,', configvar='general/user_modules'
)
env_options.add_argument(
'--module-mappings', action='store', metavar='FILE',
dest='module_map_file',
help='Load module mappings from FILE',
envvar='RFM_MODULE_MAP_FILE', configvar='general/module_map_file'
)
env_options.add_argument(
'-u', '--unload-module', action='append', metavar='MOD',
dest='unload_modules', default=[],
help='Unload module MOD before running any regression check',
envvar='RFM_UNLOAD_MODULES ,', configvar='general/unload_modules'
)
env_options.add_argument(
'--purge-env', action='store_true', dest='purge_env', default=False,
help='Unload all modules before running any regression check',
envvar='RFM_PURGE_ENVIRONMENT', configvar='general/purge_environment'
)
env_options.add_argument(
'--non-default-craype', action='store_true',
help='Test a non-default Cray Programming Environment',
envvar='RFM_NON_DEFAULT_CRAYPE', configvar='general/non_default_craype'
)
# Miscellaneous options
misc_options.add_argument(
'-C', '--config-file', action='store',
dest='config_file', metavar='FILE',
help='Set configuration file',
envvar='RFM_CONFIG_FILE'
)
misc_options.add_argument(
'--nocolor', action='store_false', dest='colorize',
help='Disable coloring of output',
envvar='RFM_COLORIZE', configvar='general/colorize'
)
misc_options.add_argument(
'--failure-stats', action='store_true', help='Print failure statistics'
)
misc_options.add_argument(
'--performance-report', action='store_true',
help='Print a report for performance tests'
)
misc_options.add_argument(
'--show-config', action='store', nargs='?', const='all',
metavar='PARAM',
help='Print the value of configuration parameter PARAM and exit'
)
misc_options.add_argument(
'--system', action='store', help='Load configuration for SYSTEM',
envvar='RFM_SYSTEM'
)
misc_options.add_argument(
'--upgrade-config-file', action='store', metavar='OLD[:NEW]',
help='Upgrade ReFrame 2.x configuration file to ReFrame 3.x syntax'
)
misc_options.add_argument(
'-V', '--version', action='version', version=os_ext.reframe_version()
)
misc_options.add_argument(
'-v', '--verbose', action='count',
help='Increase verbosity level of output',
envvar='RFM_VERBOSE', configvar='general/verbose'
)
# Options not associated with command-line arguments
argparser.add_argument(
dest='graylog_server',
envvar='RFM_GRAYLOG_ADDRESS',
configvar='logging/handlers_perflog/graylog_address',
help='Graylog server address'
)
argparser.add_argument(
dest='syslog_address',
envvar='RFM_SYSLOG_ADDRESS',
configvar='logging/handlers_perflog/syslog_address',
help='Syslog server address'
)
argparser.add_argument(
dest='ignore_reqnodenotavail',
envvar='RFM_IGNORE_REQNODENOTAVAIL',
configvar='schedulers/ignore_reqnodenotavail',
action='store_true',
help='Graylog server address'
)
argparser.add_argument(
dest='use_login_shell',
envvar='RFM_USE_LOGIN_SHELL',
configvar='general/use_login_shell',
action='store_true',
help='Use a login shell for job scripts'
)
if len(sys.argv) == 1:
argparser.print_help()
sys.exit(1)
# Parse command line
options = argparser.parse_args()
# First configure logging with our generic configuration so as to be able
# to print pretty messages; logging will be reconfigured by user's
# configuration later
site_config = config.load_config(
os.path.join(reframe.INSTALL_PREFIX, 'reframe/core/settings.py')
)
site_config.select_subconfig('generic')
options.update_config(site_config)
logging.configure_logging(site_config)
logging.getlogger().colorize = site_config.get('general/0/colorize')
printer = PrettyPrinter()
printer.colorize = site_config.get('general/0/colorize')
printer.inc_verbosity(site_config.get('general/0/verbose'))
if os.getenv('RFM_GRAYLOG_SERVER'):
printer.warning(
'RFM_GRAYLOG_SERVER environment variable is deprecated; '
'please use RFM_GRAYLOG_ADDRESS instead'
)
os.environ['RFM_GRAYLOG_ADDRESS'] = os.getenv('RFM_GRAYLOG_SERVER')
if options.upgrade_config_file is not None:
old_config, *new_config = options.upgrade_config_file.split(
':', maxsplit=1)
new_config = new_config[0] if new_config else None
try:
new_config = config.convert_old_config(old_config, new_config)
except Exception as e:
printer.error(f'could not convert file: {e}')
sys.exit(1)
printer.info(
f'Conversion successful! '
f'The converted file can be found at {new_config!r}.'
)
sys.exit(0)
# Now configure ReFrame according to the user configuration file
try:
try:
site_config = config.load_config(options.config_file)
except ReframeDeprecationWarning as e:
printer.warning(e)
converted = config.convert_old_config(options.config_file)
printer.warning(
f"configuration file has been converted "
f"to the new syntax here: '{converted}'"
)
site_config = config.load_config(converted)
site_config.validate()
# We ignore errors about unresolved sections or configuration
# parameters here, because they might be defined at the individual
# partition level and will be caught when we will instantiating
# internally the system and partitions later on.
site_config.select_subconfig(options.system,
ignore_resolve_errors=True)
for err in options.update_config(site_config):
printer.warning(str(err))
# Update options from the selected execution mode
if options.mode:
mode_args = site_config.get(f'modes/@{options.mode}/options')
# Parse the mode's options and reparse the command-line
options = argparser.parse_args(mode_args)
options = argparser.parse_args(namespace=options.cmd_options)
options.update_config(site_config)
logging.configure_logging(site_config)
except (OSError, ConfigError) as e:
printer.error(f'failed to load configuration: {e}')
sys.exit(1)
logging.getlogger().colorize = site_config.get('general/0/colorize')
printer.colorize = site_config.get('general/0/colorize')
printer.inc_verbosity(site_config.get('general/0/verbose'))
try:
runtime.init_runtime(site_config)
except ConfigError as e:
printer.error(f'failed to initialize runtime: {e}')
sys.exit(1)
rt = runtime.runtime()
try:
if site_config.get('general/0/module_map_file'):
rt.modules_system.load_mapping_from_file(
site_config.get('general/0/module_map_file')
)
if site_config.get('general/0/module_mappings'):
for m in site_config.get('general/0/module_mappings'):
rt.modules_system.load_mapping(m)
except (ConfigError, OSError) as e:
printer.error('could not load module mappings: %s' % e)
sys.exit(1)
if (os_ext.samefile(rt.stage_prefix, rt.output_prefix) and
not site_config.get('general/0/keep_stage_files')):
printer.error("stage and output refer to the same directory; "
"if this is on purpose, please use the "
"'--keep-stage-files' option.")
sys.exit(1)
# Show configuration after everything is set up
if options.show_config:
config_param = options.show_config
if config_param == 'all':
printer.info(str(rt.site_config))
else:
value = rt.get_option(config_param)
if value is None:
printer.error(
f'no such configuration parameter found: {config_param}'
)
else:
printer.info(json.dumps(value, indent=2))
sys.exit(0)
printer.debug(format_env(options.env_vars))
# Setup the check loader
if options.retry_failed:
with open(options.retry_failed) as f:
try:
restart_report = json.load(f)
except json.JSONDecodeError as e:
raise ReframeFatalError(
f"invalid runreport: '{restart_report}'"
) from e
schema_filename = os.path.join(reframe.INSTALL_PREFIX, 'reframe',
'schemas', 'runreport.json')
with open(schema_filename) as f:
try:
schema = json.load(f)
except json.JSONDecodeError as e:
raise ReframeFatalError(
f"invalid schema: '{schema_filename}'"
) from e
try:
jsonschema.validate(restart_report, schema)
except jsonschema.ValidationError as e:
raise ValueError(f"could not validate restart runreport: "
f"'{restart_report}'") from e
failed_checks = set()
failed_checks_prefixes = set()
# for run in restart_report['runs']:
for testcase in restart_report['runs'][-1]['testcases']:
if testcase['result'] == 'failure':
failed_checks.add(hash(testcase['name']) ^
hash(testcase['system']) ^
hash(testcase['environment']))
failed_checks_prefixes.add(testcase['prefix'])
loader = RegressionCheckLoader(
load_path=site_config.get('general/0/check_search_path'), #failed_checks_prefixes,
ignore_conflicts=site_config.get(
'general/0/ignore_check_conflicts')
)
else:
loader = RegressionCheckLoader(
load_path=site_config.get('general/0/check_search_path'),
recurse=site_config.get('general/0/check_search_recursive'),
ignore_conflicts=site_config.get(
'general/0/ignore_check_conflicts')
)
def print_infoline(param, value):
param = param + ':'
printer.info(f" {param.ljust(18)} {value}")
session_info = {
'cmdline': ' '.join(sys.argv),
'config_file': rt.site_config.filename,
'data_version': '1.0',
'hostname': socket.gethostname(),
'prefix_output': rt.output_prefix,
'prefix_stage': rt.stage_prefix,
'user': os_ext.osuser(),
'version': os_ext.reframe_version(),
'workdir': os.getcwd(),
}
# Print command line
printer.info(f"[ReFrame Setup]")
print_infoline('version', session_info['version'])
print_infoline('command', repr(session_info['cmdline']))
print_infoline(
f"launched by",
f"{session_info['user'] or '<unknown>'}@{session_info['hostname']}"
)
print_infoline('working directory', repr(session_info['workdir']))
print_infoline('settings file', f"{session_info['config_file']!r}")
print_infoline('check search path',
f"{'(R) ' if loader.recurse else ''}"
f"{':'.join(loader.load_path)!r}")
print_infoline('stage directory', repr(session_info['prefix_stage']))
print_infoline('output directory', repr(session_info['prefix_output']))
printer.info('')
try:
# Locate and load checks
try:
checks_found = loader.load_all()
except OSError as e:
raise ReframeError from e
# Filter checks by name
checks_matched = checks_found
if options.exclude_names:
for name in options.exclude_names:
checks_matched = filter(filters.have_not_name(name),
checks_matched)
if options.names:
checks_matched = filter(filters.have_name('|'.join(options.names)),
checks_matched)
# Filter checks by tags
for tag in options.tags:
checks_matched = filter(filters.have_tag(tag), checks_matched)
# Filter checks by prgenv
if not options.skip_prgenv_check:
for prgenv in options.prgenv:
checks_matched = filter(filters.have_prgenv(prgenv),
checks_matched)
# Filter checks by system
if not options.skip_system_check:
checks_matched = filter(
filters.have_partition(rt.system.partitions), checks_matched)
# Filter checks further
if options.gpu_only and options.cpu_only:
printer.error("options `--gpu-only' and `--cpu-only' "
"are mutually exclusive")
sys.exit(1)
if options.gpu_only:
checks_matched = filter(filters.have_gpu_only(), checks_matched)
elif options.cpu_only:
checks_matched = filter(filters.have_cpu_only(), checks_matched)
# Determine the allowed programming environments
allowed_environs = {e.name
for env_patt in options.prgenv
for p in rt.system.partitions
for e in p.environs if re.match(env_patt, e.name)}
# Generate the test cases, validate dependencies and sort them
checks_matched = list(checks_matched)
# Disable hooks
for c in checks_matched:
for h in options.hooks:
type(c).disable_hook(h)
testcases_og = generate_testcases(checks_matched,
options.skip_system_check,
options.skip_prgenv_check,
allowed_environs)
if options.retry_failed:
failed_cases = [tc for tc in testcases_og
if tc.__hash__() in failed_checks]
cases_graph = dependency.build_deps(failed_cases, testcases_og)
testcases = dependency.toposort(cases_graph, is_subgraph=True)
restored_tests = set()
for c in testcases:
for d in c.deps:
if d.__hash__() not in failed_checks:
restored_tests.add(d)
else:
testgraph = dependency.build_deps(testcases_og)
dependency.validate_deps(testgraph)
testcases = dependency.toposort(testgraph)
# Manipulate ReFrame's environment
if site_config.get('general/0/purge_environment'):
rt.modules_system.unload_all()
else:
for m in site_config.get('general/0/unload_modules'):
rt.modules_system.unload_module(m)
# Load the environment for the current system
try:
runtime.loadenv(rt.system.preload_environ)
except EnvironError as e:
printer.error("failed to load current system's environment; "
"please check your configuration")
printer.debug(str(e))
raise
for m in site_config.get('general/0/user_modules'):
try:
rt.modules_system.load_module(m, force=True)
except EnvironError as e:
printer.warning("could not load module '%s' correctly: "
"Skipping..." % m)
printer.debug(str(e))
options.flex_alloc_nodes = options.flex_alloc_nodes or 'idle'
# Act on checks
success = True
if options.list or options.list_detailed:
list_checks(list(checks_matched), printer, options.list_detailed)
elif options.run:
# Setup the execution policy
if options.exec_policy == 'serial':
exec_policy = SerialExecutionPolicy()
elif options.exec_policy == 'async':
exec_policy = AsynchronousExecutionPolicy()
else:
# This should not happen, since choices are handled by
# argparser
printer.error("unknown execution policy `%s': Exiting...")
sys.exit(1)
exec_policy.skip_system_check = options.skip_system_check
exec_policy.force_local = options.force_local
exec_policy.strict_check = options.strict
exec_policy.skip_sanity_check = options.skip_sanity_check
exec_policy.skip_performance_check = options.skip_performance_check
exec_policy.keep_stage_files = site_config.get(
'general/0/keep_stage_files'
)
try:
errmsg = "invalid option for --flex-alloc-nodes: '{0}'"
sched_flex_alloc_nodes = int(options.flex_alloc_nodes)
if sched_flex_alloc_nodes <= 0:
raise ConfigError(errmsg.format(options.flex_alloc_nodes))
except ValueError:
sched_flex_alloc_nodes = options.flex_alloc_nodes
exec_policy.sched_flex_alloc_nodes = sched_flex_alloc_nodes
parsed_job_options = []
for opt in options.job_options:
if opt.startswith('-') or opt.startswith('#'):
parsed_job_options.append(opt)
elif len(opt) == 1:
parsed_job_options.append(f'-{opt}')
else:
parsed_job_options.append(f'--{opt}')
exec_policy.sched_options = parsed_job_options
try:
max_retries = int(options.max_retries)
except ValueError:
raise ConfigError('--max-retries is not a valid integer: %s' %
max_retries) from None
runner = Runner(exec_policy, printer, max_retries)
try:
time_start = time.time()
session_info['time_start'] = time.strftime(
'%FT%T%z', time.localtime(time_start),
)
if options.retry_failed:
runner.restore(restored_tests, restart_report)
runner.runall(testcases, testcases_og)
finally:
time_end = time.time()
session_info['time_end'] = time.strftime(
'%FT%T%z', time.localtime(time_end)
)
session_info['time_elapsed'] = time_end - time_start
# Print a retry report if we did any retries
if runner.stats.failures(run=0):
printer.info(runner.stats.retry_report())
# Print a failure report if we had failures in the last run
if runner.stats.failures():
printer.info(runner.stats.failure_report())
success = False
if options.failure_stats:
printer.info(runner.stats.failure_stats())
if options.performance_report:
printer.info(runner.stats.performance_report())
# Generate the report for this session
report_file = os.path.normpath(
os_ext.expandvars(rt.get_option('general/0/report_file'))
)
basedir = os.path.dirname(report_file)
if basedir:
os.makedirs(basedir, exist_ok=True)
# Build final JSON report
run_stats = runner.stats.json()
session_info.update({
'num_cases': run_stats[0]['num_cases'],
'num_failures': run_stats[-1]['num_failures']
})
json_report = {
'session_info': session_info,
'runs': run_stats
}
report_file = generate_report_filename(report_file)
try:
with open(report_file, 'w') as fp:
jsonext.dump(json_report, fp, indent=2)
except OSError as e:
printer.warning(
f'failed to generate report in {report_file!r}: {e}'
)
else:
printer.error("No action specified. Please specify `-l'/`-L' for "
"listing or `-r' for running. "
"Try `%s -h' for more options." %
argparser.prog)
sys.exit(1)
if not success:
sys.exit(1)
sys.exit(0)
except KeyboardInterrupt:
sys.exit(1)
except ReframeError as e:
printer.error(str(e))
sys.exit(1)
except (Exception, ReframeFatalError):
printer.error(format_exception(*sys.exc_info()))
sys.exit(1)
finally:
try:
if site_config.get('general/0/save_log_files'):
logging.save_log_files(rt.output_prefix)
except OSError as e:
printer.error('could not save log file: %s' % e)
sys.exit(1)
| def main():
# Setup command line options
argparser = argparse.ArgumentParser()
output_options = argparser.add_argument_group(
'Options controlling ReFrame output'
)
locate_options = argparser.add_argument_group(
'Options for discovering checks'
)
select_options = argparser.add_argument_group(
'Options for selecting checks'
)
action_options = argparser.add_argument_group(
'Options controlling actions'
)
run_options = argparser.add_argument_group(
'Options controlling the execution of checks'
)
env_options = argparser.add_argument_group(
'Options controlling the ReFrame environment'
)
misc_options = argparser.add_argument_group('Miscellaneous options')
# Output directory options
output_options.add_argument(
'--prefix', action='store', metavar='DIR',
help='Set general directory prefix to DIR',
envvar='RFM_PREFIX', configvar='systems/prefix'
)
output_options.add_argument(
'-o', '--output', action='store', metavar='DIR',
help='Set output directory prefix to DIR',
envvar='RFM_OUTPUT_DIR', configvar='systems/outputdir'
)
output_options.add_argument(
'-s', '--stage', action='store', metavar='DIR',
help='Set stage directory prefix to DIR',
envvar='RFM_STAGE_DIR', configvar='systems/stagedir'
)
output_options.add_argument(
'--timestamp', action='store', nargs='?', const='', metavar='TIMEFMT',
help=('Append a timestamp to the output and stage directory prefixes '
'(default: "%%FT%%T")'),
envvar='RFM_TIMESTAMP_DIRS', configvar='general/timestamp_dirs'
)
output_options.add_argument(
'--perflogdir', action='store', metavar='DIR',
help=('Set performance log data directory prefix '
'(relevant only to the filelog log handler)'),
envvar='RFM_PERFLOG_DIR',
configvar='logging/handlers_perflog/filelog_basedir'
)
output_options.add_argument(
'--keep-stage-files', action='store_true',
help='Keep stage directories even for successful checks',
envvar='RFM_KEEP_STAGE_FILES', configvar='general/keep_stage_files'
)
output_options.add_argument(
'--dont-restage', action='store_false', dest='clean_stagedir',
help='Reuse the test stage directory',
envvar='RFM_CLEAN_STAGEDIR', configvar='general/clean_stagedir'
)
output_options.add_argument(
'--save-log-files', action='store_true', default=False,
help='Save ReFrame log files to the output directory',
envvar='RFM_SAVE_LOG_FILES', configvar='general/save_log_files'
)
output_options.add_argument(
'--report-file', action='store', metavar='FILE',
help="Store JSON run report in FILE",
envvar='RFM_REPORT_FILE',
configvar='general/report_file'
)
# Check discovery options
locate_options.add_argument(
'-c', '--checkpath', action='append', metavar='PATH',
help="Add PATH to the check search path list",
envvar='RFM_CHECK_SEARCH_PATH :', configvar='general/check_search_path'
)
locate_options.add_argument(
'-R', '--recursive', action='store_true',
help='Search for checks in the search path recursively',
envvar='RFM_CHECK_SEARCH_RECURSIVE',
configvar='general/check_search_recursive'
)
locate_options.add_argument(
'--ignore-check-conflicts', action='store_true',
help='Skip checks with conflicting names',
envvar='RFM_IGNORE_CHECK_CONFLICTS',
configvar='general/ignore_check_conflicts'
)
# Select options
select_options.add_argument(
'-t', '--tag', action='append', dest='tags', metavar='PATTERN',
default=[],
help='Select checks with at least one tag matching PATTERN'
)
select_options.add_argument(
'-n', '--name', action='append', dest='names', default=[],
metavar='PATTERN', help='Select checks whose name matches PATTERN'
)
select_options.add_argument(
'-x', '--exclude', action='append', dest='exclude_names',
metavar='PATTERN', default=[],
help='Exclude checks whose name matches PATTERN'
)
select_options.add_argument(
'-p', '--prgenv', action='append', default=[r'.*'], metavar='PATTERN',
help=('Select checks with at least one '
'programming environment matching PATTERN')
)
select_options.add_argument(
'--gpu-only', action='store_true',
help='Select only GPU checks'
)
select_options.add_argument(
'--cpu-only', action='store_true',
help='Select only CPU checks'
)
# Action options
action_options.add_argument(
'-l', '--list', action='store_true',
help='List the selected checks'
)
action_options.add_argument(
'-L', '--list-detailed', action='store_true',
help='List the selected checks providing details for each test'
)
action_options.add_argument(
'-r', '--run', action='store_true',
help='Run the selected checks'
)
# Run options
run_options.add_argument(
'-J', '--job-option', action='append', metavar='OPT',
dest='job_options', default=[],
help='Pass option OPT to job scheduler'
)
run_options.add_argument(
'--force-local', action='store_true',
help='Force local execution of checks'
)
run_options.add_argument(
'--skip-sanity-check', action='store_true',
help='Skip sanity checking'
)
run_options.add_argument(
'--skip-performance-check', action='store_true',
help='Skip performance checking'
)
run_options.add_argument(
'--strict', action='store_true',
help='Enforce strict performance checking'
)
run_options.add_argument(
'--skip-system-check', action='store_true',
help='Skip system check'
)
run_options.add_argument(
'--skip-prgenv-check', action='store_true',
help='Skip programming environment check'
)
run_options.add_argument(
'--exec-policy', metavar='POLICY', action='store',
choices=['async', 'serial'], default='async',
help='Set the execution policy of ReFrame (default: "async")'
)
run_options.add_argument(
'--mode', action='store', help='Execution mode to use'
)
run_options.add_argument(
'--max-retries', metavar='NUM', action='store', default=0,
help='Set the maximum number of times a failed regression test '
'may be retried (default: 0)'
)
run_options.add_argument(
'--retry-failed', metavar='NUM', action='store', default=None,
help='Retry failed tests in a given runreport'
)
run_options.add_argument(
'--flex-alloc-nodes', action='store',
dest='flex_alloc_nodes', metavar='{all|STATE|NUM}', default=None,
help='Set strategy for the flexible node allocation (default: "idle").'
)
run_options.add_argument(
'--disable-hook', action='append', metavar='NAME', dest='hooks',
default=[], help='Disable a pipeline hook for this run'
)
env_options.add_argument(
'-M', '--map-module', action='append', metavar='MAPPING',
dest='module_mappings', default=[],
help='Add a module mapping',
envvar='RFM_MODULE_MAPPINGS ,', configvar='general/module_mappings'
)
env_options.add_argument(
'-m', '--module', action='append', default=[],
metavar='MOD', dest='user_modules',
help='Load module MOD before running any regression check',
envvar='RFM_USER_MODULES ,', configvar='general/user_modules'
)
env_options.add_argument(
'--module-mappings', action='store', metavar='FILE',
dest='module_map_file',
help='Load module mappings from FILE',
envvar='RFM_MODULE_MAP_FILE', configvar='general/module_map_file'
)
env_options.add_argument(
'-u', '--unload-module', action='append', metavar='MOD',
dest='unload_modules', default=[],
help='Unload module MOD before running any regression check',
envvar='RFM_UNLOAD_MODULES ,', configvar='general/unload_modules'
)
env_options.add_argument(
'--purge-env', action='store_true', dest='purge_env', default=False,
help='Unload all modules before running any regression check',
envvar='RFM_PURGE_ENVIRONMENT', configvar='general/purge_environment'
)
env_options.add_argument(
'--non-default-craype', action='store_true',
help='Test a non-default Cray Programming Environment',
envvar='RFM_NON_DEFAULT_CRAYPE', configvar='general/non_default_craype'
)
# Miscellaneous options
misc_options.add_argument(
'-C', '--config-file', action='store',
dest='config_file', metavar='FILE',
help='Set configuration file',
envvar='RFM_CONFIG_FILE'
)
misc_options.add_argument(
'--nocolor', action='store_false', dest='colorize',
help='Disable coloring of output',
envvar='RFM_COLORIZE', configvar='general/colorize'
)
misc_options.add_argument(
'--failure-stats', action='store_true', help='Print failure statistics'
)
misc_options.add_argument(
'--performance-report', action='store_true',
help='Print a report for performance tests'
)
misc_options.add_argument(
'--show-config', action='store', nargs='?', const='all',
metavar='PARAM',
help='Print the value of configuration parameter PARAM and exit'
)
misc_options.add_argument(
'--system', action='store', help='Load configuration for SYSTEM',
envvar='RFM_SYSTEM'
)
misc_options.add_argument(
'--upgrade-config-file', action='store', metavar='OLD[:NEW]',
help='Upgrade ReFrame 2.x configuration file to ReFrame 3.x syntax'
)
misc_options.add_argument(
'-V', '--version', action='version', version=os_ext.reframe_version()
)
misc_options.add_argument(
'-v', '--verbose', action='count',
help='Increase verbosity level of output',
envvar='RFM_VERBOSE', configvar='general/verbose'
)
# Options not associated with command-line arguments
argparser.add_argument(
dest='graylog_server',
envvar='RFM_GRAYLOG_ADDRESS',
configvar='logging/handlers_perflog/graylog_address',
help='Graylog server address'
)
argparser.add_argument(
dest='syslog_address',
envvar='RFM_SYSLOG_ADDRESS',
configvar='logging/handlers_perflog/syslog_address',
help='Syslog server address'
)
argparser.add_argument(
dest='ignore_reqnodenotavail',
envvar='RFM_IGNORE_REQNODENOTAVAIL',
configvar='schedulers/ignore_reqnodenotavail',
action='store_true',
help='Graylog server address'
)
argparser.add_argument(
dest='use_login_shell',
envvar='RFM_USE_LOGIN_SHELL',
configvar='general/use_login_shell',
action='store_true',
help='Use a login shell for job scripts'
)
if len(sys.argv) == 1:
argparser.print_help()
sys.exit(1)
# Parse command line
options = argparser.parse_args()
# First configure logging with our generic configuration so as to be able
# to print pretty messages; logging will be reconfigured by user's
# configuration later
site_config = config.load_config(
os.path.join(reframe.INSTALL_PREFIX, 'reframe/core/settings.py')
)
site_config.select_subconfig('generic')
options.update_config(site_config)
logging.configure_logging(site_config)
logging.getlogger().colorize = site_config.get('general/0/colorize')
printer = PrettyPrinter()
printer.colorize = site_config.get('general/0/colorize')
printer.inc_verbosity(site_config.get('general/0/verbose'))
if os.getenv('RFM_GRAYLOG_SERVER'):
printer.warning(
'RFM_GRAYLOG_SERVER environment variable is deprecated; '
'please use RFM_GRAYLOG_ADDRESS instead'
)
os.environ['RFM_GRAYLOG_ADDRESS'] = os.getenv('RFM_GRAYLOG_SERVER')
if options.upgrade_config_file is not None:
old_config, *new_config = options.upgrade_config_file.split(
':', maxsplit=1)
new_config = new_config[0] if new_config else None
try:
new_config = config.convert_old_config(old_config, new_config)
except Exception as e:
printer.error(f'could not convert file: {e}')
sys.exit(1)
printer.info(
f'Conversion successful! '
f'The converted file can be found at {new_config!r}.'
)
sys.exit(0)
# Now configure ReFrame according to the user configuration file
try:
try:
site_config = config.load_config(options.config_file)
except ReframeDeprecationWarning as e:
printer.warning(e)
converted = config.convert_old_config(options.config_file)
printer.warning(
f"configuration file has been converted "
f"to the new syntax here: '{converted}'"
)
site_config = config.load_config(converted)
site_config.validate()
# We ignore errors about unresolved sections or configuration
# parameters here, because they might be defined at the individual
# partition level and will be caught when we will instantiating
# internally the system and partitions later on.
site_config.select_subconfig(options.system,
ignore_resolve_errors=True)
for err in options.update_config(site_config):
printer.warning(str(err))
# Update options from the selected execution mode
if options.mode:
mode_args = site_config.get(f'modes/@{options.mode}/options')
# Parse the mode's options and reparse the command-line
options = argparser.parse_args(mode_args)
options = argparser.parse_args(namespace=options.cmd_options)
options.update_config(site_config)
logging.configure_logging(site_config)
except (OSError, ConfigError) as e:
printer.error(f'failed to load configuration: {e}')
sys.exit(1)
logging.getlogger().colorize = site_config.get('general/0/colorize')
printer.colorize = site_config.get('general/0/colorize')
printer.inc_verbosity(site_config.get('general/0/verbose'))
try:
runtime.init_runtime(site_config)
except ConfigError as e:
printer.error(f'failed to initialize runtime: {e}')
sys.exit(1)
rt = runtime.runtime()
try:
if site_config.get('general/0/module_map_file'):
rt.modules_system.load_mapping_from_file(
site_config.get('general/0/module_map_file')
)
if site_config.get('general/0/module_mappings'):
for m in site_config.get('general/0/module_mappings'):
rt.modules_system.load_mapping(m)
except (ConfigError, OSError) as e:
printer.error('could not load module mappings: %s' % e)
sys.exit(1)
if (os_ext.samefile(rt.stage_prefix, rt.output_prefix) and
not site_config.get('general/0/keep_stage_files')):
printer.error("stage and output refer to the same directory; "
"if this is on purpose, please use the "
"'--keep-stage-files' option.")
sys.exit(1)
# Show configuration after everything is set up
if options.show_config:
config_param = options.show_config
if config_param == 'all':
printer.info(str(rt.site_config))
else:
value = rt.get_option(config_param)
if value is None:
printer.error(
f'no such configuration parameter found: {config_param}'
)
else:
printer.info(json.dumps(value, indent=2))
sys.exit(0)
printer.debug(format_env(options.env_vars))
# Setup the check loader
if options.retry_failed:
with open(options.retry_failed) as f:
try:
restart_report = json.load(f)
except json.JSONDecodeError as e:
raise ReframeFatalError(
f"invalid runreport: '{restart_report}'"
) from e
schema_filename = os.path.join(reframe.INSTALL_PREFIX, 'reframe',
'schemas', 'runreport.json')
with open(schema_filename) as f:
try:
schema = json.load(f)
except json.JSONDecodeError as e:
raise ReframeFatalError(
f"invalid schema: '{schema_filename}'"
) from e
try:
jsonschema.validate(restart_report, schema)
except jsonschema.ValidationError as e:
raise ReframeError(f'could not validate run report: {restart_report!r}') from e
failed_checks = set()
failed_checks_prefixes = set()
# for run in restart_report['runs']:
for testcase in restart_report['runs'][-1]['testcases']:
if testcase['result'] == 'failure':
failed_checks.add(hash(testcase['name']) ^
hash(testcase['system']) ^
hash(testcase['environment']))
failed_checks_prefixes.add(testcase['prefix'])
loader = RegressionCheckLoader(
load_path=site_config.get('general/0/check_search_path'), #failed_checks_prefixes,
ignore_conflicts=site_config.get(
'general/0/ignore_check_conflicts')
)
else:
loader = RegressionCheckLoader(
load_path=site_config.get('general/0/check_search_path'),
recurse=site_config.get('general/0/check_search_recursive'),
ignore_conflicts=site_config.get(
'general/0/ignore_check_conflicts')
)
def print_infoline(param, value):
param = param + ':'
printer.info(f" {param.ljust(18)} {value}")
session_info = {
'cmdline': ' '.join(sys.argv),
'config_file': rt.site_config.filename,
'data_version': '1.0',
'hostname': socket.gethostname(),
'prefix_output': rt.output_prefix,
'prefix_stage': rt.stage_prefix,
'user': os_ext.osuser(),
'version': os_ext.reframe_version(),
'workdir': os.getcwd(),
}
# Print command line
printer.info(f"[ReFrame Setup]")
print_infoline('version', session_info['version'])
print_infoline('command', repr(session_info['cmdline']))
print_infoline(
f"launched by",
f"{session_info['user'] or '<unknown>'}@{session_info['hostname']}"
)
print_infoline('working directory', repr(session_info['workdir']))
print_infoline('settings file', f"{session_info['config_file']!r}")
print_infoline('check search path',
f"{'(R) ' if loader.recurse else ''}"
f"{':'.join(loader.load_path)!r}")
print_infoline('stage directory', repr(session_info['prefix_stage']))
print_infoline('output directory', repr(session_info['prefix_output']))
printer.info('')
try:
# Locate and load checks
try:
checks_found = loader.load_all()
except OSError as e:
raise ReframeError from e
# Filter checks by name
checks_matched = checks_found
if options.exclude_names:
for name in options.exclude_names:
checks_matched = filter(filters.have_not_name(name),
checks_matched)
if options.names:
checks_matched = filter(filters.have_name('|'.join(options.names)),
checks_matched)
# Filter checks by tags
for tag in options.tags:
checks_matched = filter(filters.have_tag(tag), checks_matched)
# Filter checks by prgenv
if not options.skip_prgenv_check:
for prgenv in options.prgenv:
checks_matched = filter(filters.have_prgenv(prgenv),
checks_matched)
# Filter checks by system
if not options.skip_system_check:
checks_matched = filter(
filters.have_partition(rt.system.partitions), checks_matched)
# Filter checks further
if options.gpu_only and options.cpu_only:
printer.error("options `--gpu-only' and `--cpu-only' "
"are mutually exclusive")
sys.exit(1)
if options.gpu_only:
checks_matched = filter(filters.have_gpu_only(), checks_matched)
elif options.cpu_only:
checks_matched = filter(filters.have_cpu_only(), checks_matched)
# Determine the allowed programming environments
allowed_environs = {e.name
for env_patt in options.prgenv
for p in rt.system.partitions
for e in p.environs if re.match(env_patt, e.name)}
# Generate the test cases, validate dependencies and sort them
checks_matched = list(checks_matched)
# Disable hooks
for c in checks_matched:
for h in options.hooks:
type(c).disable_hook(h)
testcases_og = generate_testcases(checks_matched,
options.skip_system_check,
options.skip_prgenv_check,
allowed_environs)
if options.retry_failed:
failed_cases = [tc for tc in testcases_og
if tc.__hash__() in failed_checks]
cases_graph = dependency.build_deps(failed_cases, testcases_og)
testcases = dependency.toposort(cases_graph, is_subgraph=True)
restored_tests = set()
for c in testcases:
for d in c.deps:
if d.__hash__() not in failed_checks:
restored_tests.add(d)
else:
testgraph = dependency.build_deps(testcases_og)
dependency.validate_deps(testgraph)
testcases = dependency.toposort(testgraph)
# Manipulate ReFrame's environment
if site_config.get('general/0/purge_environment'):
rt.modules_system.unload_all()
else:
for m in site_config.get('general/0/unload_modules'):
rt.modules_system.unload_module(m)
# Load the environment for the current system
try:
runtime.loadenv(rt.system.preload_environ)
except EnvironError as e:
printer.error("failed to load current system's environment; "
"please check your configuration")
printer.debug(str(e))
raise
for m in site_config.get('general/0/user_modules'):
try:
rt.modules_system.load_module(m, force=True)
except EnvironError as e:
printer.warning("could not load module '%s' correctly: "
"Skipping..." % m)
printer.debug(str(e))
options.flex_alloc_nodes = options.flex_alloc_nodes or 'idle'
# Act on checks
success = True
if options.list or options.list_detailed:
list_checks(list(checks_matched), printer, options.list_detailed)
elif options.run:
# Setup the execution policy
if options.exec_policy == 'serial':
exec_policy = SerialExecutionPolicy()
elif options.exec_policy == 'async':
exec_policy = AsynchronousExecutionPolicy()
else:
# This should not happen, since choices are handled by
# argparser
printer.error("unknown execution policy `%s': Exiting...")
sys.exit(1)
exec_policy.skip_system_check = options.skip_system_check
exec_policy.force_local = options.force_local
exec_policy.strict_check = options.strict
exec_policy.skip_sanity_check = options.skip_sanity_check
exec_policy.skip_performance_check = options.skip_performance_check
exec_policy.keep_stage_files = site_config.get(
'general/0/keep_stage_files'
)
try:
errmsg = "invalid option for --flex-alloc-nodes: '{0}'"
sched_flex_alloc_nodes = int(options.flex_alloc_nodes)
if sched_flex_alloc_nodes <= 0:
raise ConfigError(errmsg.format(options.flex_alloc_nodes))
except ValueError:
sched_flex_alloc_nodes = options.flex_alloc_nodes
exec_policy.sched_flex_alloc_nodes = sched_flex_alloc_nodes
parsed_job_options = []
for opt in options.job_options:
if opt.startswith('-') or opt.startswith('#'):
parsed_job_options.append(opt)
elif len(opt) == 1:
parsed_job_options.append(f'-{opt}')
else:
parsed_job_options.append(f'--{opt}')
exec_policy.sched_options = parsed_job_options
try:
max_retries = int(options.max_retries)
except ValueError:
raise ConfigError('--max-retries is not a valid integer: %s' %
max_retries) from None
runner = Runner(exec_policy, printer, max_retries)
try:
time_start = time.time()
session_info['time_start'] = time.strftime(
'%FT%T%z', time.localtime(time_start),
)
if options.retry_failed:
runner.restore(restored_tests, restart_report)
runner.runall(testcases, testcases_og)
finally:
time_end = time.time()
session_info['time_end'] = time.strftime(
'%FT%T%z', time.localtime(time_end)
)
session_info['time_elapsed'] = time_end - time_start
# Print a retry report if we did any retries
if runner.stats.failures(run=0):
printer.info(runner.stats.retry_report())
# Print a failure report if we had failures in the last run
if runner.stats.failures():
printer.info(runner.stats.failure_report())
success = False
if options.failure_stats:
printer.info(runner.stats.failure_stats())
if options.performance_report:
printer.info(runner.stats.performance_report())
# Generate the report for this session
report_file = os.path.normpath(
os_ext.expandvars(rt.get_option('general/0/report_file'))
)
basedir = os.path.dirname(report_file)
if basedir:
os.makedirs(basedir, exist_ok=True)
# Build final JSON report
run_stats = runner.stats.json()
session_info.update({
'num_cases': run_stats[0]['num_cases'],
'num_failures': run_stats[-1]['num_failures']
})
json_report = {
'session_info': session_info,
'runs': run_stats
}
report_file = generate_report_filename(report_file)
try:
with open(report_file, 'w') as fp:
jsonext.dump(json_report, fp, indent=2)
except OSError as e:
printer.warning(
f'failed to generate report in {report_file!r}: {e}'
)
else:
printer.error("No action specified. Please specify `-l'/`-L' for "
"listing or `-r' for running. "
"Try `%s -h' for more options." %
argparser.prog)
sys.exit(1)
if not success:
sys.exit(1)
sys.exit(0)
except KeyboardInterrupt:
sys.exit(1)
except ReframeError as e:
printer.error(str(e))
sys.exit(1)
except (Exception, ReframeFatalError):
printer.error(format_exception(*sys.exc_info()))
sys.exit(1)
finally:
try:
if site_config.get('general/0/save_log_files'):
logging.save_log_files(rt.output_prefix)
except OSError as e:
printer.error('could not save log file: %s' % e)
sys.exit(1)
|
31,442 | def close_false_positive_command(client: Client, args: dict):
alert_ids = args.get('alert_ids')
custom_filter = args.get('custom_filter')
comment = args.get('comment')
reason = CLOSE_FALSE_POSITIVE_REASON_OPTIONS.get(str(args.get('reason')))
sendFeedback = bool(args.get('sendFeedback'))
feedbackText = args.get('feedbackText')
allowContact = bool(args.get('allowContact'))
contactEmail = args.get('contactEmail')
request_data = args_to_filter_close_alerts(alert_ids, custom_filter, comment, reason,
sendFeedback, feedbackText, allowContact, contactEmail)
closed_false_positive_data = {}
try:
closed_false_positive_data = client.close_false_positive(request_data)
except Exception as e:
if 'alertsNotFound' in str(e):
raise DemistoException('Error: This alert id is already closed or does not exist.')
number_of_closed_false_positive_alerts = closed_false_positive_data['closed_false_positive']
return CommandResults(
readable_output=f'{number_of_closed_false_positive_alerts} alerts are classified as closed false positive',
outputs_prefix='MicrosoftCloudAppSecurity.Alerts',
outputs_key_field='_id',
outputs=closed_false_positive_data)
| def close_false_positive_command(client: Client, args: dict):
alert_ids = args.get('alert_ids')
custom_filter = args.get('custom_filter')
comment = args.get('comment')
reason = CLOSE_FALSE_POSITIVE_REASON_OPTIONS.get(str(args.get('reason')))
sendFeedback = argToBoolean(args.get('sendFeedback'))
feedbackText = args.get('feedbackText')
allowContact = bool(args.get('allowContact'))
contactEmail = args.get('contactEmail')
request_data = args_to_filter_close_alerts(alert_ids, custom_filter, comment, reason,
sendFeedback, feedbackText, allowContact, contactEmail)
closed_false_positive_data = {}
try:
closed_false_positive_data = client.close_false_positive(request_data)
except Exception as e:
if 'alertsNotFound' in str(e):
raise DemistoException('Error: This alert id is already closed or does not exist.')
number_of_closed_false_positive_alerts = closed_false_positive_data['closed_false_positive']
return CommandResults(
readable_output=f'{number_of_closed_false_positive_alerts} alerts are classified as closed false positive',
outputs_prefix='MicrosoftCloudAppSecurity.Alerts',
outputs_key_field='_id',
outputs=closed_false_positive_data)
|
35,040 | def batch_matmul(
tensor_a,
tensor_b,
oshape=None,
out_dtype=None,
transpose_a=False,
transpose_b=True,
auto_scheduler_rewritten_layout="",
):
"""Computes batch matrix multiplication of `A` and `B` when `A` and `B` are
data in batch. Supports broadcasting for batch dimension.
The A & B can be transposed. For legacy reason, we use NT format(tensor_a non-transposed
and tensor_b transposed) by default.
Parameters
----------
tensor_a : tvm.te.Tensor
3-D with shape [batch, M, K] or [batch, K, M]
tensor_b : tvm.te.Tensor
3-D with shape [batch, K, N] or [batch, N, K]
oshape : List[Optional]
Explicit intended output shape of the computation. Can be useful in cases
with dynamic input shapes.
auto_scheduler_rewritten_layout: Optional[str] = ""
The layout after auto-scheduler's layout rewrite pass.
out_dtype : Optional[str]
Specifies the output data type for mixed precision batch matmul
transpose_a : Optional[bool] = False
Whether the data tensor is in transposed format.
transpose_b : Optional[bool] = True
Whether the weight tensor is in transposed format.
Returns
-------
output : tvm.te.Tensor
3-D with shape [batch, M, N]
"""
assert len(tensor_a.shape) == 3, "only support 3-dim batch_matmul"
if transpose_a:
XB, XK, XI = get_const_tuple(tensor_a.shape)
else:
XB, XI, XK = get_const_tuple(tensor_a.shape)
if auto_scheduler_rewritten_layout:
# Infer shape for the rewritten layout
YB, YK, YJ = auto_scheduler.get_shape_from_rewritten_layout(
auto_scheduler_rewritten_layout, ["b", "k", "j"]
)
auto_scheduler.remove_index_check(tensor_b)
else:
assert len(tensor_b.shape) == 3, "only support 3-dim batch_matmul"
if transpose_b:
YB, YJ, YK = get_const_tuple(tensor_b.shape)
else:
YB, YK, YJ = get_const_tuple(tensor_b.shape)
assert XK == YK or isinstance(YK, tvm.tir.expr.Var), "shapes of x and y is inconsistent"
k = te.reduce_axis((0, XK), name="k")
if oshape is None:
assert XB == YB or XB == 1 or YB == 1, "batch dimension doesn't match"
batch = (
tvm.tir.Any()
if isinstance(XB, tvm.tir.expr.Var) or isinstance(YB, tvm.tir.expr.Var)
else te.max(XB, YB)
)
oshape = (batch, XI, YJ)
if out_dtype is None:
out_dtype = tensor_a.dtype
if (transpose_a, transpose_b) == (True, True):
compute_lambda = lambda b, i, j: te.sum(
tensor_a[b if XB != 1 else 0, k, i].astype(out_dtype)
* tensor_b[b if YB != 1 else 0, j, k].astype(out_dtype),
axis=k,
)
compute_name = "T_batch_matmul_TT"
elif (transpose_a, transpose_b) == (True, False):
compute_lambda = lambda b, i, j: te.sum(
tensor_a[b if XB != 1 else 0, k, i].astype(out_dtype)
* tensor_b[b if YB != 1 else 0, k, j].astype(out_dtype),
axis=k,
)
compute_name = "T_batch_matmul_TN"
elif (transpose_a, transpose_b) == (False, True):
compute_lambda = lambda b, i, j: te.sum(
tensor_a[b if XB != 1 else 0, i, k].astype(out_dtype)
* tensor_b[b if YB != 1 else 0, j, k].astype(out_dtype),
axis=k,
)
compute_name = "T_batch_matmul_NT"
else: # (transpose_a, transpose_b) == (False, False):
compute_lambda = lambda b, i, j: te.sum(
tensor_a[b if XB != 1 else 0, i, k].astype(out_dtype)
* tensor_b[b if YB != 1 else 0, k, j].astype(out_dtype),
axis=k,
)
compute_name = "T_batch_matmul_NN"
output = te.compute(
oshape,
compute_lambda,
name=compute_name,
tag="batch_matmul",
attrs={"layout_free_placeholders": [tensor_b]},
)
if auto_scheduler_rewritten_layout:
output = auto_scheduler.rewrite_compute_body(output, auto_scheduler_rewritten_layout)
return output
| def batch_matmul(
tensor_a,
tensor_b,
oshape=None,
out_dtype=None,
transpose_a=False,
transpose_b=True,
auto_scheduler_rewritten_layout="",
):
"""Computes batch matrix multiplication of `A` and `B` when `A` and `B` are
data in batch. Supports broadcasting for batch dimension.
The A & B can be transposed. For legacy reason, we use NT format(tensor_a non-transposed
and tensor_b transposed) by default.
Parameters
----------
tensor_a : tvm.te.Tensor
3-D with shape [batch, M, K] or [batch, K, M]
tensor_b : tvm.te.Tensor
3-D with shape [batch, K, N] or [batch, N, K]
oshape : List[Optional]
Explicit intended output shape of the computation. Can be useful in cases
with dynamic input shapes.
auto_scheduler_rewritten_layout: Optional[str] = ""
The layout after auto-scheduler's layout rewrite pass.
out_dtype : Optional[str]
Specifies the output data type for mixed precision batch matmul
transpose_a : Optional[bool] = False
Whether the data tensor is in transposed format.
transpose_b : Optional[bool] = True
Whether the weight tensor is in transposed format.
Returns
-------
output : tvm.te.Tensor
3-D with shape [batch, M, N]
"""
assert len(tensor_a.shape) == 3, "only support 3-dim batch_matmul"
if transpose_a:
XB, XK, XI = get_const_tuple(tensor_a.shape)
else:
XB, XI, XK = get_const_tuple(tensor_a.shape)
if auto_scheduler_rewritten_layout:
# Infer shape for the rewritten layout
YB, YK, YJ = auto_scheduler.get_shape_from_rewritten_layout(
auto_scheduler_rewritten_layout, ["b", "k", "j"]
)
auto_scheduler.remove_index_check(tensor_b)
else:
assert len(tensor_b.shape) == 3, "only support 3-dim tensor_b"
if transpose_b:
YB, YJ, YK = get_const_tuple(tensor_b.shape)
else:
YB, YK, YJ = get_const_tuple(tensor_b.shape)
assert XK == YK or isinstance(YK, tvm.tir.expr.Var), "shapes of x and y is inconsistent"
k = te.reduce_axis((0, XK), name="k")
if oshape is None:
assert XB == YB or XB == 1 or YB == 1, "batch dimension doesn't match"
batch = (
tvm.tir.Any()
if isinstance(XB, tvm.tir.expr.Var) or isinstance(YB, tvm.tir.expr.Var)
else te.max(XB, YB)
)
oshape = (batch, XI, YJ)
if out_dtype is None:
out_dtype = tensor_a.dtype
if (transpose_a, transpose_b) == (True, True):
compute_lambda = lambda b, i, j: te.sum(
tensor_a[b if XB != 1 else 0, k, i].astype(out_dtype)
* tensor_b[b if YB != 1 else 0, j, k].astype(out_dtype),
axis=k,
)
compute_name = "T_batch_matmul_TT"
elif (transpose_a, transpose_b) == (True, False):
compute_lambda = lambda b, i, j: te.sum(
tensor_a[b if XB != 1 else 0, k, i].astype(out_dtype)
* tensor_b[b if YB != 1 else 0, k, j].astype(out_dtype),
axis=k,
)
compute_name = "T_batch_matmul_TN"
elif (transpose_a, transpose_b) == (False, True):
compute_lambda = lambda b, i, j: te.sum(
tensor_a[b if XB != 1 else 0, i, k].astype(out_dtype)
* tensor_b[b if YB != 1 else 0, j, k].astype(out_dtype),
axis=k,
)
compute_name = "T_batch_matmul_NT"
else: # (transpose_a, transpose_b) == (False, False):
compute_lambda = lambda b, i, j: te.sum(
tensor_a[b if XB != 1 else 0, i, k].astype(out_dtype)
* tensor_b[b if YB != 1 else 0, k, j].astype(out_dtype),
axis=k,
)
compute_name = "T_batch_matmul_NN"
output = te.compute(
oshape,
compute_lambda,
name=compute_name,
tag="batch_matmul",
attrs={"layout_free_placeholders": [tensor_b]},
)
if auto_scheduler_rewritten_layout:
output = auto_scheduler.rewrite_compute_body(output, auto_scheduler_rewritten_layout)
return output
|
37,417 | def _triu_to_dense(triu: np.ndarray) -> np.ndarray:
"""
Converts upper triangular part of matrix to dense matrix.
Args:
triu: array in the form [[A, B, C], [D, E], [F]]
Returns:
Array [[A, B, C], [B, D, E], [C, E, F]]
"""
dim = len(triu)
matrix = np.empty((dim, dim), dtype=complex)
for i in range(dim):
for j in range(dim - i):
matrix[i, i + j] = triu[i][j]
if j != 0:
matrix[i + j, i] = triu[i][j]
return matrix
| def _triu_to_dense(triu: np.ndarray) -> np.ndarray:
"""Converts upper triangular part of matrix to dense matrix.
Args:
triu: array in the form [[A, B, C], [D, E], [F]]
Returns:
Array [[A, B, C], [B, D, E], [C, E, F]]
"""
dim = len(triu)
matrix = np.empty((dim, dim), dtype=complex)
for i in range(dim):
for j in range(dim - i):
matrix[i, i + j] = triu[i][j]
if j != 0:
matrix[i + j, i] = triu[i][j]
return matrix
|
52,119 | def mask_bad(float_img, bad_type='native'):
""" Create a mask with desired "bad" pixels of the input floaat image marked.
Inputs:
float_img = image represented by an nd-array (data type: float). Most probably, it is the result of some calculation based on the original image. So the datatype is float, and it is possible to have some "bad" values, i.e. nan and/or inf
bad_type = definition of "bad" type, can be 'nan', 'inf' or 'native'
Returns:
mask = A mask indicating the locations of "bad" pixels
:param float_img: np.ndarray
:param bad_type: str
:return: np.ndarray
"""
params.device += 1
size_img = np.shape(float_img)
if len(size_img) != 2:
fatal_error('Input image is not a single channel image!')
mask = np.zeros(size_img, dtype='uint8')
idx_nan, idy_nan = np.where(np.isnan(float_img) == 1)
idx_inf, idy_inf = np.where(np.isinf(float_img) == 1)
# neither nan nor inf exists in the image, print out a message and the mask would just be all zero
if len(idx_nan) == 0 and len(idx_inf) == 0:
mask = mask
print('Neither nan nor inf appears in the current image.')
## at least one of the "bad" exists
# desired bad to mark is "native"
elif bad_type.lower() == 'native':
# mask[np.isnan(gray_img)] = 255
# mask[np.isinf(gray_img)] = 255
mask[idx_nan, idy_nan] = 255
mask[idx_inf, idy_inf] = 255
elif bad_type.lower() == 'nan' and len(idx_nan) >= 1:
mask[idx_nan, idy_nan] = 255
elif bad_type.lower() == 'inf' and len(idx_inf) >= 1:
mask[idx_inf, idy_inf] = 255
# "bad" exists but not the user desired bad type, return the all-zero mask
else:
mask = mask
print('{} does not appear in the current image.'.format(bad_type.lower()))
_debug(visual=mask, filename=os.path.join(params.debug_outdir, str(params.device) + "_bad_mask.png"))
return mask | def mask_bad(float_img, bad_type='native'):
""" Create a mask with desired "bad" pixels of the input floaat image marked.
Inputs:
float_img = image represented by an nd-array (data type: float). Most probably, it is the result of some calculation based on the original image. So the datatype is float, and it is possible to have some "bad" values, i.e. nan and/or inf
bad_type = definition of "bad" type, can be 'nan', 'inf' or 'native'
Returns:
mask = A mask indicating the locations of "bad" pixels
:param float_img: np.ndarray
:param bad_type: str
:return: np.ndarray
"""
params.device += 1
size_img = np.shape(float_img)
if len(size_img) != 2:
fatal_error('Input image is not a single channel image!')
mask = np.zeros(size_img, dtype='uint8')
idx_nan, idy_nan = np.where(np.isnan(float_img) == 1)
idx_inf, idy_inf = np.where(np.isinf(float_img) == 1)
# neither nan nor inf exists in the image, print out a message and the mask would just be all zero
if len(idx_nan) == 0 and len(idx_inf) == 0:
mask = mask
print('Neither nan nor inf appears in the current image.')
## at least one of the "bad" exists
# desired bad to mark is "native"
elif bad_type.lower() == 'native':
# mask[np.isnan(gray_img)] = 255
# mask[np.isinf(gray_img)] = 255
mask[idx_nan, idy_nan] = 255
mask[idx_inf, idy_inf] = 255
elif bad_type.lower() == 'nan' and len(idx_nan) >= 1:
mask[idx_nan, idy_nan] = 255
elif bad_type.lower() == 'inf' and len(idx_inf) >= 1:
mask[idx_inf, idy_inf] = 255
# "bad" exists but not the user desired bad type, return the all-zero mask
else:
mask = mask
print('{} does not appear in the current image.'.format(bad_type.lower()))
_debug(visual=mask, filename=os.path.join(params.debug_outdir, str(params.device) + "_bad_mask.png"))
return mask
|
8,651 | def find_config(config_dir, name, extension='.cfg'):
"""Build the absolute path for the given configuration file ``name``
:param str config_dir: path to the configuration directory
:param str name: configuration file ``name``
:param str extension: configuration file's extension (default to ``.cfg``)
:return: the path of the configuration file, either in the current
directory or from the ``config_dir`` directory
This function tries different location:
* the current directory,
* the ``config_dir`` directory with the ``extension`` suffix,
* the ``config_dir`` directory without a suffix,
Example::
>>> from sopel import run_script
>>> os.listdir()
['local.cfg', 'extra.ini']
>>> os.listdir(config.DEFAULT_HOMEDIR)
['config.cfg', 'extra.ini', 'module.cfg', 'README']
>>> run_script.find_config(config.DEFAULT_HOMEDIR, 'local.cfg')
'local.cfg'
>>> run_script.find_config(config.DEFAULT_HOMEDIR, 'local')
'/home/username/.sopel/local'
>>> run_script.find_config(config.DEFAULT_HOMEDIR, 'config')
'/home/username/.sopel/config.cfg'
>>> run_script.find_config(config.DEFAULT_HOMEDIR, 'extra', '.ini')
'/home/username/.sopel/extra.ini'
"""
if os.path.isfile(name):
return name
name_ext = name + extension
for config in enumerate_configs(config_dir, extension):
if name_ext == config:
return os.path.join(config_dir, name_ext)
return os.path.join(config_dir, name)
| def find_config(config_dir, name, extension='.cfg'):
"""Build the absolute path for the given configuration file ``name``
:param str config_dir: path to the configuration directory
:param str name: configuration file ``name``
:param str extension: configuration file's extension (default to ``.cfg``)
:return: the path of the configuration file, either in the current
directory or from the ``config_dir`` directory
This function tries different location:
* the current directory,
* the ``config_dir`` directory with the ``extension`` suffix,
* the ``config_dir`` directory without a suffix
Example::
>>> from sopel import run_script
>>> os.listdir()
['local.cfg', 'extra.ini']
>>> os.listdir(config.DEFAULT_HOMEDIR)
['config.cfg', 'extra.ini', 'module.cfg', 'README']
>>> run_script.find_config(config.DEFAULT_HOMEDIR, 'local.cfg')
'local.cfg'
>>> run_script.find_config(config.DEFAULT_HOMEDIR, 'local')
'/home/username/.sopel/local'
>>> run_script.find_config(config.DEFAULT_HOMEDIR, 'config')
'/home/username/.sopel/config.cfg'
>>> run_script.find_config(config.DEFAULT_HOMEDIR, 'extra', '.ini')
'/home/username/.sopel/extra.ini'
"""
if os.path.isfile(name):
return name
name_ext = name + extension
for config in enumerate_configs(config_dir, extension):
if name_ext == config:
return os.path.join(config_dir, name_ext)
return os.path.join(config_dir, name)
|
41,240 | def mixture(
val: Any, default: Any = RaiseTypeErrorIfNotProvided
) -> Union[Sequence[Tuple[float, np.ndarray]], TDefault]:
"""Return a sequence of tuples representing a probabilistic unitary.
A mixture is described by an iterable of tuples of the form
(probability of unitary, unitary as numpy array)
The probability components of the tuples must sum to 1.0 and be
non-negative.
Determines the Mixture representation of `val` by the following strategies:
1. Try to use `val._mixture_()`.
Case a) Method not present or returns `None`.
Continue to next strategy.
Case b) Returns the Mixture representation.
Return the result.
2. Try to use `unitary()`.
Case a) Method not present or returns `None`.
Continue to next strategy.
Case b) Method returns a unitary.
Convert unitary into mixture and return.
3. Try to use serial concatenation recursively.
Case a) One or more decomposed operators doesn't have mixture.
`val` does not have a mixture representation.
Case b) All decomposed operators have mixture representation.
Serially concatenate and return the result using superoperator
as intermediate.
Args:
val: The value to decompose into a mixture of unitaries.
default: A default value if val does not support mixture.
Returns:
An iterable of tuples of size 2. The first element of the tuple is a
probability (between 0 and 1) and the second is the object that occurs
with that probability in the mixture. The probabilities will sum to 1.0.
Raises:
TypeError: If `val` has no `_mixture_` or `_unitary_` mehod, or if it
does and this method returned `NotImplemented`.
"""
mixture_result = _gettr_helper(val, ['_mixture_'])
if mixture_result is not None and mixture_result is not NotImplemented:
return mixture_result
unitary_result = unitary_protocol.unitary(val, None)
if unitary_result is not None and unitary_result is not NotImplemented:
return ((1.0, unitary_result),)
decomposed, qubits, _ = _try_decompose_into_operations_and_qubits(val)
# serial concatenation
if decomposed is not None and decomposed != [val] and decomposed != []:
if all([has_mixture(x) for x in decomposed]):
superoperator_list = [_moment_superoperator(x, qubits, None) for x in decomposed]
if not any([x is None for x in superoperator_list]):
superoperator_result = reduce(lambda x, y: x @ y, superoperator_list)
return tuple(_superoperator_to_mixture(superoperator_result))
if default is not RaiseTypeErrorIfNotProvided:
return default
if _gettr_helper(val, ['_unitary_', '_mixture_']) is None:
raise TypeError(f"object of type '{type(val)}' has no _mixture_ or _unitary_ method.")
raise TypeError(
"object of type '{}' does have a _mixture_ or _unitary_ "
"method, but it returned NotImplemented.".format(type(val))
)
| def mixture(
val: Any, default: Any = RaiseTypeErrorIfNotProvided
) -> Union[Sequence[Tuple[float, np.ndarray]], TDefault]:
"""Return a sequence of tuples representing a probabilistic unitary.
A mixture is described by an iterable of tuples of the form
(probability of unitary, unitary as numpy array)
The probability components of the tuples must sum to 1.0 and be
non-negative.
Determines the mixture representation of `val` by the following strategies:
1. Try to use `val._mixture_()`.
Case a) Method not present or returns `None`.
Continue to next strategy.
Case b) Returns the Mixture representation.
Return the result.
2. Try to use `unitary()`.
Case a) Method not present or returns `None`.
Continue to next strategy.
Case b) Method returns a unitary.
Convert unitary into mixture and return.
3. Try to use serial concatenation recursively.
Case a) One or more decomposed operators doesn't have mixture.
`val` does not have a mixture representation.
Case b) All decomposed operators have mixture representation.
Serially concatenate and return the result using superoperator
as intermediate.
Args:
val: The value to decompose into a mixture of unitaries.
default: A default value if val does not support mixture.
Returns:
An iterable of tuples of size 2. The first element of the tuple is a
probability (between 0 and 1) and the second is the object that occurs
with that probability in the mixture. The probabilities will sum to 1.0.
Raises:
TypeError: If `val` has no `_mixture_` or `_unitary_` mehod, or if it
does and this method returned `NotImplemented`.
"""
mixture_result = _gettr_helper(val, ['_mixture_'])
if mixture_result is not None and mixture_result is not NotImplemented:
return mixture_result
unitary_result = unitary_protocol.unitary(val, None)
if unitary_result is not None and unitary_result is not NotImplemented:
return ((1.0, unitary_result),)
decomposed, qubits, _ = _try_decompose_into_operations_and_qubits(val)
# serial concatenation
if decomposed is not None and decomposed != [val] and decomposed != []:
if all([has_mixture(x) for x in decomposed]):
superoperator_list = [_moment_superoperator(x, qubits, None) for x in decomposed]
if not any([x is None for x in superoperator_list]):
superoperator_result = reduce(lambda x, y: x @ y, superoperator_list)
return tuple(_superoperator_to_mixture(superoperator_result))
if default is not RaiseTypeErrorIfNotProvided:
return default
if _gettr_helper(val, ['_unitary_', '_mixture_']) is None:
raise TypeError(f"object of type '{type(val)}' has no _mixture_ or _unitary_ method.")
raise TypeError(
"object of type '{}' does have a _mixture_ or _unitary_ "
"method, but it returned NotImplemented.".format(type(val))
)
|
23,257 | def test_domain_cpp_ast_attributes():
# style: C++
check('member', '[[]] int f', {1: 'f__i', 2: '1f'})
check('member', '[ [ ] ] int f', {1: 'f__i', 2: '1f'},
# this will fail when the proper grammar is implemented
output='[[ ]] int f')
check('member', '[[a]] int f', {1: 'f__i', 2: '1f'})
# style: GNU
check('member', '__attribute__(()) int f', {1: 'f__i', 2: '1f'})
check('member', '__attribute__((a)) int f', {1: 'f__i', 2: '1f'})
check('member', '__attribute__((a, b)) int f', {1: 'f__i', 2: '1f'})
check('member', '__attribute__((optimize(3))) int f', {1: 'f__i', 2: '1f'})
check('member', '__attribute__((format(printf, 1, 2))) int f', {1: 'f__i', 2: '1f'})
# style: user-defined id
check('member', 'id_attr int f', {1: 'f__i', 2: '1f'})
# style: user-defined paren
check('member', 'paren_attr() int f', {1: 'f__i', 2: '1f'})
check('member', 'paren_attr(a) int f', {1: 'f__i', 2: '1f'})
check('member', 'paren_attr("") int f', {1: 'f__i', 2: '1f'})
check('member', 'paren_attr(()[{}][]{}) int f', {1: 'f__i', 2: '1f'})
with pytest.raises(DefinitionError):
parse('member', 'paren_attr(() int f')
with pytest.raises(DefinitionError):
parse('member', 'paren_attr([) int f')
with pytest.raises(DefinitionError):
parse('member', 'paren_attr({) int f')
with pytest.raises(DefinitionError):
parse('member', 'paren_attr([)]) int f')
with pytest.raises(DefinitionError):
parse('member', 'paren_attr((])) int f')
with pytest.raises(DefinitionError):
parse('member', 'paren_attr({]}) int f')
# position: decl specs
check('function', 'static inline __attribute__(()) void f()',
{1: 'f', 2: '1fv'},
output='__attribute__(()) static inline void f()')
check('function', '[[attr1]] [[attr2]] void f()', {1: 'f', 2: '1fv'})
# position: declarator
check('member', 'int *[[attr]] i', {1: 'i__iP', 2: '1i'})
check('member', 'int *const [[attr]] volatile i', {1: 'i__iPVC', 2: '1i'},
output='int *[[attr]] volatile const i')
check('member', 'int &[[attr]] i', {1: 'i__iR', 2: '1i'})
check('member', 'int *[[attr]] *i', {1: 'i__iPP', 2: '1i'})
# position: parameters and qualifiers
check('function', 'void f() [[attr1]] [[attr2]]', {1: 'f', 2: '1fv'})
# position: class
check('class', '{key}[[nodiscard]] Foo', {1: 'Foo', 2: '3Foo', 3: '3Foo', 4: '3Foo'},
key='class')
check('union', '{key}[[nodiscard]] Foo', {1: None, 2: '3Foo', 3: '3Foo', 4: '3Foo'},
key='union')
# position: enum
check('enum', '{key}[[nodiscard]] Foo', {1: None, 2: '3Foo', 3: '3Foo', 4: '3Foo'},
key='enum')
| def test_domain_cpp_ast_attributes():
# style: C++
check('member', '[[]] int f', {1: 'f__i', 2: '1f'})
check('member', '[ [ ] ] int f', {1: 'f__i', 2: '1f'},
# this will fail when the proper grammar is implemented
output='[[ ]] int f')
check('member', '[[a]] int f', {1: 'f__i', 2: '1f'})
# style: GNU
check('member', '__attribute__(()) int f', {1: 'f__i', 2: '1f'})
check('member', '__attribute__((a)) int f', {1: 'f__i', 2: '1f'})
check('member', '__attribute__((a, b)) int f', {1: 'f__i', 2: '1f'})
check('member', '__attribute__((optimize(3))) int f', {1: 'f__i', 2: '1f'})
check('member', '__attribute__((format(printf, 1, 2))) int f', {1: 'f__i', 2: '1f'})
# style: user-defined id
check('member', 'id_attr int f', {1: 'f__i', 2: '1f'})
# style: user-defined paren
check('member', 'paren_attr() int f', {1: 'f__i', 2: '1f'})
check('member', 'paren_attr(a) int f', {1: 'f__i', 2: '1f'})
check('member', 'paren_attr("") int f', {1: 'f__i', 2: '1f'})
check('member', 'paren_attr(()[{}][]{}) int f', {1: 'f__i', 2: '1f'})
with pytest.raises(DefinitionError):
parse('member', 'paren_attr(() int f')
with pytest.raises(DefinitionError):
parse('member', 'paren_attr([) int f')
with pytest.raises(DefinitionError):
parse('member', 'paren_attr({) int f')
with pytest.raises(DefinitionError):
parse('member', 'paren_attr([)]) int f')
with pytest.raises(DefinitionError):
parse('member', 'paren_attr((])) int f')
with pytest.raises(DefinitionError):
parse('member', 'paren_attr({]}) int f')
# position: decl specs
check('function', 'static inline __attribute__(()) void f()',
{1: 'f', 2: '1fv'},
output='__attribute__(()) static inline void f()')
check('function', '[[attr1]] [[attr2]] void f()', {1: 'f', 2: '1fv'})
# position: declarator
check('member', 'int *[[attr]] i', {1: 'i__iP', 2: '1i'})
check('member', 'int *const [[attr]] volatile i', {1: 'i__iPVC', 2: '1i'},
output='int *[[attr]] volatile const i')
check('member', 'int &[[attr]] i', {1: 'i__iR', 2: '1i'})
check('member', 'int *[[attr]] *i', {1: 'i__iPP', 2: '1i'})
# position: parameters and qualifiers
check('function', 'void f() [[attr1]] [[attr2]]', {1: 'f', 2: '1fv'})
# position: class
check('class', '{key}[[nodiscard]] Foo', {1: 'Foo', 2: '3Foo', 3: '3Foo', 4: '3Foo'},
key='class')
check('union', '{key}[[nodiscard]] Foo', {1: None, 2: '3Foo', 3: '3Foo', 4: '3Foo'},
key='union')
# position: enum
check('enum', '{key}[[nodiscard]] Foo', {1: None, 2: '3Foo'}, key='enum')
|
39,567 | def main():
"""
Synchronizes a github repository with a local repository.
"""
logging.basicConfig(
format='[%(asctime)s] %(levelname)s -- %(message)s',
level=logging.DEBUG)
parser = argparse.ArgumentParser(description='Synchronizes a github repository with a local repository.')
parser.add_argument('git_url', help='Url of the repo to sync')
parser.add_argument('--branch_name', default=None, required=False, help='Branch of repo to sync', nargs='?')
parser.add_argument('--repo_dir', default='.', required=False, help='Path to clone repo under', nargs='?')
args = parser.parse_args()
for line in GitPuller(
args.git_url,
args.repo_dir,
branch=args.branch_name
).pull():
print(line)
| def main():
"""
Synchronizes a github repository with a local repository.
"""
logging.basicConfig(
format='[%(asctime)s] %(levelname)s -- %(message)s',
level=logging.DEBUG)
parser = argparse.ArgumentParser(description='Synchronizes a github repository with a local repository.')
parser.add_argument('git_url', help='Url of the repo to sync')
parser.add_argument('--branch_name', default=None, required=False, help='Branch of repo to sync', nargs='?')
parser.add_argument('--repo-dir', default='.', required=False, help='Path to clone repo under', nargs='?')
args = parser.parse_args()
for line in GitPuller(
args.git_url,
args.repo_dir,
branch=args.branch_name
).pull():
print(line)
|
29,224 | def get_exploration_opportunity_summary_from_model(model):
"""Returns the ExplorationOpportunitySummary object out of the model.
Args:
model: ExplorationOpportunitySummaryModel. The exploration opportunity
summary model.
Returns:
ExplorationOpportunitySummary. The corresponding
ExplorationOpportunitySummary object.
"""
# We're making sure that the audio language codes in any exploration
# opportunity domain object match the ones in
# constants.SUPPORTED_AUDIO_LANGUAGES.
set_of_all_languages = set(
model.incomplete_translation_language_codes +
model.language_codes_needing_voice_artists +
model.language_codes_with_assigned_voice_artists)
supported_language_codes = set(
language['id'] for language in constants.SUPPORTED_AUDIO_LANGUAGES)
missing_language_codes = list(
supported_language_codes - set_of_all_languages)
if missing_language_codes:
logging.info(
'Missing language codes %s in exploration opportunity model with '
'id %s' % (missing_language_codes, model.id))
new_incomplete_translation_language_codes = (
model.incomplete_translation_language_codes + missing_language_codes)
translation_in_review_counts = {}
for lc in constants.SUPPORTED_CONTENT_LANGUAGES:
in_review_count = len(
suggestion_models.GeneralSuggestionModel
.get_translation_suggestions_in_review_with_exp_id(
model.id,
lc['code']))
if in_review_count > 0:
translation_in_review_counts[lc['code']] = in_review_count
return opportunity_domain.ExplorationOpportunitySummary(
model.id, model.topic_id, model.topic_name, model.story_id,
model.story_title, model.chapter_title, model.content_count,
new_incomplete_translation_language_codes, model.translation_counts,
model.language_codes_needing_voice_artists,
model.language_codes_with_assigned_voice_artists,
translation_in_review_counts)
| def get_exploration_opportunity_summary_from_model(model):
"""Returns the ExplorationOpportunitySummary object out of the model.
Args:
model: ExplorationOpportunitySummaryModel. The exploration opportunity
summary model.
Returns:
ExplorationOpportunitySummary. The corresponding
ExplorationOpportunitySummary object.
"""
# We're making sure that the audio language codes in any exploration
# opportunity domain object match the ones in
# constants.SUPPORTED_AUDIO_LANGUAGES.
set_of_all_languages = set(
model.incomplete_translation_language_codes +
model.language_codes_needing_voice_artists +
model.language_codes_with_assigned_voice_artists)
supported_language_codes = set(
language['id'] for language in constants.SUPPORTED_AUDIO_LANGUAGES)
missing_language_codes = list(
supported_language_codes - set_of_all_languages)
if missing_language_codes:
logging.info(
'Missing language codes %s in exploration opportunity model with '
'id %s' % (missing_language_codes, model.id))
new_incomplete_translation_language_codes = (
model.incomplete_translation_language_codes + missing_language_codes)
translation_in_review_counts = {}
for language_code in constants.SUPPORTED_CONTENT_LANGUAGES:
in_review_count = len(
suggestion_models.GeneralSuggestionModel
.get_translation_suggestions_in_review_with_exp_id(
model.id,
lc['code']))
if in_review_count > 0:
translation_in_review_counts[lc['code']] = in_review_count
return opportunity_domain.ExplorationOpportunitySummary(
model.id, model.topic_id, model.topic_name, model.story_id,
model.story_title, model.chapter_title, model.content_count,
new_incomplete_translation_language_codes, model.translation_counts,
model.language_codes_needing_voice_artists,
model.language_codes_with_assigned_voice_artists,
translation_in_review_counts)
|
11,576 | def scan_files(dirname):
"""
Scan pair of bin/meta files and return information about it.
"""
info_list = []
for root, dirs, files in os.walk(dirname):
for file in files:
if not file.endswith('.meta'):
continue
meta_filename = Path(root) / file
bin_filename = Path(root) / file.replace('.meta', '.bin')
meta = read_meta_file(meta_filename)
num_chan = int(meta['nSavedChans'])
# when file is Noise4Sam_g0_t0.nidq.bin or Noise4Sam_g0_t0.imec0.lf.bin
# name is the first part "Noise4Sam_g0_t0"
# gtX X is the seg_index here 0
# nidq or imec0 is the device
# lf or ap is "signal_kind"
# stream_name = device + signal_kind
name = file.split('.')[0]
r = re.findall(r'_g(\d*)_t', name)
seg_index = int(r[0][0])
device = file.split('.')[1]
if 'imec' in device:
signal_kind = file.split('.')[2]
stream_name = device + '.' + signal_kind
units = 'uV'
# please note the 1e6 in gain for this uV
# metad['imroTbl'] contain two gain per channel AP and LF
# except for the last fake channel
per_channel_gain = np.ones(num_chan, dtype='float64')
if signal_kind == 'ap':
index_imroTbl = 3
elif signal_kind == 'lf':
index_imroTbl = 4
for c in range(num_chan - 1):
# the last channel don't have gain
per_channel_gain[c] = 1. / float(meta['imroTbl'][c].split(' ')[index_imroTbl])
gain_factor = float(meta['imAiRangeMax']) / 512
channel_gains = per_channel_gain * gain_factor * 1e6
else:
signal_kind = ''
stream_name = device
units = 'V'
channel_gains = np.ones(num_chan)
# there differents kind of channel with diffrents gain
mn, ma, xa, dw = [int(e) for e in meta['snsMnMaXaDw'].split(sep=',')]
per_channel_gain = np.ones(num_chan, dtype='float64')
per_channel_gain[0:mn] = float(meta['niMNGain'])
per_channel_gain[mn:mn + ma] = float(meta['niMAGain'])
gain_factor = float(meta['niAiRangeMax']) / 32768
channel_gains = per_channel_gain * gain_factor
info = {}
info['name'] = name
info['meta'] = meta
info['bin_file'] = str(bin_filename)
for k in ('niSampRate', 'imSampRate'):
if k in meta:
info['sampling_rate'] = float(meta[k])
info['num_chan'] = num_chan
info['sample_length'] = int(meta['fileSizeBytes']) // 2 // num_chan
info['seg_index'] = seg_index
info['device'] = device
info['signal_kind'] = signal_kind
info['stream_name'] = stream_name
info['units'] = units
info['channel_names'] = [txt.split(';')[0] for txt in meta['snsChanMap']]
info['channel_gains'] = channel_gains
info['channel_offsets'] = np.zeros(info['num_chan'])
if signal_kind == 'ap':
channel_location = []
for e in meta['snsShankMap']:
x_pos = int(e.split(':')[1])
y_pos = int(e.split(':')[2])
channel_location.append([x_pos, y_pos])
info['channel_location'] = np.array(channel_location)
info_list.append(info)
return info_list
| def scan_files(dirname):
"""
Scan for pairs of `.bin` and `.meta` files and return information about it.
"""
info_list = []
for root, dirs, files in os.walk(dirname):
for file in files:
if not file.endswith('.meta'):
continue
meta_filename = Path(root) / file
bin_filename = Path(root) / file.replace('.meta', '.bin')
meta = read_meta_file(meta_filename)
num_chan = int(meta['nSavedChans'])
# when file is Noise4Sam_g0_t0.nidq.bin or Noise4Sam_g0_t0.imec0.lf.bin
# name is the first part "Noise4Sam_g0_t0"
# gtX X is the seg_index here 0
# nidq or imec0 is the device
# lf or ap is "signal_kind"
# stream_name = device + signal_kind
name = file.split('.')[0]
r = re.findall(r'_g(\d*)_t', name)
seg_index = int(r[0][0])
device = file.split('.')[1]
if 'imec' in device:
signal_kind = file.split('.')[2]
stream_name = device + '.' + signal_kind
units = 'uV'
# please note the 1e6 in gain for this uV
# metad['imroTbl'] contain two gain per channel AP and LF
# except for the last fake channel
per_channel_gain = np.ones(num_chan, dtype='float64')
if signal_kind == 'ap':
index_imroTbl = 3
elif signal_kind == 'lf':
index_imroTbl = 4
for c in range(num_chan - 1):
# the last channel don't have gain
per_channel_gain[c] = 1. / float(meta['imroTbl'][c].split(' ')[index_imroTbl])
gain_factor = float(meta['imAiRangeMax']) / 512
channel_gains = per_channel_gain * gain_factor * 1e6
else:
signal_kind = ''
stream_name = device
units = 'V'
channel_gains = np.ones(num_chan)
# there differents kind of channel with diffrents gain
mn, ma, xa, dw = [int(e) for e in meta['snsMnMaXaDw'].split(sep=',')]
per_channel_gain = np.ones(num_chan, dtype='float64')
per_channel_gain[0:mn] = float(meta['niMNGain'])
per_channel_gain[mn:mn + ma] = float(meta['niMAGain'])
gain_factor = float(meta['niAiRangeMax']) / 32768
channel_gains = per_channel_gain * gain_factor
info = {}
info['name'] = name
info['meta'] = meta
info['bin_file'] = str(bin_filename)
for k in ('niSampRate', 'imSampRate'):
if k in meta:
info['sampling_rate'] = float(meta[k])
info['num_chan'] = num_chan
info['sample_length'] = int(meta['fileSizeBytes']) // 2 // num_chan
info['seg_index'] = seg_index
info['device'] = device
info['signal_kind'] = signal_kind
info['stream_name'] = stream_name
info['units'] = units
info['channel_names'] = [txt.split(';')[0] for txt in meta['snsChanMap']]
info['channel_gains'] = channel_gains
info['channel_offsets'] = np.zeros(info['num_chan'])
if signal_kind == 'ap':
channel_location = []
for e in meta['snsShankMap']:
x_pos = int(e.split(':')[1])
y_pos = int(e.split(':')[2])
channel_location.append([x_pos, y_pos])
info['channel_location'] = np.array(channel_location)
info_list.append(info)
return info_list
|
41,175 | def _gen_gray_code(n: int):
"""Generate the Gray Code from 0 to 2^n-1.
Each iteration returns two elements. The first element is the decimal representation
of the gray code and the second one is the position of bits flipped for next gray code.
"""
gray_code = 0
for i in range(1, 2 ** n):
next_gray = i ^ (i >> 1)
bit_flip = int(np.log2(gray_code ^ next_gray))
yield gray_code, bit_flip
gray_code = next_gray
yield gray_code, int(np.log2(gray_code))
| def _gen_gray_code(n: int):
"""Generate the Gray Code from 0 to 2^n-1.
Each iteration yields a two-tuple, `(gray_code, bit_flip)`. `gray_code` is the decimal representation
of the gray code and the second one is the position of bits flipped for next gray code.
"""
gray_code = 0
for i in range(1, 2 ** n):
next_gray = i ^ (i >> 1)
bit_flip = int(np.log2(gray_code ^ next_gray))
yield gray_code, bit_flip
gray_code = next_gray
yield gray_code, int(np.log2(gray_code))
|
13,812 | def _get_event_properties(request, data):
"""
set event properties for course and program which are required in braze email template
"""
lms_url = configuration_helpers.get_value('LMS_ROOT_URL', settings.LMS_ROOT_URL)
event_properties = {
'time': datetime.now().isoformat(),
'name': 'user.send.save.for.later.email',
}
if data.get('type') == 'course':
course = data.get('course')
data = request.data
org_img_url = data.get('org_img_url')
marketing_url = data.get('marketing_url')
event_properties.update({
'properties': {
'course_image_url': '{base_url}{image_path}'.format(
base_url=lms_url, image_path=course.course_image_url
),
'partner_image_url': org_img_url,
'enroll_course_url': '{base_url}/register?course_id={course_id}&enrollment_action=enroll&email_opt_in='
'false&save_for_later=true'.format(base_url=lms_url, course_id=course.id),
'view_course_url': marketing_url + '?save_for_later=true' if marketing_url else '#',
'display_name': course.display_name,
'short_description': course.short_description,
'weeks_to_complete': data.get('weeks_to_complete'),
'min_effort': data.get('min_effort'),
'max_effort': data.get('max_effort'),
'pacing_type': 'Self-paced' if course.self_paced else 'Instructor-paced',
'type': 'course',
}
})
if data.get('type') == 'program':
program = data.get('program')
event_properties.update({
'properties': {
'program_image_url': program.get('card_image_url'),
'partner_image_url': program.get('authoring_organizations')[0].get('logo_image_url') if program.get(
'authoring_organizations') else None,
'view_program_url': program.get('marketing_url') + '?save_for_later=true' if program.get(
'marketing_url') else '#',
'title': program.get('title'),
'education_level': program.get('type'),
'total_courses': len(program.get('courses')) if program.get('courses') else 0,
'weeks_to_complete': program.get('weeks_to_complete'),
'min_effort': program.get('min_hours_effort_per_week'),
'max_effort': program.get('max_hours_effort_per_week'),
'pacing_type': _get_program_pacing(program.get('courses')[0].get('course_runs')),
'price': format_price(int(program.get('price_ranges')[0].get('total')), 0),
'registered': bool(program.get('type') in ['MicroMasters', 'MicroBachelors']),
'type': 'program',
}
})
return event_properties
| def _get_event_properties(request, data):
"""
set event properties for course and program which are required in braze email template
"""
lms_url = configuration_helpers.get_value('LMS_ROOT_URL', settings.LMS_ROOT_URL)
event_properties = {
'time': datetime.now().isoformat(),
'name': 'user.send.save.for.later.email',
}
if data.get('type') == 'course':
course = data.get('course')
data = request.data
org_img_url = data.get('org_img_url')
marketing_url = data.get('marketing_url')
event_properties.update({
'properties': {
'course_image_url': '{base_url}{image_path}'.format(
base_url=lms_url, image_path=course.course_image_url
),
'partner_image_url': org_img_url,
'enroll_course_url': '{base_url}/register?course_id={course_id}&enrollment_action=enroll&email_opt_in='
'false&save_for_later=true'.format(base_url=lms_url, course_id=course.id),
'view_course_url': marketing_url + '?save_for_later=true' if marketing_url else '#',
'display_name': course.display_name,
'short_description': course.short_description,
'weeks_to_complete': data.get('weeks_to_complete'),
'min_effort': data.get('min_effort'),
'max_effort': data.get('max_effort'),
'pacing_type': 'Self-paced' if course.self_paced else 'Instructor-led',
'type': 'course',
}
})
if data.get('type') == 'program':
program = data.get('program')
event_properties.update({
'properties': {
'program_image_url': program.get('card_image_url'),
'partner_image_url': program.get('authoring_organizations')[0].get('logo_image_url') if program.get(
'authoring_organizations') else None,
'view_program_url': program.get('marketing_url') + '?save_for_later=true' if program.get(
'marketing_url') else '#',
'title': program.get('title'),
'education_level': program.get('type'),
'total_courses': len(program.get('courses')) if program.get('courses') else 0,
'weeks_to_complete': program.get('weeks_to_complete'),
'min_effort': program.get('min_hours_effort_per_week'),
'max_effort': program.get('max_hours_effort_per_week'),
'pacing_type': _get_program_pacing(program.get('courses')[0].get('course_runs')),
'price': format_price(int(program.get('price_ranges')[0].get('total')), 0),
'registered': bool(program.get('type') in ['MicroMasters', 'MicroBachelors']),
'type': 'program',
}
})
return event_properties
|
31,936 | def checkpoint_delete_objects_batch_command(client: Client, object_type: str, name):
context_data = {}
readable_output = ''
object_names = argToList(name)
objects_to_delete = [{'name': object_name} for object_name in object_names]
result = current_result = client.delete_objects_batch(object_type, objects_to_delete)
if result:
context_data = {'task-id': result.get('task-id')}
readable_output = tableToMarkdown('CheckPoint data for delete-objects-batch command:',
context_data)
command_results = CommandResults(
outputs_prefix='CheckPoint.delete_objects_batch',
outputs_key_field='uid',
readable_output=readable_output,
outputs=context_data,
raw_response=result
)
return command_results
| def checkpoint_delete_objects_batch_command(client: Client, object_type: str, name):
context_data = {}
readable_output = ''
object_names = argToList(name)
objects_to_delete = [{'name': object_name} for object_name in object_names]
result = current_result = client.delete_objects_batch(object_type, objects_to_delete)
if result:
context_data = {'task-id': result.get('task-id')}
readable_output = tableToMarkdown('CheckPoint data for delete-objects-batch command:',
context_data)
command_results = CommandResults(
outputs_prefix='CheckPoint.delete_objects_batch',
outputs_key_field='task-id',
readable_output=readable_output,
outputs=context_data,
raw_response=result
)
return command_results
|
33,019 | def parse_poi_query(north, south, east, west, tags=None, timeout=180, maxsize=''):
"""
Construct an Overpass QL query to load POIs with certain tags.
By default, queries all features with an amenity tag.
Parameters
----------
north : float
Northernmost coordinate from bounding box of the search area.
south : float
Southernmost coordinate from bounding box of the search area.
east : float
Easternmost coordinate from bounding box of the search area.
west : float
Westernmost coordinate of the bounding box of the search area.
tags : dict
Dictionary of tag keys and values that will be used for finding POIs in the selected area.
Keys may be strings or lists of strings.
Values make be string, lists of strings, or None, if all values should be returned for a given key.
By default, all POIs with an 'amenity' key of any value will be be returned.
timeout : int
Timeout for the API request.
"""
# build default tags
if not tags:
tags = {'amenity':True}
# define templates for objects and extents
object_template = '({object_type}[{{keys}}{{op}}"{{values}}"]{{extent}});'
# object_template = '({object_type}[~"^({{keys}})$"{{op}}"{{values}}"]{{extent}});'
re_keys_template = '~"^({keys})$"'
single_key_template = '"{key}"'
extent_template = '({south:.6f},{west:.6f},{north:.6f},{east:.6f});(._;>;);'
extent = extent_template.format(south=south, west=west, north=north, east=east)
# initate query string
query_template = "[out:json][timeout:{timeout}]{maxsize};("
query_str = query_template.format(timeout=timeout, maxsize=maxsize)
# add statements for each object type
# templates = [object_template.format(object_type=x) for x in ['node','way','relation']]
templates = [object_template.format(object_type=x) for x in ['nwr']]
for template in templates:
# add statements for each key
for keys, values in tags.items():
# ensure keys is a list
keys = [keys] if not isinstance(keys, list) else keys
if values == True:
# get features with any value for these keys
# add positive statement with multiple keys and no specific values
query_str += template.format(keys=re_keys_template.format(keys='|'.join(keys)), values='.*', extent=extent, op='~')
elif values == False:
# get features wihout these keys, not matter their values
for key in keys:
# add negative statement with multiple keys and no specific values
# can only be added one at a time withough key regex
query_str += template.format(keys=single_key_template.format(key=key), values='.*', extent=extent, op='!~')
else:
# get features with specified values for these keys
# ensure values is a list
values = [values] if not isinstance(values, list) else values
# add positive statement with multiple keys in specific values
query_str += template.format(keys='{}'.format('|'.join(keys)), values='|'.join(values), extent=extent, op='~')
# terminate query string
query_str += ");out;"
return query_str
| def parse_poi_query(north, south, east, west, tags=None, timeout=180, maxsize=''):
"""
Construct an Overpass QL query to load POIs with certain tags.
By default, queries all features with an amenity tag.
Parameters
----------
north : float
Northernmost coordinate from bounding box of the search area.
south : float
Southernmost coordinate from bounding box of the search area.
east : float
Easternmost coordinate from bounding box of the search area.
west : float
Westernmost coordinate of the bounding box of the search area.
tags : dict
Dictionary of tag keys and values that will be used for finding POIs in the selected area.
Keys may be strings or lists of strings.
Values make be string, lists of strings, or None, if all values should be returned for a given key.
By default, all POIs with an 'amenity' key of any value will be be returned.
timeout : int
Timeout for the API request.
"""
# build default tags
if tags is not None:
tags = {'amenity':True}
# define templates for objects and extents
object_template = '({object_type}[{{keys}}{{op}}"{{values}}"]{{extent}});'
# object_template = '({object_type}[~"^({{keys}})$"{{op}}"{{values}}"]{{extent}});'
re_keys_template = '~"^({keys})$"'
single_key_template = '"{key}"'
extent_template = '({south:.6f},{west:.6f},{north:.6f},{east:.6f});(._;>;);'
extent = extent_template.format(south=south, west=west, north=north, east=east)
# initate query string
query_template = "[out:json][timeout:{timeout}]{maxsize};("
query_str = query_template.format(timeout=timeout, maxsize=maxsize)
# add statements for each object type
# templates = [object_template.format(object_type=x) for x in ['node','way','relation']]
templates = [object_template.format(object_type=x) for x in ['nwr']]
for template in templates:
# add statements for each key
for keys, values in tags.items():
# ensure keys is a list
keys = [keys] if not isinstance(keys, list) else keys
if values == True:
# get features with any value for these keys
# add positive statement with multiple keys and no specific values
query_str += template.format(keys=re_keys_template.format(keys='|'.join(keys)), values='.*', extent=extent, op='~')
elif values == False:
# get features wihout these keys, not matter their values
for key in keys:
# add negative statement with multiple keys and no specific values
# can only be added one at a time withough key regex
query_str += template.format(keys=single_key_template.format(key=key), values='.*', extent=extent, op='!~')
else:
# get features with specified values for these keys
# ensure values is a list
values = [values] if not isinstance(values, list) else values
# add positive statement with multiple keys in specific values
query_str += template.format(keys='{}'.format('|'.join(keys)), values='|'.join(values), extent=extent, op='~')
# terminate query string
query_str += ");out;"
return query_str
|
7,541 | def test_qtable_quantity_int_conversion():
"""Ensure the behavior when converting ``int`` ``Column`` to ``Quantity``, the dtype will be ``float``.
See https://github.com/astropy/astropy/issues/10964 for the rationale
"""
tab = QTable(dict(time=[1, 2, 3]))
tab['length'] = [9, 8, 7]
with pytest.warns(UserWarning, match="dtype is converted to float"):
tab['length'].unit = u.m
assert np.issubdtype(tab['length'].dtype, np.float)
# same for dimensionless unit
tab['col2'] = [6, 5, 4]
with pytest.warns(UserWarning, match="dtype is converted to float"):
tab['col2'].unit = u.dimensionless_unscaled
assert np.issubdtype(tab['col2'].dtype, np.float)
# An implied behavior is that when QTable reads a file with a Column of int data with units,
# it gets converted to float as well.
# See: https://github.com/astropy/astropy/pull/10950#issuecomment-718117133
# can't use in-memory IO, e.g., io.BytesIO - fits IO can't handle it.
filename = 'test_qtable_quantity_int_conversion.fits'
try:
Table([Column(np.arange(3), unit=u.m, name='length')]).write(filename, overwrite=True)
tab = table.QTable.read(filename)
assert np.issubdtype(tab['length'].dtype, np.float)
finally:
if os.path.isfile(filename):
os.remove(filename)
# Ensure warnings only happen in column update, but not column add
# - case adding a column
tab = QTable(dict(time=[1, 2, 3]))
tab['length'] = Column([9, 8, 7], unit=u.m)
# - case initial creation
tab = QTable([[1, 2, 3]], names=['time'], units=[u.m])
| def test_qtable_quantity_int_conversion():
"""Ensure the behavior when converting ``int`` ``Column`` to ``Quantity``, the dtype will be ``float``.
See https://github.com/astropy/astropy/issues/10964 for the rationale
"""
tab = QTable(dict(time=[1, 2, 3]))
tab['length'] = [9, 8, 7]
with pytest.warns(UserWarning, match="dtype is converted to float"):
tab['length'].unit = u.m
assert np.issubdtype(tab['length'].dtype, np.float)
# same for dimensionless unit
tab['col2'] = [6, 5, 4]
with pytest.warns(AstropyUserWarning, match="dtype is converted to float"):
tab['col2'].unit = u.dimensionless_unscaled
assert np.issubdtype(tab['col2'].dtype, np.float)
# An implied behavior is that when QTable reads a file with a Column of int data with units,
# it gets converted to float as well.
# See: https://github.com/astropy/astropy/pull/10950#issuecomment-718117133
# can't use in-memory IO, e.g., io.BytesIO - fits IO can't handle it.
filename = 'test_qtable_quantity_int_conversion.fits'
try:
Table([Column(np.arange(3), unit=u.m, name='length')]).write(filename, overwrite=True)
tab = table.QTable.read(filename)
assert np.issubdtype(tab['length'].dtype, np.float)
finally:
if os.path.isfile(filename):
os.remove(filename)
# Ensure warnings only happen in column update, but not column add
# - case adding a column
tab = QTable(dict(time=[1, 2, 3]))
tab['length'] = Column([9, 8, 7], unit=u.m)
# - case initial creation
tab = QTable([[1, 2, 3]], names=['time'], units=[u.m])
|
27,737 | def get_dirs_from_args(args: Iterable[str]) -> List[Path]:
def is_option(x: str) -> bool:
return x.startswith("-")
def get_file_part_from_node_id(x: str) -> str:
return x.split("::")[0]
def get_dir_from_path(path: Path) -> Path:
if path.is_dir():
return path
return path.parent
# These look like paths but may not exist
possible_paths = (
Path(get_file_part_from_node_id(arg)).resolve()
for arg in args
if not is_option(arg)
)
return [get_dir_from_path(path) for path in possible_paths if path.exists()]
| def get_dirs_from_args(args: Iterable[str]) -> List[Path]:
def is_option(x: str) -> bool:
return x.startswith("-")
def get_file_part_from_node_id(x: str) -> str:
return x.split("::")[0]
def get_dir_from_path(path: Path) -> Path:
if path.is_dir():
return path
return path.parent
# These look like paths but may not exist
possible_paths = (
Path(get_file_part_from_node_id(arg)).absolute()
for arg in args
if not is_option(arg)
)
return [get_dir_from_path(path) for path in possible_paths if path.exists()]
|
51,513 | def get_github_auth_url(next_path):
if settings.MULTI_ORG:
github_auth_url = url_for('github_oauth.authorize_org', next=next_path, org_slug=current_org.slug)
else:
github_auth_url = url_for('github_oauth.authorize', next=next_path)
return github_auth_url
| def get_github_auth_url(next_path):
kwargs = {"org_slug": current_org.slug} if settings.MULTI_ORG else {}
return url_for('github_oauth.authorize_org', next=next_path, **kwargs)
|
57,835 | def fetch_incidents(query, id_offset, should_get_attachments, should_get_comments, should_mirror_in, should_mirror_out,
comment_tag, attachment_tag, fetch_by_created=None):
last_run = demisto.getLastRun()
demisto.debug(f'last_run: {last_run}' if last_run else 'last_run is empty')
last_created_time = ''
if last_run:
id_offset = last_run.get('idOffset') if last_run.get('idOffset') else ''
last_created_time = last_run.get('lastCreatedTime') if last_run.get('lastCreatedTime') else ''
if not id_offset:
id_offset = 0
incidents, max_results = [], 50
if id_offset:
query = f'{query} AND id >= {id_offset}'
if fetch_by_created:
if last_created_time:
last_issue_time = parse(last_created_time)
minute_to_fetch = last_issue_time - timedelta(minutes=1)
formatted_minute_to_fetch = minute_to_fetch.strftime('%Y-%m-%d %H:%M')
query = f'{query} AND created>={formatted_minute_to_fetch}'
else:
query = f'{query} AND created>-1m'
res = run_query(query, '', max_results)
if res:
curr_id = id_offset
for ticket in res.get('issues'):
ticket_id = int(ticket.get('id'))
ticket_created = ticket.get('created')
if ticket_id <= curr_id:
continue
if ticket_id > id_offset:
id_offset = ticket_id
last_created_time = ticket_created
id_offset = max(int(id_offset), ticket_id)
incidents.append(create_incident_from_ticket(ticket, should_get_attachments, should_get_comments,
should_mirror_in, should_mirror_out, comment_tag,
attachment_tag))
demisto.setLastRun({'idOffset': id_offset, 'lastCreatedTime': last_created_time})
return incidents
| def fetch_incidents(query, id_offset, should_get_attachments, should_get_comments, should_mirror_in, should_mirror_out,
comment_tag, attachment_tag, fetch_by_created=None):
last_run = demisto.getLastRun()
demisto.debug(f'last_run: {last_run}' if last_run else 'last_run is empty')
last_created_time = ''
if last_run:
id_offset = last_run.get('idOffset', '')
last_created_time = last_run.get('lastCreatedTime', '')
id_offset = last_run.get('idOffset') or ''
last_created_time = last_run.get('lastCreatedTime') or ''
if not id_offset:
id_offset = 0
incidents, max_results = [], 50
if id_offset:
query = f'{query} AND id >= {id_offset}'
if fetch_by_created:
if last_created_time:
last_issue_time = parse(last_created_time)
minute_to_fetch = last_issue_time - timedelta(minutes=1)
formatted_minute_to_fetch = minute_to_fetch.strftime('%Y-%m-%d %H:%M')
query = f'{query} AND created>={formatted_minute_to_fetch}'
else:
query = f'{query} AND created>-1m'
res = run_query(query, '', max_results)
if res:
curr_id = id_offset
for ticket in res.get('issues'):
ticket_id = int(ticket.get('id'))
ticket_created = ticket.get('created')
if ticket_id <= curr_id:
continue
if ticket_id > id_offset:
id_offset = ticket_id
last_created_time = ticket_created
id_offset = max(int(id_offset), ticket_id)
incidents.append(create_incident_from_ticket(ticket, should_get_attachments, should_get_comments,
should_mirror_in, should_mirror_out, comment_tag,
attachment_tag))
demisto.setLastRun({'idOffset': id_offset, 'lastCreatedTime': last_created_time})
return incidents
|
36,252 | def check_presence_download_untar(filename: Path, tarfilename: Path, backup_url):
"""Check if file is present otherwise download and untar."""
if not filename.is_file():
if not tarfilename.is_file():
from .readwrite import _download
_download(backup_url, tarfilename)
import tarfile
tarfile.open(tarfilename).extractall(tarfilename.parents[0])
| def check_presence_download_untar(filename: Path, tarfilename: Path, backup_url):
"""Check if file is present otherwise download and untar."""
if not filename.is_file():
if not tarfilename.is_file():
from .readwrite import _download
_download(backup_url, tarfilename)
import tarfile
tarfile.open(tarfilename).extract(filename, tarfilename.parent)
|
40,940 | def annotate_pages(pages):
"""A generator to annotate pages with nesting information.
This is required to build a nested list of pages from the flat mptree list of pages.
Returns:
tuple(page, nb_open, nb_close, is_empty):
- page: the current page to wrap in a <li> tag,
- nb_open(int): number of opening <ul> tags. Should be 1 when the nesting level
increases, 0 otherwise,
- nb_close(int): number of closing </ul> tags. Should be the gap between one page
and the next when less nested. When reaching the last page we should close all
the tags still open.
- is_empty(boolean): True if the current <li> tag is a leaf, False otherwise. If
the page is not empty, it has children and we should delay closing the <li>.
"""
nb_open = nb_close = 0
nb_pages = len(pages)
for i in range(nb_pages):
page = pages[i]
if i == 0:
nb_open = 0
else:
nb_open = max(pages[i].level - pages[i - 1].level, 0)
is_empty = True
if i == nb_pages - 1:
nb_close = pages[i].level - 1
else:
gap = pages[i].level - pages[i + 1].level
nb_close = max(pages[i].level - pages[i + 1].level, 0)
is_empty = gap >= 0
yield page, nb_open, nb_close, is_empty
| def annotate_pages(pages):
"""A generator to annotate pages with nesting information.
This is required to build a nested list of pages from the flat mptree list of pages.
Returns:
tuple(page, nb_open, nb_close, is_empty):
- page: the current page to wrap in a <li> tag,
- nb_open(int): number of opening <ul> tags. Should be 1 when the nesting level
increases, 0 otherwise,
- nb_close(int): number of closing </ul> tags. Should be the gap between one page
and the next when less nested. When reaching the last page we should close all
the tags still open.
- is_empty(boolean): True if the current <li> tag is a leaf, False otherwise. If
the page is not empty, it has children and we should delay closing the <li>.
"""
nb_open = nb_close = 0
nb_pages = len(pages)
for i in range(nb_pages):
page = pages[i]
if i == 0:
nb_open = 0
else:
nb_open = max(pages[i].level - pages[i - 1].level, 0)
is_empty = True
if i == nb_pages - 1:
nb_close = pages[i].level - 1
else:
gap = pages[i].level - pages[i + 1].level
nb_close = max(gap, 0)
is_empty = gap >= 0
yield page, nb_open, nb_close, is_empty
|
3,602 | def get_praw_config(config: ConfigParser = None) -> ConfigParser:
if config is None:
config = ConfigParser()
config.read(get_praw_ini_potential_locations())
return config
| def get_praw_config(config: ConfigParser = None) -> ConfigParser:
if config is None:
config = ConfigParser()
config.read(get_potential_praw_ini_locations())
return config
|
49,108 | def evalf_add(v: 'Add', prec: int, options: OPT_DICT) -> TMP_RES:
res = pure_complex(v)
if res:
h, c = res
re, _, re_acc, _ = evalf(h, prec, options)
im, _, im_acc, _ = evalf(c, prec, options)
return re, im, re_acc, im_acc
oldmaxprec = options.get('maxprec', DEFAULT_MAXPREC)
i = 0
target_prec = prec
while 1:
options['maxprec'] = min(oldmaxprec, 2*prec)
terms = [evalf(arg, prec + 10, options) for arg in v.args]
n = sum([term.count(S.ComplexInfinity) for term in terms])
if n >= 2:
return fnan, None, prec, None
re, re_acc = add_terms(
[a[0::2] for a in terms if isinstance(a, tuple) and a[0]], prec, target_prec)
im, im_acc = add_terms(
[a[1::2] for a in terms if isinstance(a, tuple) and a[1]], prec, target_prec)
if n == 1:
if re in (finf, fninf, fnan) or im in (finf, fninf, fnan):
return fnan, None, prec, None
return S.ComplexInfinity
acc = complex_accuracy((re, im, re_acc, im_acc))
if acc >= target_prec:
if options.get('verbose'):
print("ADD: wanted", target_prec, "accurate bits, got", re_acc, im_acc)
break
else:
if (prec - target_prec) > options['maxprec']:
break
prec = prec + max(10 + 2**i, target_prec - acc)
i += 1
if options.get('verbose'):
print("ADD: restarting with prec", prec)
options['maxprec'] = oldmaxprec
if iszero(re, scaled=True):
re = scaled_zero(re)
if iszero(im, scaled=True):
im = scaled_zero(im)
return re, im, re_acc, im_acc
| def evalf_add(v: 'Add', prec: int, options: OPT_DICT) -> TMP_RES:
res = pure_complex(v)
if res:
h, c = res
re, _, re_acc, _ = evalf(h, prec, options)
im, _, im_acc, _ = evalf(c, prec, options)
return re, im, re_acc, im_acc
oldmaxprec = options.get('maxprec', DEFAULT_MAXPREC)
i = 0
target_prec = prec
while 1:
options['maxprec'] = min(oldmaxprec, 2*prec)
terms = [evalf(arg, prec + 10, options) for arg in v.args]
# remove any None so they won't be sympified during comparison
n = filter(None, terms).count(S.ComplexInfinity)
if n >= 2:
return fnan, None, prec, None
re, re_acc = add_terms(
[a[0::2] for a in terms if isinstance(a, tuple) and a[0]], prec, target_prec)
im, im_acc = add_terms(
[a[1::2] for a in terms if isinstance(a, tuple) and a[1]], prec, target_prec)
if n == 1:
if re in (finf, fninf, fnan) or im in (finf, fninf, fnan):
return fnan, None, prec, None
return S.ComplexInfinity
acc = complex_accuracy((re, im, re_acc, im_acc))
if acc >= target_prec:
if options.get('verbose'):
print("ADD: wanted", target_prec, "accurate bits, got", re_acc, im_acc)
break
else:
if (prec - target_prec) > options['maxprec']:
break
prec = prec + max(10 + 2**i, target_prec - acc)
i += 1
if options.get('verbose'):
print("ADD: restarting with prec", prec)
options['maxprec'] = oldmaxprec
if iszero(re, scaled=True):
re = scaled_zero(re)
if iszero(im, scaled=True):
im = scaled_zero(im)
return re, im, re_acc, im_acc
|
43,315 | def _default_if_none(value, default, name, ensure_not_none=True):
value = value if value is not None else default
if ensure_not_none and value is None:
raise ValueError(
f"{name}: expected a value to be specified in either `__init__` or `run`, found: None."
)
return value
| def _default_if_none(value, default, name, ensure_not_none=True):
value = value if value is not None else default
if ensure_not_none and value is None:
raise ValueError(
f"{name}: expected a value to be specified in either `__init__` or `run`, found None in both"
)
return value
|
31,691 | def remove_unwanted_keys(response_data: Dict[str, List], keys: List[str]) -> None:
"""
Removes all keys that were not specified by the user as the desired keys to return
:type response_data: ``Dict[str, List]``
:param response_data: The data key from the API's response
:type keys: ``List[str]``
:param keys: IOC Types to return
"""
keys_list = list(response_data.keys())
for ioc_type in keys_list:
if ioc_type not in keys or not response_data[ioc_type]:
del response_data[ioc_type]
| def remove_unwanted_keys(response_data: Dict[str, List], keys: List[str]) -> None:
"""
Removes all keys that were not specified by the user as the desired keys to return
:type response_data: ``Dict[str, List]``
:param response_data: The data key from the API's response
:type keys: ``List[str]``
:param keys: IOC Types to return
"""
keys_list = response_data.keys()
for ioc_type in keys_list:
if ioc_type not in keys or not response_data[ioc_type]:
del response_data[ioc_type]
|
44,245 | def vn_entropy(wires, log_base=None):
r"""Von Neumann entropy of the system prior to measurement.
.. math::
S( \rho ) = -\text{Tr}( \rho \log ( \rho ))
Args:
wires (Sequence[int] or int): The wires of the subsystem
log_base (float): Base for the logarithm. If None, the natural logarithm is used.
**Example:**
.. code-block:: python3
dev = qml.device("default.qubit", wires=2)
@qml.qnode(dev)
def circuit_entropy(x):
qml.IsingXX(x, wires=[0, 1])
return qml.vn_entropy(wires=[0])
Executing this QNode:
>>> circuit_entropy(np.pi/2)
0.6931472
It is also possible to get the gradient of the previous QNode:
>>> param = pennylane.numpy.array(np.pi/4, requires_grad=True)
>>> qml.grad(circuit_entropy)(param)
0.6232252401402305
.. note::
Calculating the derivative of :func:`~.vn_entropy` is currently supported when
using the classical backpropagation differentiation method (``diff_method="backprop"``)
with a compatible device and finite differences (``diff_method="finite-diff"``).
"""
wires = qml.wires.Wires(wires)
return MeasurementProcess(VnEntropy, wires=wires, log_base=log_base)
| def vn_entropy(wires, log_base=None):
r"""Von Neumann entropy of the system prior to measurement.
.. math::
S( \rho ) = -\text{Tr}( \rho \log ( \rho ))
Args:
wires (Sequence[int] or int): The wires of the subsystem
log_base (float): Base for the logarithm. If None, the natural logarithm is used.
**Example:**
.. code-block:: python3
dev = qml.device("default.qubit", wires=2)
@qml.qnode(dev)
def circuit_entropy(x):
qml.IsingXX(x, wires=[0, 1])
return qml.vn_entropy(wires=[0])
Executing this QNode:
>>> circuit_entropy(np.pi/2)
0.6931472
It is also possible to get the gradient of the previous QNode:
>>> param = np.array(np.pi/4, requires_grad=True)
>>> qml.grad(circuit_entropy)(param)
0.6232252401402305
.. note::
Calculating the derivative of :func:`~.vn_entropy` is currently supported when
using the classical backpropagation differentiation method (``diff_method="backprop"``)
with a compatible device and finite differences (``diff_method="finite-diff"``).
"""
wires = qml.wires.Wires(wires)
return MeasurementProcess(VnEntropy, wires=wires, log_base=log_base)
|
7,310 | def crop(image, bounding_box, axis=None):
"""Cropping images from a bounding box.
Bounding_box (which is a 2-tuple (min_val, max_val) for each axis)
and (optional) axis for corresponding axis order to bounding_box.
Parameters
----------
Image : ndarray
Input array.
Bounding_box : list of 2-tuple (x, y) where x < y.
Bounding box.
axis : tuple, optional
Axis order for cropping.
if provided, same legth as bounding_box.
Default: None
Returns
----------
out : ndarray
Cropped array.
Examples
--------
>>> from skimage import data
>>> from skimage.util.crop import crop
>>> img = data.camera()
>>> img.shape
(512, 512)
>>> cropped_img = crop(img, [(0, 100)])
>>> cropped_img.shape
(100, 512)
>>> cropped_img = crop(img, [(0, 100), (0, 100)])
>>> cropped_img.shape
(100, 100)
>>> cropped_img = crop(img, [(0, 100), (0, 75)], axis=[1, 0])
>>> cropped_img.shape
(75, 100)
"""
# empty legth of bounding box detected on None detected
if not bounding_box:
return image
# check data isinstance of numpy array
if not isinstance(image, np.ndarray):
raise ValueError("data must be numpy array")
# if not axis provided,
# consider sequential cropping on axis
if not axis:
axis = list(range(len(bounding_box)))
else:
if len(axis) != len(set(axis)):
raise ValueError("axis must be unique")
if len(axis) != len(bounding_box):
raise ValueError("axis and bounding_box must have same length")
if not all(isinstance(a, int) for a in axis):
raise ValueError("axis must be integer")
if not all(a >= 0 for a in axis):
raise ValueError("axis must be positive")
if not all(a < image.ndim for a in axis):
raise ValueError("axis must be less than image.ndim")
bbox_with_axis = list(zip(bounding_box, axis))
# sort axis by decreasing
bbox_with_axis.sort(key=lambda x: x[1], reverse=True)
full_bbox_data = []
for idx in range(image.ndim):
if bbox_with_axis and bbox_with_axis[-1][1] == idx:
bbox, _ = bbox_with_axis.pop()
axis_min, axis_max = bbox
if axis_min > axis_max:
raise ValueError(
"In bounding_box, tuple should be sorted (min_val, max_val)")
if axis_min < 0:
raise ValueError("In bounding_box, values must be positive")
if axis_max < 0:
raise ValueError("In bounding_box, values must be positive")
if axis_min > image.shape[idx]:
raise ValueError("Invalid bounding_box!")
if axis_max > image.shape[idx]:
raise ValueError("Invalid bounding_box!")
full_bbox_data.append(range(*bbox))
else:
full_bbox_data.append(range(image.shape[idx]))
return image[np.ix_(*full_bbox_data)]
| def crop(image, bounding_box, axis=None):
"""Cropping images from a bounding box.
Bounding_box (which is a 2-tuple (min_val, max_val) for each axis)
and (optional) axis for corresponding axis order to bounding_box.
Parameters
----------
Image : ndarray
Input array.
Bounding_box : list of 2-tuple (x, y) where x < y.
Bounding box.
axis : tuple, optional
Axis order for cropping.
if provided, same legth as bounding_box.
Default: None
Returns
----------
out : ndarray
Cropped array.
Examples
--------
>>> from skimage import data
>>> from skimage.util.crop import crop
>>> img = data.camera()
>>> img.shape
(512, 512)
>>> cropped_img = crop(img, [(0, 100)])
>>> cropped_img.shape
(100, 512)
>>> cropped_img = crop(img, [(0, 100), (0, 100)])
>>> cropped_img.shape
(100, 100)
>>> cropped_img = crop(img, [(0, 100), (0, 75)], axis=[1, 0])
>>> cropped_img.shape
(75, 100)
"""
# empty length of bounding box detected on None detected
if not bounding_box:
return image
# check data isinstance of numpy array
if not isinstance(image, np.ndarray):
raise ValueError("data must be numpy array")
# if not axis provided,
# consider sequential cropping on axis
if not axis:
axis = list(range(len(bounding_box)))
else:
if len(axis) != len(set(axis)):
raise ValueError("axis must be unique")
if len(axis) != len(bounding_box):
raise ValueError("axis and bounding_box must have same length")
if not all(isinstance(a, int) for a in axis):
raise ValueError("axis must be integer")
if not all(a >= 0 for a in axis):
raise ValueError("axis must be positive")
if not all(a < image.ndim for a in axis):
raise ValueError("axis must be less than image.ndim")
bbox_with_axis = list(zip(bounding_box, axis))
# sort axis by decreasing
bbox_with_axis.sort(key=lambda x: x[1], reverse=True)
full_bbox_data = []
for idx in range(image.ndim):
if bbox_with_axis and bbox_with_axis[-1][1] == idx:
bbox, _ = bbox_with_axis.pop()
axis_min, axis_max = bbox
if axis_min > axis_max:
raise ValueError(
"In bounding_box, tuple should be sorted (min_val, max_val)")
if axis_min < 0:
raise ValueError("In bounding_box, values must be positive")
if axis_max < 0:
raise ValueError("In bounding_box, values must be positive")
if axis_min > image.shape[idx]:
raise ValueError("Invalid bounding_box!")
if axis_max > image.shape[idx]:
raise ValueError("Invalid bounding_box!")
full_bbox_data.append(range(*bbox))
else:
full_bbox_data.append(range(image.shape[idx]))
return image[np.ix_(*full_bbox_data)]
|
9,516 | def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', choices=['enabled', 'disabled', 'reloaded', 'reset']),
default=dict(type='str', aliases=['policy'], choices=['allow', 'deny', 'reject']),
logging=dict(type='str', choices=['full', 'high', 'low', 'medium', 'off', 'on']),
direction=dict(type='str', choices=['in', 'incoming', 'out', 'outgoing', 'routed']),
delete=dict(type='bool', default=False),
route=dict(type='bool', default=False),
insert=dict(type='str'),
rule=dict(type='str', choices=['allow', 'deny', 'limit', 'reject']),
interface=dict(type='str', aliases=['if']),
log=dict(type='bool', default=False),
from_ip=dict(type='str', default='any', aliases=['from', 'src']),
from_port=dict(type='str'),
to_ip=dict(type='str', default='any', aliases=['dest', 'to']),
to_port=dict(type='str', aliases=['port']),
proto=dict(type='str', aliases=['protocol'], choices=['ah', 'any', 'esp', 'ipv6', 'tcp', 'udp']),
app=dict(type='str', aliases=['name']),
comment=dict(type='str'),
),
supports_check_mode=True,
mutually_exclusive=[
['app', 'proto', 'logging']
],
)
cmds = []
ipv4_regexp = compile_ipv4_regexp()
ipv6_regexp = compile_ipv6_regexp()
def filter_line_that_not_start_with(pattern, content):
return ''.join([line for line in content.splitlines(True) if line.startswith(pattern)])
def filter_line_that_contains(pattern, content):
return [line for line in content.splitlines(True) if pattern in line]
def filter_line_that_not_contains(pattern, content):
return ''.join([line for line in content.splitlines(True) if not line.contains(pattern)])
def filter_line_that_match_func(match_func, content):
return ''.join([line for line in content.splitlines(True) if match_func(line) is not None])
def filter_line_that_contains_ipv4(content):
return filter_line_that_match_func(ipv4_regexp.search, content)
def filter_line_that_contains_ipv6(content):
return filter_line_that_match_func(ipv6_regexp.search, content)
def is_starting_by_ipv4(ip):
return ipv4_regexp.match(ip) is not None
def is_starting_by_ipv6(ip):
return ipv6_regexp.match(ip) is not None
def execute(cmd, ignore_error=False):
cmd = ' '.join(map(itemgetter(-1), filter(itemgetter(0), cmd)))
cmds.append(cmd)
(rc, out, err) = module.run_command(cmd, environ_update={"LANG": "C"})
if rc != 0 and not ignore_error:
module.fail_json(msg=err or out, commands=cmds)
return out
def get_current_rules():
user_rules_files = ["/lib/ufw/user.rules",
"/lib/ufw/user6.rules",
"/etc/ufw/user.rules",
"/etc/ufw/user6.rules",
"/var/lib/ufw/user.rules",
"/var/lib/ufw/user6.rules"]
cmd = [[grep_bin], ["-h"], ["'^### tuple'"]]
cmd.extend([[f] for f in user_rules_files])
return execute(cmd, ignore_error=True)
def ufw_version():
"""
Returns the major and minor version of ufw installed on the system.
"""
out = execute([[ufw_bin], ["--version"]])
lines = [x for x in out.split('\n') if x.strip() != '']
if len(lines) == 0:
module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)
matches = re.search(r'^ufw.+(\d+)\.(\d+)(?:\.(\d+))?.*$', lines[0])
if matches is None:
module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)
# Convert version to numbers
major = int(matches.group(1))
minor = int(matches.group(2))
rev = 0
if matches.group(3) is not None:
rev = int(matches.group(3))
return major, minor, rev
params = module.params
# Ensure at least one of the command arguments are given
command_keys = ['state', 'default', 'rule', 'logging']
commands = dict((key, params[key]) for key in command_keys if params[key])
if len(commands) < 1:
module.fail_json(msg="Not any of the command arguments %s given" % commands)
if (params['interface'] is not None and params['direction'] is None):
module.fail_json(msg="Direction must be specified when creating a rule on an interface")
# Ensure ufw is available
ufw_bin = module.get_bin_path('ufw', True)
grep_bin = module.get_bin_path('grep', True)
# Save the pre state and rules in order to recognize changes
pre_state = execute([[ufw_bin], ['status verbose']])
pre_rules = get_current_rules()
changed = False
# Execute filter
for (command, value) in commands.items():
cmd = [[ufw_bin], [module.check_mode, '--dry-run']]
if command == 'state':
states = {'enabled': 'enable', 'disabled': 'disable',
'reloaded': 'reload', 'reset': 'reset'}
if value in ['reloaded', 'reset']:
changed = True
if module.check_mode:
# "active" would also match "inactive", hence the space
ufw_enabled = pre_state.find(" active") != -1
if (value == 'disabled' and ufw_enabled) or (value == 'enabled' and not ufw_enabled):
changed = True
else:
execute(cmd + [['-f'], [states[value]]])
elif command == 'logging':
extract = re.search(r'Logging: (on|off) \(([a-z]+)\)', pre_state)
if extract:
current_level = extract.group(2)
current_on_off_value = extract.group(1)
if value != "off":
if value != "on" and (value != current_level or current_on_off_value == "off"):
changed = True
elif current_on_off_value != "off":
changed = True
else:
changed = True
if not module.check_mode:
execute(cmd + [[command], [value]])
elif command == 'default':
if params['direction'] in ['in', 'out', None]:
module.fail_json(msg='For default, direction must be one of "outgoing", "incoming" and "routed".')
if module.check_mode:
regexp = r'Default: (deny|allow|reject) \(incoming\), (deny|allow|reject) \(outgoing\), (deny|allow|reject|disabled) \(routed\)'
extract = re.search(regexp, pre_state)
if extract is not None:
current_default_values = {}
current_default_values["incoming"] = extract.group(1)
current_default_values["outgoing"] = extract.group(2)
current_default_values["routed"] = extract.group(3)
if current_default_values[params['direction']] != value:
changed = True
else:
changed = True
else:
execute(cmd + [[command], [value], [params['direction']]])
elif command == 'rule':
if params['direction'] in ['outgoing', 'incoming', 'routed']:
module.fail_json(msg='For rules, direction must be one of "in" and "out".')
# Rules are constructed according to the long format
#
# ufw [--dry-run] [route] [delete] [insert NUM] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \
# [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \
# [proto protocol] [app application] [comment COMMENT]
cmd.append([module.boolean(params['route']), 'route'])
cmd.append([module.boolean(params['delete']), 'delete'])
cmd.append([params['insert'], "insert %s" % params['insert']])
cmd.append([value])
cmd.append([params['direction'], "%s" % params['direction']])
cmd.append([params['interface'], "on %s" % params['interface']])
cmd.append([module.boolean(params['log']), 'log'])
for (key, template) in [('from_ip', "from %s"), ('from_port', "port %s"),
('to_ip', "to %s"), ('to_port', "port %s"),
('proto', "proto %s"), ('app', "app '%s'")]:
value = params[key]
cmd.append([value, template % (value)])
ufw_major, ufw_minor, _ = ufw_version()
# comment is supported only in ufw version after 0.35
if (ufw_major == 0 and ufw_minor >= 35) or ufw_major > 0:
cmd.append([params['comment'], "comment '%s'" % params['comment']])
rules_dry = execute(cmd)
if module.check_mode:
nb_skipping_line = len(filter_line_that_contains("Skipping", rules_dry))
if not (nb_skipping_line > 0 and nb_skipping_line == len(rules_dry.splitlines(True))):
rules_dry = filter_line_that_not_start_with("### tuple", rules_dry)
# ufw dry-run doesn't send all rules so have to compare ipv4 or ipv6 rules
if is_starting_by_ipv4(params['from_ip']) or is_starting_by_ipv4(params['to_ip']):
if filter_line_that_contains_ipv4(pre_rules) != filter_line_that_contains_ipv4(rules_dry):
changed = True
elif is_starting_by_ipv6(params['from_ip']) or is_starting_by_ipv6(params['to_ip']):
if filter_line_that_contains_ipv6(pre_rules) != filter_line_that_contains_ipv6(rules_dry):
changed = True
elif pre_rules != rules_dry:
changed = True
# Get the new state
if module.check_mode:
return module.exit_json(changed=changed, commands=cmds)
else:
post_state = execute([[ufw_bin], ['status'], ['verbose']])
if not changed:
post_rules = get_current_rules()
changed = (pre_state != post_state) or (pre_rules != post_rules)
return module.exit_json(changed=changed, commands=cmds, msg=post_state.rstrip())
| def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', choices=['enabled', 'disabled', 'reloaded', 'reset']),
default=dict(type='str', aliases=['policy'], choices=['allow', 'deny', 'reject']),
logging=dict(type='str', choices=['full', 'high', 'low', 'medium', 'off', 'on']),
direction=dict(type='str', choices=['in', 'incoming', 'out', 'outgoing', 'routed']),
delete=dict(type='bool', default=False),
route=dict(type='bool', default=False),
insert=dict(type='str'),
rule=dict(type='str', choices=['allow', 'deny', 'limit', 'reject']),
interface=dict(type='str', aliases=['if']),
log=dict(type='bool', default=False),
from_ip=dict(type='str', default='any', aliases=['from', 'src']),
from_port=dict(type='str'),
to_ip=dict(type='str', default='any', aliases=['dest', 'to']),
to_port=dict(type='str', aliases=['port']),
proto=dict(type='str', aliases=['protocol'], choices=['ah', 'any', 'esp', 'ipv6', 'tcp', 'udp']),
app=dict(type='str', aliases=['name']),
comment=dict(type='str'),
),
supports_check_mode=True,
mutually_exclusive=[
['app', 'proto', 'logging']
],
)
cmds = []
ipv4_regexp = compile_ipv4_regexp()
ipv6_regexp = compile_ipv6_regexp()
def filter_line_that_not_start_with(pattern, content):
return ''.join([line for line in content.splitlines(True) if line.startswith(pattern)])
def filter_line_that_contains(pattern, content):
return [line for line in content.splitlines(True) if pattern in line]
def filter_line_that_not_contains(pattern, content):
return ''.join([line for line in content.splitlines(True) if not line.contains(pattern)])
def filter_line_that_match_func(match_func, content):
return ''.join([line for line in content.splitlines(True) if match_func(line) is not None])
def filter_line_that_contains_ipv4(content):
return filter_line_that_match_func(ipv4_regexp.search, content)
def filter_line_that_contains_ipv6(content):
return filter_line_that_match_func(ipv6_regexp.search, content)
def is_starting_by_ipv4(ip):
return ipv4_regexp.match(ip) is not None
def is_starting_by_ipv6(ip):
return ipv6_regexp.match(ip) is not None
def execute(cmd, ignore_error=False):
cmd = ' '.join(map(itemgetter(-1), filter(itemgetter(0), cmd)))
cmds.append(cmd)
(rc, out, err) = module.run_command(cmd, environ_update={"LANG": "C"})
if rc != 0 and not ignore_error:
module.fail_json(msg=err or out, commands=cmds)
return out
def get_current_rules():
user_rules_files = ["/lib/ufw/user.rules",
"/lib/ufw/user6.rules",
"/etc/ufw/user.rules",
"/etc/ufw/user6.rules",
"/var/lib/ufw/user.rules",
"/var/lib/ufw/user6.rules"]
cmd = [[grep_bin], ["-h"], ["'^### tuple'"]]
cmd.extend([[f] for f in user_rules_files])
return execute(cmd, ignore_error=True)
def ufw_version():
"""
Returns the major and minor version of ufw installed on the system.
"""
out = execute([[ufw_bin], ["--version"]])
lines = [x for x in out.split('\n') if x.strip() != '']
if len(lines) == 0:
module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)
matches = re.search(r'^ufw.+(\d+)\.(\d+)(?:\.(\d+))?.*$', lines[0])
if matches is None:
module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)
# Convert version to numbers
major = int(matches.group(1))
minor = int(matches.group(2))
rev = 0
if matches.group(3) is not None:
rev = int(matches.group(3))
return major, minor, rev
params = module.params
# Ensure at least one of the command arguments are given
command_keys = ['state', 'default', 'rule', 'logging']
commands = dict((key, params[key]) for key in command_keys if params[key])
if len(commands) < 1:
module.fail_json(msg="Not any of the command arguments %s given" % commands)
if (params['interface'] is not None and params['direction'] is None):
module.fail_json(msg="Direction must be specified when creating a rule on an interface")
# Ensure ufw is available
ufw_bin = module.get_bin_path('ufw', True)
grep_bin = module.get_bin_path('grep', True)
# Save the pre state and rules in order to recognize changes
pre_state = execute([[ufw_bin], ['status verbose']])
pre_rules = get_current_rules()
changed = False
# Execute filter
for (command, value) in commands.items():
cmd = [[ufw_bin], [module.check_mode, '--dry-run']]
if command == 'state':
states = {'enabled': 'enable', 'disabled': 'disable',
'reloaded': 'reload', 'reset': 'reset'}
if value in ['reloaded', 'reset']:
changed = True
if module.check_mode:
# "active" would also match "inactive", hence the space
ufw_enabled = pre_state.find(" active") != -1
if (value == 'disabled' and ufw_enabled) or (value == 'enabled' and not ufw_enabled):
changed = True
else:
execute(cmd + [['-f'], [states[value]]])
elif command == 'logging':
extract = re.search(r'Logging: (on|off) \(([a-z]+)\)', pre_state)
if extract:
current_level = extract.group(2)
current_on_off_value = extract.group(1)
if value != "off":
if value != "on" and (value != current_level or current_on_off_value == "off"):
changed = True
elif current_on_off_value != "off":
changed = True
else:
changed = True
if not module.check_mode:
execute(cmd + [[command], [value]])
elif command == 'default':
if params['direction'] not in ['outgoing', 'incoming', 'routed']:
module.fail_json(msg='For default, direction must be one of "outgoing", "incoming" and "routed".')
if module.check_mode:
regexp = r'Default: (deny|allow|reject) \(incoming\), (deny|allow|reject) \(outgoing\), (deny|allow|reject|disabled) \(routed\)'
extract = re.search(regexp, pre_state)
if extract is not None:
current_default_values = {}
current_default_values["incoming"] = extract.group(1)
current_default_values["outgoing"] = extract.group(2)
current_default_values["routed"] = extract.group(3)
if current_default_values[params['direction']] != value:
changed = True
else:
changed = True
else:
execute(cmd + [[command], [value], [params['direction']]])
elif command == 'rule':
if params['direction'] in ['outgoing', 'incoming', 'routed']:
module.fail_json(msg='For rules, direction must be one of "in" and "out".')
# Rules are constructed according to the long format
#
# ufw [--dry-run] [route] [delete] [insert NUM] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \
# [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \
# [proto protocol] [app application] [comment COMMENT]
cmd.append([module.boolean(params['route']), 'route'])
cmd.append([module.boolean(params['delete']), 'delete'])
cmd.append([params['insert'], "insert %s" % params['insert']])
cmd.append([value])
cmd.append([params['direction'], "%s" % params['direction']])
cmd.append([params['interface'], "on %s" % params['interface']])
cmd.append([module.boolean(params['log']), 'log'])
for (key, template) in [('from_ip', "from %s"), ('from_port', "port %s"),
('to_ip', "to %s"), ('to_port', "port %s"),
('proto', "proto %s"), ('app', "app '%s'")]:
value = params[key]
cmd.append([value, template % (value)])
ufw_major, ufw_minor, _ = ufw_version()
# comment is supported only in ufw version after 0.35
if (ufw_major == 0 and ufw_minor >= 35) or ufw_major > 0:
cmd.append([params['comment'], "comment '%s'" % params['comment']])
rules_dry = execute(cmd)
if module.check_mode:
nb_skipping_line = len(filter_line_that_contains("Skipping", rules_dry))
if not (nb_skipping_line > 0 and nb_skipping_line == len(rules_dry.splitlines(True))):
rules_dry = filter_line_that_not_start_with("### tuple", rules_dry)
# ufw dry-run doesn't send all rules so have to compare ipv4 or ipv6 rules
if is_starting_by_ipv4(params['from_ip']) or is_starting_by_ipv4(params['to_ip']):
if filter_line_that_contains_ipv4(pre_rules) != filter_line_that_contains_ipv4(rules_dry):
changed = True
elif is_starting_by_ipv6(params['from_ip']) or is_starting_by_ipv6(params['to_ip']):
if filter_line_that_contains_ipv6(pre_rules) != filter_line_that_contains_ipv6(rules_dry):
changed = True
elif pre_rules != rules_dry:
changed = True
# Get the new state
if module.check_mode:
return module.exit_json(changed=changed, commands=cmds)
else:
post_state = execute([[ufw_bin], ['status'], ['verbose']])
if not changed:
post_rules = get_current_rules()
changed = (pre_state != post_state) or (pre_rules != post_rules)
return module.exit_json(changed=changed, commands=cmds, msg=post_state.rstrip())
|
57,776 | def arduino_get_pin_command(server: Server, args: any) -> str:
pin_type: str = args.get('pin_type')
prefix = "Arduino.DigitalPins" if pin_type == "digital" else "Arduino.AnalogPins"
pin_number: int = int(args.get('pin_number'))
result: int = int(server.send_data(f"get:{pin_type}:{pin_number}"))
results = [{
"PinType": "Digital" if pin_type == "digital" else "Analog",
"PinNumber": pin_number,
"PinValue": result
}]
command_results = CommandResults(
outputs_prefix=prefix,
outputs_key_field=['PinNumber', 'PinType'],
outputs=results,
readable_output=tableToMarkdown(f"Get pin {pin_number} on {server.host}({server.port}):", results)
)
return command_results
| def arduino_get_pin_command(server: Server, args: any) -> str:
pin_type: str = args.get('pin_type')
prefix = "Arduino.DigitalPins" if pin_type == "digital" else "Arduino.AnalogPins"
pin_number = int(args.get('pin_number'))
result = int(server.send_data(f"get:{pin_type}:{pin_number}"))
results = [{
"PinType": "Digital" if pin_type == "digital" else "Analog",
"PinNumber": pin_number,
"PinValue": result
}]
command_results = CommandResults(
outputs_prefix=prefix,
outputs_key_field=['PinNumber', 'PinType'],
outputs=results,
readable_output=tableToMarkdown(f"Get pin {pin_number} on {server.host}({server.port}):", results)
)
return command_results
|
58,464 | def time_perfcounter_correlation():
""" on Windows platform this will help with the accuracy of using the time.time fucntion.
Returns
-------
t, performance_counter : (time.time, float)
time.time value and perf_counter value when the time.time is updated
"""
if platform.system() == "Windows":
t0 = time()
while 1:
t1, performance_counter = time(), perf_counter()
if t1 != t0:
break
else:
return time(), perf_counter()
return t1, performance_counter
| def time_perfcounter_correlation():
"""Get the `perf_counter` value nearest to when time.time() is updated on Windows.
On non Windows platforms the current time and perf_counter is directly returned.
Note this value is based on when `time.time()` is observed to update from Python, it is not directly returned by the operating system.
Returns
-------
t, performance_counter : (time.time, float)
time.time value and perf_counter value when the time.time is updated
"""
if platform.system() == "Windows":
t0 = time()
while 1:
t1, performance_counter = time(), perf_counter()
if t1 != t0:
break
else:
return time(), perf_counter()
return t1, performance_counter
|
42,962 | def energies(samples: np.ndarray, wp: np.ndarray) -> np.ndarray:
r"""Computes the energy of GBS samples in :math:`\text{cm}^{-1}` unit.
**Example usage:**
>>> samples = np.array([[1, 1, 0], [1, 0, 2]])
>>> wp = np.array([700.0, 600.0, 500.0])
>>> energies(samples, wp)
[1300.0, 1700.0]
Args:
samples (array): GBS samples
wp (array): normal mode frequencies in :math:`\text{cm}^{-1}`
Returns:
E (list): list of GBS sample energies in :math:`\text{cm}^{-1}`
"""
E = []
for sample in samples:
e = sum(sample * wp)
E.append(e)
return E
| def energies(samples: np.ndarray, wp: np.ndarray) -> np.ndarray:
r"""Computes the energy of GBS samples in :math:`\text{cm}^{-1}` unit.
**Example usage:**
>>> samples = np.array([[1, 1, 0], [1, 0, 2]])
>>> wp = np.array([700.0, 600.0, 500.0])
>>> energies(samples, wp)
[1300.0, 1700.0]
Args:
samples (array): GBS samples
wp (array): normal mode frequencies in units of :math:`\text{cm}^{-1}`
Returns:
E (list): list of GBS sample energies in :math:`\text{cm}^{-1}`
"""
E = []
for sample in samples:
e = sum(sample * wp)
E.append(e)
return E
|
50,061 | def get_parser() -> ArgumentParser:
parser = ArgumentParser(
usage="%(prog)s [options] [connection string]",
description=(
"htop like application for PostgreSQL " "server activity monitoring."
),
epilog=(
"The connection string can be in the form of a list of "
"Key/Value parameters or an URI as described in the PostgreSQL documentation. "
"The parsing is delegated to the libpq: different versions of the client library "
"may support different formats or parameters (for example, connection URIs are "
"only supported from libpq 9.2)"
),
add_help=False,
)
# Connection string
parser.add_argument(
"connection_string",
help="A valid connection string to the database, e.g.: "
"'host=HOSTNAME port=PORT user=USER dbname=DBNAME'.",
nargs="?",
)
# -U / --username
parser.add_argument(
"-U",
"--username",
dest="username",
help="Database user name",
metavar="USERNAME",
)
# -p / --port
parser.add_argument(
"-p",
"--port",
dest="port",
help="Database server port",
metavar="PORT",
)
# -h / --host
parser.add_argument(
"-h",
"--host",
dest="host",
help="Database server host or socket directory",
metavar="HOSTNAME",
)
# -d / --dbname
parser.add_argument(
"-d",
"--dbname",
dest="dbname",
help="Database name to connect to",
metavar="DBNAME",
)
# --blocksize
parser.add_argument(
"--blocksize",
dest="blocksize",
help="Filesystem blocksize (default: %(default)s)",
metavar="BLOCKSIZE",
type=int,
default=4096,
)
# --rds
parser.add_argument(
"--rds",
dest="rds",
action="store_true",
help="Enable support for AWS RDS",
default=False,
)
# --output
parser.add_argument(
"--output",
dest="output",
help="Store running queries as CSV",
metavar="FILEPATH",
default=None,
)
# --no-db-size
parser.add_argument(
"--no-db-size",
dest="nodbsize",
action="store_true",
help="Skip total size of DB",
default=False,
)
# --wrap-query
parser.add_argument(
"-w",
"--wrap-query",
dest="wrap_query",
action="store_true",
help="Wrap query column instead of truncating",
default=False,
)
# --duration-mode
parser.add_argument(
"--duration-mode",
dest="durationmode",
help="Duration mode. Values: 1-QUERY(default), 2-TRANSACTION, 3-BACKEND",
metavar="DURATION_MODE",
choices=["1", "2", "3"],
default="1",
)
# --min-duration
parser.add_argument(
"--min-duration",
dest="minduration",
help="Don't display queries with smaller than specified duration (in seconds)",
metavar="SECONDS",
type=float,
default=0,
)
# --filter
parser.add_argument(
"--filter",
dest="filters",
help=(
"Filter activities with a (case insensitive) regular expression applied on selected fields. "
"Known fields are: dbname."
),
action="append",
metavar="FIELD:REGEX",
default=[],
)
# --version
parser.add_argument(
"--version",
help="show program's version number and exit",
action="version",
version=f"%(prog)s {__version__}",
)
# --help
parser.add_argument(
"--help",
dest="help",
action="store_true",
help="Show this help message and exit",
default=False,
)
group = parser.add_argument_group(
"Display Options", "you can exclude some columns by using them."
)
# --no-pid
group.add_argument(
"--no-pid",
dest="nopid",
action="store_true",
help="Disable PID.",
default=False,
)
# --no-database
group.add_argument(
"--no-database",
dest="nodb",
action="store_true",
help="Disable DATABASE",
default=False,
)
# --no-user
group.add_argument(
"--no-user",
dest="nouser",
action="store_true",
help="Disable USER",
default=False,
)
# --no-client
group.add_argument(
"--no-client",
dest="noclient",
action="store_true",
help="Disable CLIENT",
default=False,
)
# --no-cpu
group.add_argument(
"--no-cpu",
dest="nocpu",
action="store_true",
help="Disable CPU%%",
default=False,
)
# --no-mem
group.add_argument(
"--no-mem",
dest="nomem",
action="store_true",
help="Disable MEM%%",
default=False,
)
# --no-read
group.add_argument(
"--no-read",
dest="noread",
action="store_true",
help="Disable READ/s",
default=False,
)
# --no-write
group.add_argument(
"--no-write",
dest="nowrite",
action="store_true",
help="Disable WRITE/s",
default=False,
)
# --no-time
group.add_argument(
"--no-time",
dest="notime",
action="store_true",
help="Disable TIME+",
default=False,
)
# --no-wait
group.add_argument(
"--no-wait", dest="nowait", action="store_true", help="Disable W", default=False
)
# --no-app-name
group.add_argument(
"--no-app-name",
dest="noappname",
action="store_true",
help="Disable App",
default=False,
)
# --hide-queries-in-logs
group.add_argument(
"--hide-queries-in-logs",
dest="hide_queries_in_logs",
action="store_true",
help="Disable log_min_duration_statements and log_min_duration_sample for pg_activity",
default=False,
)
# --no-inst-info
group.add_argument(
"--no-inst-info",
dest="show_instance_info_in_header",
action="store_false",
help="Display instance information in header",
default=True,
)
# --no-sys-info
group.add_argument(
"--no-sys-info",
dest="show_system_info_in_header",
action="store_false",
help="Display system information in header",
default=True,
)
# --no-proc-info
group.add_argument(
"--no-proc-info",
dest="show_worker_info_in_header",
action="store_false",
help="Display workers process information in header",
default=True,
)
return parser
| def get_parser() -> ArgumentParser:
parser = ArgumentParser(
usage="%(prog)s [options] [connection string]",
description=(
"htop like application for PostgreSQL " "server activity monitoring."
),
epilog=(
"The connection string can be in the form of a list of "
"Key/Value parameters or an URI as described in the PostgreSQL documentation. "
"The parsing is delegated to the libpq: different versions of the client library "
"may support different formats or parameters (for example, connection URIs are "
"only supported from libpq 9.2)"
),
add_help=False,
)
# Connection string
parser.add_argument(
"connection_string",
help=(
"A valid connection string to the database, e.g.: "
"'host=HOSTNAME port=PORT user=USER dbname=DBNAME'."
),
nargs="?",
)
# -U / --username
parser.add_argument(
"-U",
"--username",
dest="username",
help="Database user name",
metavar="USERNAME",
)
# -p / --port
parser.add_argument(
"-p",
"--port",
dest="port",
help="Database server port",
metavar="PORT",
)
# -h / --host
parser.add_argument(
"-h",
"--host",
dest="host",
help="Database server host or socket directory",
metavar="HOSTNAME",
)
# -d / --dbname
parser.add_argument(
"-d",
"--dbname",
dest="dbname",
help="Database name to connect to",
metavar="DBNAME",
)
# --blocksize
parser.add_argument(
"--blocksize",
dest="blocksize",
help="Filesystem blocksize (default: %(default)s)",
metavar="BLOCKSIZE",
type=int,
default=4096,
)
# --rds
parser.add_argument(
"--rds",
dest="rds",
action="store_true",
help="Enable support for AWS RDS",
default=False,
)
# --output
parser.add_argument(
"--output",
dest="output",
help="Store running queries as CSV",
metavar="FILEPATH",
default=None,
)
# --no-db-size
parser.add_argument(
"--no-db-size",
dest="nodbsize",
action="store_true",
help="Skip total size of DB",
default=False,
)
# --wrap-query
parser.add_argument(
"-w",
"--wrap-query",
dest="wrap_query",
action="store_true",
help="Wrap query column instead of truncating",
default=False,
)
# --duration-mode
parser.add_argument(
"--duration-mode",
dest="durationmode",
help="Duration mode. Values: 1-QUERY(default), 2-TRANSACTION, 3-BACKEND",
metavar="DURATION_MODE",
choices=["1", "2", "3"],
default="1",
)
# --min-duration
parser.add_argument(
"--min-duration",
dest="minduration",
help="Don't display queries with smaller than specified duration (in seconds)",
metavar="SECONDS",
type=float,
default=0,
)
# --filter
parser.add_argument(
"--filter",
dest="filters",
help=(
"Filter activities with a (case insensitive) regular expression applied on selected fields. "
"Known fields are: dbname."
),
action="append",
metavar="FIELD:REGEX",
default=[],
)
# --version
parser.add_argument(
"--version",
help="show program's version number and exit",
action="version",
version=f"%(prog)s {__version__}",
)
# --help
parser.add_argument(
"--help",
dest="help",
action="store_true",
help="Show this help message and exit",
default=False,
)
group = parser.add_argument_group(
"Display Options", "you can exclude some columns by using them."
)
# --no-pid
group.add_argument(
"--no-pid",
dest="nopid",
action="store_true",
help="Disable PID.",
default=False,
)
# --no-database
group.add_argument(
"--no-database",
dest="nodb",
action="store_true",
help="Disable DATABASE",
default=False,
)
# --no-user
group.add_argument(
"--no-user",
dest="nouser",
action="store_true",
help="Disable USER",
default=False,
)
# --no-client
group.add_argument(
"--no-client",
dest="noclient",
action="store_true",
help="Disable CLIENT",
default=False,
)
# --no-cpu
group.add_argument(
"--no-cpu",
dest="nocpu",
action="store_true",
help="Disable CPU%%",
default=False,
)
# --no-mem
group.add_argument(
"--no-mem",
dest="nomem",
action="store_true",
help="Disable MEM%%",
default=False,
)
# --no-read
group.add_argument(
"--no-read",
dest="noread",
action="store_true",
help="Disable READ/s",
default=False,
)
# --no-write
group.add_argument(
"--no-write",
dest="nowrite",
action="store_true",
help="Disable WRITE/s",
default=False,
)
# --no-time
group.add_argument(
"--no-time",
dest="notime",
action="store_true",
help="Disable TIME+",
default=False,
)
# --no-wait
group.add_argument(
"--no-wait", dest="nowait", action="store_true", help="Disable W", default=False
)
# --no-app-name
group.add_argument(
"--no-app-name",
dest="noappname",
action="store_true",
help="Disable App",
default=False,
)
# --hide-queries-in-logs
group.add_argument(
"--hide-queries-in-logs",
dest="hide_queries_in_logs",
action="store_true",
help="Disable log_min_duration_statements and log_min_duration_sample for pg_activity",
default=False,
)
# --no-inst-info
group.add_argument(
"--no-inst-info",
dest="show_instance_info_in_header",
action="store_false",
help="Display instance information in header",
default=True,
)
# --no-sys-info
group.add_argument(
"--no-sys-info",
dest="show_system_info_in_header",
action="store_false",
help="Display system information in header",
default=True,
)
# --no-proc-info
group.add_argument(
"--no-proc-info",
dest="show_worker_info_in_header",
action="store_false",
help="Display workers process information in header",
default=True,
)
return parser
|
27,696 | def idmaker(argnames, parametersets, idfn=None, ids=None, config=None, item=None):
ids = [
_idvalset(valindex, parameterset, argnames, idfn, ids, config=config, item=item)
for valindex, parameterset in enumerate(parametersets)
]
# All ids must be unique!
unique_ids = set(ids)
if len(unique_ids) != len(ids):
# Record the number of occurances of each test id
testid_counts = Counter(ids)
# Map the test id to its next suffix.
testid_suffixes = Counter(unique_ids)
for testid in testid_suffixes.keys():
testid_suffixes[testid] -= 1 # start each suffix at 0
# Suffix non-unique ids to make them unique:
for index, testid in enumerate(ids):
if testid_counts[testid] > 1:
ids[index] = f"{testid}{testid_suffixes[testid]}"
testid_suffixes[testid] += 1
return ids
| def idmaker(argnames, parametersets, idfn=None, ids=None, config=None, item=None):
ids = [
_idvalset(valindex, parameterset, argnames, idfn, ids, config=config, item=item)
for valindex, parameterset in enumerate(parametersets)
]
# All ids must be unique!
unique_ids = set(ids)
if len(unique_ids) != len(ids):
# Record the number of occurrences of each test id
testid_counts = Counter(ids)
# Map the test id to its next suffix.
testid_suffixes = Counter(unique_ids)
for testid in testid_suffixes.keys():
testid_suffixes[testid] -= 1 # start each suffix at 0
# Suffix non-unique ids to make them unique:
for index, testid in enumerate(ids):
if testid_counts[testid] > 1:
ids[index] = f"{testid}{testid_suffixes[testid]}"
testid_suffixes[testid] += 1
return ids
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.