body_hash (stringlengths 64-64) | body (stringlengths 23-109k) | docstring (stringlengths 1-57k) | path (stringlengths 4-198) | name (stringlengths 1-115) | repository_name (stringlengths 7-111) | repository_stars (float64 0-191k) | lang (stringclasses 1 value) | body_without_docstring (stringlengths 14-108k) | unified (stringlengths 45-133k)
---|---|---|---|---|---|---|---|---|---|
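Each row below follows this schema: a 64-character `body_hash`, the full function `body`, the extracted `docstring`, the source `path`, the function `name`, `repository_name`, `repository_stars`, the `lang` tag (a single class, `python` in the rows shown), the `body_without_docstring` variant, and a `unified` field that re-joins body and docstring with `<|docstring|>` / `<|endoftext|>` markers. As a hedged illustration of how such a table is typically consumed, the sketch below iterates a few records with the Hugging Face `datasets` library; the dataset identifier is a placeholder, not the real one.

```python
# Illustrative only: "user/python-docstring-corpus" is a placeholder dataset id.
from datasets import load_dataset

ds = load_dataset("user/python-docstring-corpus", split="train")

for record in ds.select(range(3)):
    # Column names match the header above.
    print(record["repository_name"], record["path"], record["name"])
    print(record["docstring"].splitlines()[0])
```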
499972ec83124e553a0be0c0ecc9cedbd3c60f923306fbeff410501ece6397c4 | def get_label_units(projected_file):
"Input: Any file with a projected spatial reference\n Returns: list storing unit label for filenames, linear unit itself, and the spatial reference file\n Example return: ['m', 'Meters', *spatial_ref_object*]"
spatial_ref = arcpy.Describe(projected_file).spatialReference
unit = spatial_ref.linearUnitName
if (unit == 'Meter'):
u = 'm'
else:
u = 'ft'
return [u, unit, spatial_ref] | Input: Any file with a projected spatial reference
Returns: list storing unit label for filenames, linear unit itself, and the spatial reference file
Example return: ['m', 'Meters', *spatial_ref_object*] | file_functions.py | get_label_units | xaviernogueira/gcs_gui | 4 | python | def get_label_units(projected_file):
"Input: Any file with a projected spatial reference\n Returns: list storing unit label for filenames, linear unit itself, and the spatial reference file\n Example return: ['m', 'Meters', *spatial_ref_object*]"
spatial_ref = arcpy.Describe(projected_file).spatialReference
unit = spatial_ref.linearUnitName
if (unit == 'Meter'):
u = 'm'
else:
u = 'ft'
return [u, unit, spatial_ref] | def get_label_units(projected_file):
"Input: Any file with a projected spatial reference\n Returns: list storing unit label for filenames, linear unit itself, and the spatial reference file\n Example return: ['m', 'Meters', *spatial_ref_object*]"
spatial_ref = arcpy.Describe(projected_file).spatialReference
unit = spatial_ref.linearUnitName
if (unit == 'Meter'):
u = 'm'
else:
u = 'ft'
return [u, unit, spatial_ref]<|docstring|>Input: Any file with a projected spatial reference
Returns: list storing unit label for filenames, linear unit itself, and the spatial reference file
Example return: ['m', 'Meters', *spatial_ref_object*]<|endoftext|> |
341c2e60629fd5ede334cd49526818e01d2162700ca0b9dd4e0790ea78f53204 | def get_data(self):
"Return the object's matrix\n\n Parameters\n ----------\n self : ImportMatrixVal\n An ImportMatrixVal object\n\n Returns\n -------\n matrix: ndarray\n The object's matrix\n\n "
return self.edit_matrix(self.value) | Return the object's matrix
Parameters
----------
self : ImportMatrixVal
An ImportMatrixVal object
Returns
-------
matrix: ndarray
The object's matrix | pyleecan/Methods/Import/ImportMatrixVal/get_data.py | get_data | IrakozeFD/pyleecan | 95 | python | def get_data(self):
"Return the object's matrix\n\n Parameters\n ----------\n self : ImportMatrixVal\n An ImportMatrixVal object\n\n Returns\n -------\n matrix: ndarray\n The object's matrix\n\n "
return self.edit_matrix(self.value) | def get_data(self):
"Return the object's matrix\n\n Parameters\n ----------\n self : ImportMatrixVal\n An ImportMatrixVal object\n\n Returns\n -------\n matrix: ndarray\n The object's matrix\n\n "
return self.edit_matrix(self.value)<|docstring|>Return the object's matrix
Parameters
----------
self : ImportMatrixVal
An ImportMatrixVal object
Returns
-------
matrix: ndarray
The object's matrix<|endoftext|> |
7cfe0449b1fa24c86da86a4cbbf2631ab271c3c043f3883fdd26bc5206b6cc3b | def get_mean_std(self, type, mean_std_path):
"\n 计算数据集的均值和标准差\n :param type: 使用的是那个数据集的数据,有'train', 'test', 'testing'\n :param mean_std_path: 计算出来的均值和标准差存储的文件\n :return:\n "
num_imgs = len(self.dataset[type])
for data in self.dataset[type]:
img = data[0]
for i in range(3):
self.means[i] += img[(i, :, :)].mean()
self.stdevs[i] += img[(i, :, :)].std()
self.means = (np.asarray(self.means) / num_imgs)
self.stdevs = (np.asarray(self.stdevs) / num_imgs)
print('{} : normMean = {}'.format(type, self.means))
print('{} : normstdevs = {}'.format(type, self.stdevs))
with open(mean_std_path, 'wb') as f:
pickle.dump(self.means, f)
pickle.dump(self.stdevs, f)
print('pickle done') | Compute the mean and standard deviation of the dataset
:param type: which dataset split to use, one of 'train', 'test', 'testing'
:param mean_std_path: file where the computed mean and standard deviation are stored
:return: | MT-CNV/get_mean_std.py | get_mean_std | Wangzheaos/DARD-Net | 0 | python | def get_mean_std(self, type, mean_std_path):
"\n 计算数据集的均值和标准差\n :param type: 使用的是那个数据集的数据,有'train', 'test', 'testing'\n :param mean_std_path: 计算出来的均值和标准差存储的文件\n :return:\n "
num_imgs = len(self.dataset[type])
for data in self.dataset[type]:
img = data[0]
for i in range(3):
self.means[i] += img[(i, :, :)].mean()
self.stdevs[i] += img[(i, :, :)].std()
self.means = (np.asarray(self.means) / num_imgs)
self.stdevs = (np.asarray(self.stdevs) / num_imgs)
print('{} : normMean = {}'.format(type, self.means))
print('{} : normstdevs = {}'.format(type, self.stdevs))
with open(mean_std_path, 'wb') as f:
pickle.dump(self.means, f)
pickle.dump(self.stdevs, f)
print('pickle done') | def get_mean_std(self, type, mean_std_path):
"\n 计算数据集的均值和标准差\n :param type: 使用的是那个数据集的数据,有'train', 'test', 'testing'\n :param mean_std_path: 计算出来的均值和标准差存储的文件\n :return:\n "
num_imgs = len(self.dataset[type])
for data in self.dataset[type]:
img = data[0]
for i in range(3):
self.means[i] += img[(i, :, :)].mean()
self.stdevs[i] += img[(i, :, :)].std()
self.means = (np.asarray(self.means) / num_imgs)
self.stdevs = (np.asarray(self.stdevs) / num_imgs)
print('{} : normMean = {}'.format(type, self.means))
print('{} : normstdevs = {}'.format(type, self.stdevs))
with open(mean_std_path, 'wb') as f:
pickle.dump(self.means, f)
pickle.dump(self.stdevs, f)
print('pickle done')<|docstring|>Compute the mean and standard deviation of the dataset
:param type: which dataset split to use, one of 'train', 'test', 'testing'
:param mean_std_path: file where the computed mean and standard deviation are stored
:return:<|endoftext|> |
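For context on the row above: `get_mean_std` averages each image's per-channel mean and standard deviation over the dataset rather than pooling all pixels. A minimal NumPy sketch of the same computation on synthetic CHW images follows; the names, shapes, and output file are illustrative, not taken from the original repository.

```python
import pickle
import numpy as np

# Synthetic stand-in for dataset[type]: (image, label) pairs with images in CHW layout.
rng = np.random.default_rng(0)
dataset = [(rng.random((3, 32, 32)), 0) for _ in range(10)]

means = np.zeros(3)
stdevs = np.zeros(3)
for img, _ in dataset:
    means += img.mean(axis=(1, 2))    # per-channel mean of this image
    stdevs += img.std(axis=(1, 2))    # per-channel std of this image
means /= len(dataset)
stdevs /= len(dataset)

# Persist the statistics with pickle, mirroring the approach above.
with open("mean_std.pkl", "wb") as f:
    pickle.dump(means, f)
    pickle.dump(stdevs, f)
print("normMean =", means, "normStd =", stdevs)
```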
1263a0022ee8b5baccaa7d24cd33feeb2da52d6a90dbc098378c32edfb5fc223 | def __init__(self, *args, **kwargs):
'Constructor which sets up variables used by tests.\n :param args: arguments.\n :param kwargs: keyword arguments.\n '
super(TestDoabTelescope, self).__init__(*args, **kwargs)
self.project_id = os.getenv('TEST_GCP_PROJECT_ID')
self.data_location = os.getenv('TEST_GCP_DATA_LOCATION')
self.first_download_path = test_fixtures_folder('doab', 'doab1.csv')
self.first_execution_date = pendulum.datetime(year=2021, month=2, day=1)
self.second_download_path = test_fixtures_folder('doab', 'doab2.csv')
self.second_execution_date = pendulum.datetime(year=2021, month=3, day=1) | Constructor which sets up variables used by tests.
:param args: arguments.
:param kwargs: keyword arguments. | oaebu_workflows/workflows/tests/test_doab_telescope.py | __init__ | The-Academic-Observatory/oaebu-workflows | 2 | python | def __init__(self, *args, **kwargs):
'Constructor which sets up variables used by tests.\n :param args: arguments.\n :param kwargs: keyword arguments.\n '
super(TestDoabTelescope, self).__init__(*args, **kwargs)
self.project_id = os.getenv('TEST_GCP_PROJECT_ID')
self.data_location = os.getenv('TEST_GCP_DATA_LOCATION')
self.first_download_path = test_fixtures_folder('doab', 'doab1.csv')
self.first_execution_date = pendulum.datetime(year=2021, month=2, day=1)
self.second_download_path = test_fixtures_folder('doab', 'doab2.csv')
self.second_execution_date = pendulum.datetime(year=2021, month=3, day=1) | def __init__(self, *args, **kwargs):
'Constructor which sets up variables used by tests.\n :param args: arguments.\n :param kwargs: keyword arguments.\n '
super(TestDoabTelescope, self).__init__(*args, **kwargs)
self.project_id = os.getenv('TEST_GCP_PROJECT_ID')
self.data_location = os.getenv('TEST_GCP_DATA_LOCATION')
self.first_download_path = test_fixtures_folder('doab', 'doab1.csv')
self.first_execution_date = pendulum.datetime(year=2021, month=2, day=1)
self.second_download_path = test_fixtures_folder('doab', 'doab2.csv')
self.second_execution_date = pendulum.datetime(year=2021, month=3, day=1)<|docstring|>Constructor which sets up variables used by tests.
:param args: arguments.
:param kwargs: keyword arguments.<|endoftext|> |
e385552b51f2a0076f298cbb5be7535374f10962b946ccdbc32ad229b7fde5cc | def test_dag_structure(self):
'Test that the DOAB DAG has the correct structure.\n :return: None\n '
dag = DoabTelescope().make_dag()
self.assert_dag_structure({'check_dependencies': ['download'], 'download': ['upload_downloaded'], 'upload_downloaded': ['transform'], 'transform': ['upload_transformed'], 'upload_transformed': ['bq_load_partition'], 'bq_load_partition': ['bq_delete_old'], 'bq_delete_old': ['bq_append_new'], 'bq_append_new': ['cleanup'], 'cleanup': []}, dag) | Test that the DOAB DAG has the correct structure.
:return: None | oaebu_workflows/workflows/tests/test_doab_telescope.py | test_dag_structure | The-Academic-Observatory/oaebu-workflows | 2 | python | def test_dag_structure(self):
'Test that the DOAB DAG has the correct structure.\n :return: None\n '
dag = DoabTelescope().make_dag()
self.assert_dag_structure({'check_dependencies': ['download'], 'download': ['upload_downloaded'], 'upload_downloaded': ['transform'], 'transform': ['upload_transformed'], 'upload_transformed': ['bq_load_partition'], 'bq_load_partition': ['bq_delete_old'], 'bq_delete_old': ['bq_append_new'], 'bq_append_new': ['cleanup'], 'cleanup': []}, dag) | def test_dag_structure(self):
'Test that the DOAB DAG has the correct structure.\n :return: None\n '
dag = DoabTelescope().make_dag()
self.assert_dag_structure({'check_dependencies': ['download'], 'download': ['upload_downloaded'], 'upload_downloaded': ['transform'], 'transform': ['upload_transformed'], 'upload_transformed': ['bq_load_partition'], 'bq_load_partition': ['bq_delete_old'], 'bq_delete_old': ['bq_append_new'], 'bq_append_new': ['cleanup'], 'cleanup': []}, dag)<|docstring|>Test that the DOAB DAG has the correct structure.
:return: None<|endoftext|> |
fb5e28297a5557010d62fed32e64c9bbfc6b1e7fb4b67eee0de4f895c264ae46 | def test_dag_load(self):
'Test that the DOAB DAG can be loaded from a DAG bag.\n :return: None\n '
with ObservatoryEnvironment().create():
dag_file = os.path.join(module_file_path('oaebu_workflows.dags'), 'doab_telescope.py')
self.assert_dag_load('doab', dag_file) | Test that the DOAB DAG can be loaded from a DAG bag.
:return: None | oaebu_workflows/workflows/tests/test_doab_telescope.py | test_dag_load | The-Academic-Observatory/oaebu-workflows | 2 | python | def test_dag_load(self):
'Test that the DOAB DAG can be loaded from a DAG bag.\n :return: None\n '
with ObservatoryEnvironment().create():
dag_file = os.path.join(module_file_path('oaebu_workflows.dags'), 'doab_telescope.py')
self.assert_dag_load('doab', dag_file) | def test_dag_load(self):
'Test that the DOAB DAG can be loaded from a DAG bag.\n :return: None\n '
with ObservatoryEnvironment().create():
dag_file = os.path.join(module_file_path('oaebu_workflows.dags'), 'doab_telescope.py')
self.assert_dag_load('doab', dag_file)<|docstring|>Test that the DOAB DAG can be loaded from a DAG bag.
:return: None<|endoftext|> |
81935d7c2cc12d42a882bbaa07df08c9d76b2a0fcaf06bf319aeb3dc135ab4af | def test_telescope(self):
'Test the DOAB telescope end to end.\n :return: None.\n '
env = ObservatoryEnvironment(self.project_id, self.data_location)
dataset_id = env.add_dataset()
telescope = DoabTelescope(dataset_id=dataset_id)
dag = telescope.make_dag()
with env.create():
with env.create_dag_run(dag, self.first_execution_date) as m_dagrun:
env.run_task(telescope.check_dependencies.__name__)
(start_date, end_date, first_release) = telescope.get_release_info(execution_date=self.first_execution_date, dag_run=m_dagrun, dag=dag, next_execution_date=pendulum.datetime(2021, 3, 1))
release = DoabRelease(telescope.dag_id, start_date, end_date, first_release)
with httpretty.enabled():
self.setup_mock_file_download(DoabTelescope.CSV_URL, self.first_download_path)
env.run_task(telescope.download.__name__)
self.assertEqual(1, len(release.download_files))
download_path = release.download_files[0]
expected_file_hash = get_file_hash(file_path=self.first_download_path, algorithm='md5')
self.assert_file_integrity(download_path, expected_file_hash, 'md5')
env.run_task(telescope.upload_downloaded.__name__)
self.assert_blob_integrity(env.download_bucket, blob_name(download_path), download_path)
env.run_task(telescope.transform.__name__)
self.assertEqual(1, len(release.transform_files))
transform_path = release.transform_files[0]
expected_file_hash = '97a86394'
self.assert_file_integrity(transform_path, expected_file_hash, 'gzip_crc')
env.run_task(telescope.upload_transformed.__name__)
self.assert_blob_integrity(env.transform_bucket, blob_name(transform_path), transform_path)
ti = env.run_task(telescope.bq_load_partition.__name__)
self.assertEqual(ti.state, 'skipped')
with patch('observatory.platform.utils.gc_utils.bq_query_bytes_daily_limit_check'):
ti = env.run_task(telescope.bq_delete_old.__name__)
self.assertEqual(ti.state, 'skipped')
env.run_task(telescope.bq_append_new.__name__)
(main_table_id, partition_table_id) = table_ids_from_path(transform_path)
table_id = f'{self.project_id}.{telescope.dataset_id}.{main_table_id}'
expected_rows = 4
self.assert_table_integrity(table_id, expected_rows)
(download_folder, extract_folder, transform_folder) = (release.download_folder, release.extract_folder, release.transform_folder)
env.run_task(telescope.cleanup.__name__)
self.assert_cleanup(download_folder, extract_folder, transform_folder)
with env.create_dag_run(dag, self.second_execution_date) as m_dag_run:
env.run_task(telescope.check_dependencies.__name__)
(start_date, end_date, first_release) = telescope.get_release_info(execution_date=self.second_execution_date, dag_run=m_dag_run, dag=dag, next_execution_date=pendulum.datetime(2021, 4, 1))
self.assertEqual((release.end_date + timedelta(days=1)), start_date)
self.assertEqual((pendulum.today('UTC') - timedelta(days=1)), end_date)
self.assertFalse(first_release)
release = DoabRelease(telescope.dag_id, start_date, end_date, first_release)
with httpretty.enabled():
self.setup_mock_file_download(DoabTelescope.CSV_URL, self.second_download_path)
env.run_task(telescope.download.__name__)
self.assertEqual(1, len(release.download_files))
download_path = release.download_files[0]
expected_file_hash = get_file_hash(file_path=self.second_download_path, algorithm='md5')
self.assert_file_integrity(download_path, expected_file_hash, 'md5')
env.run_task(telescope.upload_downloaded.__name__)
self.assert_blob_integrity(env.download_bucket, blob_name(download_path), download_path)
env.run_task(telescope.transform.__name__)
self.assertEqual(1, len(release.transform_files))
transform_path = release.transform_files[0]
expected_file_hash = '19f6ba1e'
self.assert_file_integrity(transform_path, expected_file_hash, 'gzip_crc')
env.run_task(telescope.upload_transformed.__name__)
self.assert_blob_integrity(env.transform_bucket, blob_name(transform_path), transform_path)
env.run_task(telescope.bq_load_partition.__name__)
(main_table_id, partition_table_id) = table_ids_from_path(transform_path)
table_id = create_date_table_id(partition_table_id, release.end_date, bigquery.TimePartitioningType.DAY)
table_id = f'{self.project_id}.{telescope.dataset_id}.{table_id}'
expected_rows = 4
self.assert_table_integrity(table_id, expected_rows)
with patch('observatory.platform.utils.gc_utils.bq_query_bytes_daily_limit_check'):
env.run_task(telescope.bq_delete_old.__name__)
table_id = f'{self.project_id}.{telescope.dataset_id}.{main_table_id}'
expected_rows = 3
self.assert_table_integrity(table_id, expected_rows)
env.run_task(telescope.bq_append_new.__name__)
table_id = f'{self.project_id}.{telescope.dataset_id}.{main_table_id}'
expected_rows = 7
self.assert_table_integrity(table_id, expected_rows)
(download_folder, extract_folder, transform_folder) = (release.download_folder, release.extract_folder, release.transform_folder)
env.run_task(telescope.cleanup.__name__)
self.assert_cleanup(download_folder, extract_folder, transform_folder) | Test the DOAB telescope end to end.
:return: None. | oaebu_workflows/workflows/tests/test_doab_telescope.py | test_telescope | The-Academic-Observatory/oaebu-workflows | 2 | python | def test_telescope(self):
'Test the DOAB telescope end to end.\n :return: None.\n '
env = ObservatoryEnvironment(self.project_id, self.data_location)
dataset_id = env.add_dataset()
telescope = DoabTelescope(dataset_id=dataset_id)
dag = telescope.make_dag()
with env.create():
with env.create_dag_run(dag, self.first_execution_date) as m_dagrun:
env.run_task(telescope.check_dependencies.__name__)
(start_date, end_date, first_release) = telescope.get_release_info(execution_date=self.first_execution_date, dag_run=m_dagrun, dag=dag, next_execution_date=pendulum.datetime(2021, 3, 1))
release = DoabRelease(telescope.dag_id, start_date, end_date, first_release)
with httpretty.enabled():
self.setup_mock_file_download(DoabTelescope.CSV_URL, self.first_download_path)
env.run_task(telescope.download.__name__)
self.assertEqual(1, len(release.download_files))
download_path = release.download_files[0]
expected_file_hash = get_file_hash(file_path=self.first_download_path, algorithm='md5')
self.assert_file_integrity(download_path, expected_file_hash, 'md5')
env.run_task(telescope.upload_downloaded.__name__)
self.assert_blob_integrity(env.download_bucket, blob_name(download_path), download_path)
env.run_task(telescope.transform.__name__)
self.assertEqual(1, len(release.transform_files))
transform_path = release.transform_files[0]
expected_file_hash = '97a86394'
self.assert_file_integrity(transform_path, expected_file_hash, 'gzip_crc')
env.run_task(telescope.upload_transformed.__name__)
self.assert_blob_integrity(env.transform_bucket, blob_name(transform_path), transform_path)
ti = env.run_task(telescope.bq_load_partition.__name__)
self.assertEqual(ti.state, 'skipped')
with patch('observatory.platform.utils.gc_utils.bq_query_bytes_daily_limit_check'):
ti = env.run_task(telescope.bq_delete_old.__name__)
self.assertEqual(ti.state, 'skipped')
env.run_task(telescope.bq_append_new.__name__)
(main_table_id, partition_table_id) = table_ids_from_path(transform_path)
table_id = f'{self.project_id}.{telescope.dataset_id}.{main_table_id}'
expected_rows = 4
self.assert_table_integrity(table_id, expected_rows)
(download_folder, extract_folder, transform_folder) = (release.download_folder, release.extract_folder, release.transform_folder)
env.run_task(telescope.cleanup.__name__)
self.assert_cleanup(download_folder, extract_folder, transform_folder)
with env.create_dag_run(dag, self.second_execution_date) as m_dag_run:
env.run_task(telescope.check_dependencies.__name__)
(start_date, end_date, first_release) = telescope.get_release_info(execution_date=self.second_execution_date, dag_run=m_dag_run, dag=dag, next_execution_date=pendulum.datetime(2021, 4, 1))
self.assertEqual((release.end_date + timedelta(days=1)), start_date)
self.assertEqual((pendulum.today('UTC') - timedelta(days=1)), end_date)
self.assertFalse(first_release)
release = DoabRelease(telescope.dag_id, start_date, end_date, first_release)
with httpretty.enabled():
self.setup_mock_file_download(DoabTelescope.CSV_URL, self.second_download_path)
env.run_task(telescope.download.__name__)
self.assertEqual(1, len(release.download_files))
download_path = release.download_files[0]
expected_file_hash = get_file_hash(file_path=self.second_download_path, algorithm='md5')
self.assert_file_integrity(download_path, expected_file_hash, 'md5')
env.run_task(telescope.upload_downloaded.__name__)
self.assert_blob_integrity(env.download_bucket, blob_name(download_path), download_path)
env.run_task(telescope.transform.__name__)
self.assertEqual(1, len(release.transform_files))
transform_path = release.transform_files[0]
expected_file_hash = '19f6ba1e'
self.assert_file_integrity(transform_path, expected_file_hash, 'gzip_crc')
env.run_task(telescope.upload_transformed.__name__)
self.assert_blob_integrity(env.transform_bucket, blob_name(transform_path), transform_path)
env.run_task(telescope.bq_load_partition.__name__)
(main_table_id, partition_table_id) = table_ids_from_path(transform_path)
table_id = create_date_table_id(partition_table_id, release.end_date, bigquery.TimePartitioningType.DAY)
table_id = f'{self.project_id}.{telescope.dataset_id}.{table_id}'
expected_rows = 4
self.assert_table_integrity(table_id, expected_rows)
with patch('observatory.platform.utils.gc_utils.bq_query_bytes_daily_limit_check'):
env.run_task(telescope.bq_delete_old.__name__)
table_id = f'{self.project_id}.{telescope.dataset_id}.{main_table_id}'
expected_rows = 3
self.assert_table_integrity(table_id, expected_rows)
env.run_task(telescope.bq_append_new.__name__)
table_id = f'{self.project_id}.{telescope.dataset_id}.{main_table_id}'
expected_rows = 7
self.assert_table_integrity(table_id, expected_rows)
(download_folder, extract_folder, transform_folder) = (release.download_folder, release.extract_folder, release.transform_folder)
env.run_task(telescope.cleanup.__name__)
self.assert_cleanup(download_folder, extract_folder, transform_folder) | def test_telescope(self):
'Test the DOAB telescope end to end.\n :return: None.\n '
env = ObservatoryEnvironment(self.project_id, self.data_location)
dataset_id = env.add_dataset()
telescope = DoabTelescope(dataset_id=dataset_id)
dag = telescope.make_dag()
with env.create():
with env.create_dag_run(dag, self.first_execution_date) as m_dagrun:
env.run_task(telescope.check_dependencies.__name__)
(start_date, end_date, first_release) = telescope.get_release_info(execution_date=self.first_execution_date, dag_run=m_dagrun, dag=dag, next_execution_date=pendulum.datetime(2021, 3, 1))
release = DoabRelease(telescope.dag_id, start_date, end_date, first_release)
with httpretty.enabled():
self.setup_mock_file_download(DoabTelescope.CSV_URL, self.first_download_path)
env.run_task(telescope.download.__name__)
self.assertEqual(1, len(release.download_files))
download_path = release.download_files[0]
expected_file_hash = get_file_hash(file_path=self.first_download_path, algorithm='md5')
self.assert_file_integrity(download_path, expected_file_hash, 'md5')
env.run_task(telescope.upload_downloaded.__name__)
self.assert_blob_integrity(env.download_bucket, blob_name(download_path), download_path)
env.run_task(telescope.transform.__name__)
self.assertEqual(1, len(release.transform_files))
transform_path = release.transform_files[0]
expected_file_hash = '97a86394'
self.assert_file_integrity(transform_path, expected_file_hash, 'gzip_crc')
env.run_task(telescope.upload_transformed.__name__)
self.assert_blob_integrity(env.transform_bucket, blob_name(transform_path), transform_path)
ti = env.run_task(telescope.bq_load_partition.__name__)
self.assertEqual(ti.state, 'skipped')
with patch('observatory.platform.utils.gc_utils.bq_query_bytes_daily_limit_check'):
ti = env.run_task(telescope.bq_delete_old.__name__)
self.assertEqual(ti.state, 'skipped')
env.run_task(telescope.bq_append_new.__name__)
(main_table_id, partition_table_id) = table_ids_from_path(transform_path)
table_id = f'{self.project_id}.{telescope.dataset_id}.{main_table_id}'
expected_rows = 4
self.assert_table_integrity(table_id, expected_rows)
(download_folder, extract_folder, transform_folder) = (release.download_folder, release.extract_folder, release.transform_folder)
env.run_task(telescope.cleanup.__name__)
self.assert_cleanup(download_folder, extract_folder, transform_folder)
with env.create_dag_run(dag, self.second_execution_date) as m_dag_run:
env.run_task(telescope.check_dependencies.__name__)
(start_date, end_date, first_release) = telescope.get_release_info(execution_date=self.second_execution_date, dag_run=m_dag_run, dag=dag, next_execution_date=pendulum.datetime(2021, 4, 1))
self.assertEqual((release.end_date + timedelta(days=1)), start_date)
self.assertEqual((pendulum.today('UTC') - timedelta(days=1)), end_date)
self.assertFalse(first_release)
release = DoabRelease(telescope.dag_id, start_date, end_date, first_release)
with httpretty.enabled():
self.setup_mock_file_download(DoabTelescope.CSV_URL, self.second_download_path)
env.run_task(telescope.download.__name__)
self.assertEqual(1, len(release.download_files))
download_path = release.download_files[0]
expected_file_hash = get_file_hash(file_path=self.second_download_path, algorithm='md5')
self.assert_file_integrity(download_path, expected_file_hash, 'md5')
env.run_task(telescope.upload_downloaded.__name__)
self.assert_blob_integrity(env.download_bucket, blob_name(download_path), download_path)
env.run_task(telescope.transform.__name__)
self.assertEqual(1, len(release.transform_files))
transform_path = release.transform_files[0]
expected_file_hash = '19f6ba1e'
self.assert_file_integrity(transform_path, expected_file_hash, 'gzip_crc')
env.run_task(telescope.upload_transformed.__name__)
self.assert_blob_integrity(env.transform_bucket, blob_name(transform_path), transform_path)
env.run_task(telescope.bq_load_partition.__name__)
(main_table_id, partition_table_id) = table_ids_from_path(transform_path)
table_id = create_date_table_id(partition_table_id, release.end_date, bigquery.TimePartitioningType.DAY)
table_id = f'{self.project_id}.{telescope.dataset_id}.{table_id}'
expected_rows = 4
self.assert_table_integrity(table_id, expected_rows)
with patch('observatory.platform.utils.gc_utils.bq_query_bytes_daily_limit_check'):
env.run_task(telescope.bq_delete_old.__name__)
table_id = f'{self.project_id}.{telescope.dataset_id}.{main_table_id}'
expected_rows = 3
self.assert_table_integrity(table_id, expected_rows)
env.run_task(telescope.bq_append_new.__name__)
table_id = f'{self.project_id}.{telescope.dataset_id}.{main_table_id}'
expected_rows = 7
self.assert_table_integrity(table_id, expected_rows)
(download_folder, extract_folder, transform_folder) = (release.download_folder, release.extract_folder, release.transform_folder)
env.run_task(telescope.cleanup.__name__)
self.assert_cleanup(download_folder, extract_folder, transform_folder)<|docstring|>Test the DOAB telescope end to end.
:return: None.<|endoftext|> |
68dcb955c07c3025215c3fd59a4919773d0eea7e1dfa359d1eb30ac61f04d603 | def test_airflow_vars(self):
'Cover case when airflow_vars is given.'
telescope = DoabTelescope(airflow_vars=[AirflowVars.DOWNLOAD_BUCKET])
self.assertEqual(set(telescope.airflow_vars), {AirflowVars.DOWNLOAD_BUCKET, AirflowVars.TRANSFORM_BUCKET}) | Cover case when airflow_vars is given. | oaebu_workflows/workflows/tests/test_doab_telescope.py | test_airflow_vars | The-Academic-Observatory/oaebu-workflows | 2 | python | def test_airflow_vars(self):
telescope = DoabTelescope(airflow_vars=[AirflowVars.DOWNLOAD_BUCKET])
self.assertEqual(set(telescope.airflow_vars), {AirflowVars.DOWNLOAD_BUCKET, AirflowVars.TRANSFORM_BUCKET}) | def test_airflow_vars(self):
telescope = DoabTelescope(airflow_vars=[AirflowVars.DOWNLOAD_BUCKET])
self.assertEqual(set(telescope.airflow_vars), {AirflowVars.DOWNLOAD_BUCKET, AirflowVars.TRANSFORM_BUCKET})<|docstring|>Cover case when airflow_vars is given.<|endoftext|> |
eaa039273c2a812fe1556e5b7df5722c2a817d8db2043dd6d383634adac08475 | @patch('observatory.platform.utils.workflow_utils.Variable.get')
def test_download(self, mock_variable_get):
"Download release and check exception is raised when response is not 200 or csv is empty.\n\n :param mock_variable_get: Mock result of airflow's Variable.get() function\n :return:\n "
start_date = pendulum.datetime(2020, 1, 1)
end_date = pendulum.datetime(2020, 1, 31)
release = DoabRelease('doab', start_date, end_date, False)
with CliRunner().isolated_filesystem():
mock_variable_get.return_value = 'data'
with httpretty.enabled():
httpretty.register_uri(httpretty.GET, DoabTelescope.CSV_URL, status=400)
with self.assertRaises(AirflowException):
release.download()
with httpretty.enabled():
empty_csv = 'Column1,Column2'
httpretty.register_uri(httpretty.GET, DoabTelescope.CSV_URL, body=empty_csv)
with self.assertRaises(AirflowException):
release.download() | Download release and check exception is raised when response is not 200 or csv is empty.
:param mock_variable_get: Mock result of airflow's Variable.get() function
:return: | oaebu_workflows/workflows/tests/test_doab_telescope.py | test_download | The-Academic-Observatory/oaebu-workflows | 2 | python | @patch('observatory.platform.utils.workflow_utils.Variable.get')
def test_download(self, mock_variable_get):
"Download release and check exception is raised when response is not 200 or csv is empty.\n\n :param mock_variable_get: Mock result of airflow's Variable.get() function\n :return:\n "
start_date = pendulum.datetime(2020, 1, 1)
end_date = pendulum.datetime(2020, 1, 31)
release = DoabRelease('doab', start_date, end_date, False)
with CliRunner().isolated_filesystem():
mock_variable_get.return_value = 'data'
with httpretty.enabled():
httpretty.register_uri(httpretty.GET, DoabTelescope.CSV_URL, status=400)
with self.assertRaises(AirflowException):
release.download()
with httpretty.enabled():
empty_csv = 'Column1,Column2'
httpretty.register_uri(httpretty.GET, DoabTelescope.CSV_URL, body=empty_csv)
with self.assertRaises(AirflowException):
release.download() | @patch('observatory.platform.utils.workflow_utils.Variable.get')
def test_download(self, mock_variable_get):
"Download release and check exception is raised when response is not 200 or csv is empty.\n\n :param mock_variable_get: Mock result of airflow's Variable.get() function\n :return:\n "
start_date = pendulum.datetime(2020, 1, 1)
end_date = pendulum.datetime(2020, 1, 31)
release = DoabRelease('doab', start_date, end_date, False)
with CliRunner().isolated_filesystem():
mock_variable_get.return_value = 'data'
with httpretty.enabled():
httpretty.register_uri(httpretty.GET, DoabTelescope.CSV_URL, status=400)
with self.assertRaises(AirflowException):
release.download()
with httpretty.enabled():
empty_csv = 'Column1,Column2'
httpretty.register_uri(httpretty.GET, DoabTelescope.CSV_URL, body=empty_csv)
with self.assertRaises(AirflowException):
release.download()<|docstring|>Download release and check exception is raised when response is not 200 or csv is empty.
:param mock_variable_get: Mock result of airflow's Variable.get() function
:return:<|endoftext|> |
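The download tests in the rows above stub the CSV endpoint with `httpretty` before exercising `release.download()`. A self-contained sketch of that mocking pattern is shown below; the URL and response body are invented for illustration and are not the real DOAB endpoint.

```python
import httpretty
import requests

@httpretty.activate
def fetch_csv():
    url = "https://example.org/doab.csv"  # placeholder URL for illustration
    httpretty.register_uri(httpretty.GET, url, body="Column1,Column2\na,b")
    response = requests.get(url)
    response.raise_for_status()
    return response.text

print(fetch_csv())  # prints the mocked CSV body
```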
1f03ed0eab93ea4092337f8eaf32a4a095d864c68c7f2600d148ce18223e5f1b | def test_transform_dict(self):
'Check transform_dict handling of invalid case.'
nested_fields = ['dc.subject.classification']
list_fields = ['dc.subject.classification', 'dc.date.issued', 'BITSTREAM ISBN']
test_dict = {'field1': [{'1': 'value1'}, '2'], 'field2': None, 'dc.subject.classification': 'value1||value2', 'dc.date.issued': '0000-01-01', 'BITSTREAM ISBN': '123-5521-4521'}
transformed_dict = {'field1': [{'1': 'value1'}, '2'], 'dc': {'subject': {'classification': {'value': ['value1', 'value2']}}}, 'dc_date_issued': [], 'BITSTREAM_ISBN': ['12355214521']}
result = transform_dict(test_dict, convert, nested_fields, list_fields)
self.assertDictEqual(result, transformed_dict) | Check transform_dict handling of invalid case. | oaebu_workflows/workflows/tests/test_doab_telescope.py | test_transform_dict | The-Academic-Observatory/oaebu-workflows | 2 | python | def test_transform_dict(self):
nested_fields = ['dc.subject.classification']
list_fields = ['dc.subject.classification', 'dc.date.issued', 'BITSTREAM ISBN']
test_dict = {'field1': [{'1': 'value1'}, '2'], 'field2': None, 'dc.subject.classification': 'value1||value2', 'dc.date.issued': '0000-01-01', 'BITSTREAM ISBN': '123-5521-4521'}
transformed_dict = {'field1': [{'1': 'value1'}, '2'], 'dc': {'subject': {'classification': {'value': ['value1', 'value2']}}}, 'dc_date_issued': [], 'BITSTREAM_ISBN': ['12355214521']}
result = transform_dict(test_dict, convert, nested_fields, list_fields)
self.assertDictEqual(result, transformed_dict) | def test_transform_dict(self):
nested_fields = ['dc.subject.classification']
list_fields = ['dc.subject.classification', 'dc.date.issued', 'BITSTREAM ISBN']
test_dict = {'field1': [{'1': 'value1'}, '2'], 'field2': None, 'dc.subject.classification': 'value1||value2', 'dc.date.issued': '0000-01-01', 'BITSTREAM ISBN': '123-5521-4521'}
transformed_dict = {'field1': [{'1': 'value1'}, '2'], 'dc': {'subject': {'classification': {'value': ['value1', 'value2']}}}, 'dc_date_issued': [], 'BITSTREAM_ISBN': ['12355214521']}
result = transform_dict(test_dict, convert, nested_fields, list_fields)
self.assertDictEqual(result, transformed_dict)<|docstring|>Check transform_dict handling of invalid case.<|endoftext|> |
4b44da72b93008c7167410ab110ed626b250fbc8173d2052555caa1d7df6ba91 | @tf.autograph.experimental.do_not_convert
def normalize_img(image, label):
'Normalizes images: `uint8` -> `float32`.'
return ((tf.cast(image, tf.float32) / 255.0), label) | Normalizes images: `uint8` -> `float32`. | tests/test_custom_layers.py | normalize_img | saugatkandel/cvnn | 38 | python | @tf.autograph.experimental.do_not_convert
def normalize_img(image, label):
return ((tf.cast(image, tf.float32) / 255.0), label) | @tf.autograph.experimental.do_not_convert
def normalize_img(image, label):
return ((tf.cast(image, tf.float32) / 255.0), label)<|docstring|>Normalizes images: `uint8` -> `float32`.<|endoftext|> |
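`normalize_img` above has the `(image, label)` signature typically mapped over a `tf.data` pipeline. A minimal usage sketch with synthetic data follows; the shapes and batch size are illustrative.

```python
import tensorflow as tf

def normalize_img(image, label):
    """Normalizes images: `uint8` -> `float32`."""
    return tf.cast(image, tf.float32) / 255.0, label

# Synthetic uint8 images and integer labels standing in for a real image dataset.
images = tf.cast(tf.random.uniform((8, 28, 28, 1), maxval=256, dtype=tf.int32), tf.uint8)
labels = tf.zeros((8,), dtype=tf.int64)

ds = tf.data.Dataset.from_tensor_slices((images, labels))
ds = ds.map(normalize_img, num_parallel_calls=tf.data.AUTOTUNE).batch(4)

for batch_images, _ in ds.take(1):
    print(batch_images.dtype, float(tf.reduce_max(batch_images)))  # float32, values in [0, 1]
```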
843fd8c71699edfe2e506d4cff7bd9a9f4934c1340844bda682ba58004878be8 | def pre_processing(self):
'\n Process local run dictionary to create the run directory and input file.\n If clean_restart is True the clean_run method is called before the run.\n Call the :meth:`copy_source_dir` that manages the source folder,\n if provided.\n\n '
run_dir = self.run_options.get('run_dir', '.')
input = self.run_options.get('input')
name = self.run_options.get('name', 'default')
skip = self.run_options.get('skip')
clean_restart = self.run_options.get('clean_restart')
verbose = self.run_options.get('verbose')
self._ensure_run_directory()
if (input is not None):
input.write((os.path.join(run_dir, name) + '.in'))
else:
print('input not provided')
if (not skip):
if clean_restart:
self.clean_run()
elif verbose:
print('run performed starting from existing results')
self.copy_source_dir()
return {} | Process local run dictionary to create the run directory and input file.
If clean_restart is True the clean_run method is called before the run.
Call the :meth:`copy_source_dir` that manages the source folder,
if provided. | mppi/Calculators/QeCalculator.py | pre_processing | marcodalessandro76/MPPI | 1 | python | def pre_processing(self):
'\n Process local run dictionary to create the run directory and input file.\n If clean_restart is True the clean_run method is called before the run.\n Call the :meth:`copy_source_dir` that manages the source folder,\n if provided.\n\n '
run_dir = self.run_options.get('run_dir', '.')
input = self.run_options.get('input')
name = self.run_options.get('name', 'default')
skip = self.run_options.get('skip')
clean_restart = self.run_options.get('clean_restart')
verbose = self.run_options.get('verbose')
self._ensure_run_directory()
if (input is not None):
input.write((os.path.join(run_dir, name) + '.in'))
else:
print('input not provided')
if (not skip):
if clean_restart:
self.clean_run()
elif verbose:
print('run performed starting from existing results')
self.copy_source_dir()
return {} | def pre_processing(self):
'\n Process local run dictionary to create the run directory and input file.\n If clean_restart is True the clean_run method is called before the run.\n Call the :meth:`copy_source_dir` that manages the source folder,\n if provided.\n\n '
run_dir = self.run_options.get('run_dir', '.')
input = self.run_options.get('input')
name = self.run_options.get('name', 'default')
skip = self.run_options.get('skip')
clean_restart = self.run_options.get('clean_restart')
verbose = self.run_options.get('verbose')
self._ensure_run_directory()
if (input is not None):
input.write((os.path.join(run_dir, name) + '.in'))
else:
print('input not provided')
if (not skip):
if clean_restart:
self.clean_run()
elif verbose:
print('run performed starting from existing results')
self.copy_source_dir()
return {}<|docstring|>Process local run dictionary to create the run directory and input file.
If clean_restart is True the clean_run method is called before the run.
Call the :meth:`copy_source_dir` that manages the source folder,
if provided.<|endoftext|> |
2b3a09f24069ce792360d8d21010229479334ad267190f766ff7ff481dc87173 | def process_run(self):
'\n Method associated to the running of the executable. The method runs the computation\n and wait the end of the computation before passing to the :meth:`post_processing` method.\n\n '
to_run = self.is_to_run()
if to_run:
job = self.run_job()
self.wait(job)
return {} | Method associated to the running of the executable. The method runs the computation
and wait the end of the computation before passing to the :meth:`post_processing` method. | mppi/Calculators/QeCalculator.py | process_run | marcodalessandro76/MPPI | 1 | python | def process_run(self):
'\n Method associated to the running of the executable. The method runs the computation\n and wait the end of the computation before passing to the :meth:`post_processing` method.\n\n '
to_run = self.is_to_run()
if to_run:
job = self.run_job()
self.wait(job)
return {} | def process_run(self):
'\n Method associated to the running of the executable. The method runs the computation\n and wait the end of the computation before passing to the :meth:`post_processing` method.\n\n '
to_run = self.is_to_run()
if to_run:
job = self.run_job()
self.wait(job)
return {}<|docstring|>Method associated to the running of the executable. The method runs the computation
and wait the end of the computation before passing to the :meth:`post_processing` method.<|endoftext|> |
85b3a4908558e6ad40a8349dd09a12e1f9c3938d0174397c3698c2899b5ab04d | def post_processing(self):
'\n Return the name, including the path, of the data-file-schema.xml file. If the file is absent the\n method displays a warning.\n\n Return:\n :py:class:`string` : name, including the path, of the xml data-file-schema file\n\n '
input = self.run_options.get('input')
prefix = input.get_prefix()
out_dir = self._get_outdir_path()
save_dir = (os.path.join(out_dir, prefix) + '.save')
result = os.path.join(save_dir, 'data-file-schema.xml')
if (not os.path.isfile(result)):
print(('Expected file %s not found' % result))
print('\n Check if wait_end_run is False or the dry_run option is active.\n Otherwise a possible error has occured during the computation')
return result | Return the name, including the path, of the data-file-schema.xml file. If the file is absent the
method displays a warning.
Return:
:py:class:`string` : name, including the path, of the xml data-file-schema file | mppi/Calculators/QeCalculator.py | post_processing | marcodalessandro76/MPPI | 1 | python | def post_processing(self):
'\n Return the name, including the path, of the data-file-schema.xml file. If the file is absent the\n method displays a warning.\n\n Return:\n :py:class:`string` : name, including the path, of the xml data-file-schema file\n\n '
input = self.run_options.get('input')
prefix = input.get_prefix()
out_dir = self._get_outdir_path()
save_dir = (os.path.join(out_dir, prefix) + '.save')
result = os.path.join(save_dir, 'data-file-schema.xml')
if (not os.path.isfile(result)):
print(('Expected file %s not found' % result))
print('\n Check if wait_end_run is False or the dry_run option is active.\n Otherwise a possible error has occured during the computation')
return result | def post_processing(self):
'\n Return the name, including the path, of the data-file-schema.xml file. If the file is absent the\n method displays a warning.\n\n Return:\n :py:class:`string` : name, including the path, of the xml data-file-schema file\n\n '
input = self.run_options.get('input')
prefix = input.get_prefix()
out_dir = self._get_outdir_path()
save_dir = (os.path.join(out_dir, prefix) + '.save')
result = os.path.join(save_dir, 'data-file-schema.xml')
if (not os.path.isfile(result)):
print(('Expected file %s not found' % result))
print('\n Check if wait_end_run is False or the dry_run option is active.\n Otherwise a possible error has occured during the computation')
return result<|docstring|>Return the name, including the path, of the data-file-schema.xml file. If the file is absent the
method displays a warning.
Return:
:py:class:`string` : name, including the path, of the xml data-file-schema file<|endoftext|> |
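`post_processing` above only returns the path of the `data-file-schema.xml` file; reading it is left to the caller. A hedged sketch of locating and parsing that file with the standard library follows. It assumes only the directory layout used above (`<out_dir>/<prefix>.save/data-file-schema.xml`) and no particular XML tags.

```python
import os
import xml.etree.ElementTree as ET

def load_data_file_schema(out_dir, prefix):
    # Build the path used by post_processing above and parse the XML if present.
    path = os.path.join(out_dir, prefix + ".save", "data-file-schema.xml")
    if not os.path.isfile(path):
        raise FileNotFoundError(f"Expected file {path} not found")
    return ET.parse(path).getroot()

# Example with placeholder arguments: root = load_data_file_schema("outdir", "si_bulk")
```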
094a9a5ab7a310504ceb6d9dcaee24b3a33d75b5d02805249ff3266246c5e0de | def is_to_run(self):
'\n The method evaluates if the computation can be skipped. This is done by\n checking if the file $prefix.xml is already present in the out_dir.\n\n Return:\n :py:class:`bool` : the boolean is True if the computation needs to be run\n\n '
skip = self.run_options.get('skip')
name = (self.run_options.get('name', 'default') + '.in')
input = self.run_options['input']
prefix = input.get_prefix()
out_dir = self._get_outdir_path()
skipfile = (os.path.join(out_dir, prefix) + '.xml')
verbose = self.run_options.get('verbose')
if (not skip):
return True
elif os.path.isfile(skipfile):
if verbose:
print('Skip the run of the input file', name)
return False
else:
return True | The method evaluates if the computation can be skipped. This is done by
checking if the file $prefix.xml is already present in the out_dir.
Return:
:py:class:`bool` : the boolean is True if the computation needs to be run | mppi/Calculators/QeCalculator.py | is_to_run | marcodalessandro76/MPPI | 1 | python | def is_to_run(self):
'\n The method evaluates if the computation can be skipped. This is done by\n checking if the file $prefix.xml is already present in the out_dir.\n\n Return:\n :py:class:`bool` : the boolean is True if the computation needs to be run\n\n '
skip = self.run_options.get('skip')
name = (self.run_options.get('name', 'default') + '.in')
input = self.run_options['input']
prefix = input.get_prefix()
out_dir = self._get_outdir_path()
skipfile = (os.path.join(out_dir, prefix) + '.xml')
verbose = self.run_options.get('verbose')
if (not skip):
return True
elif os.path.isfile(skipfile):
if verbose:
print('Skip the run of the input file', name)
return False
else:
return True | def is_to_run(self):
'\n The method evaluates if the computation can be skipped. This is done by\n checking if the file $prefix.xml is already present in the out_dir.\n\n Return:\n :py:class:`bool` : the boolean is True if the computation needs to be run\n\n '
skip = self.run_options.get('skip')
name = (self.run_options.get('name', 'default') + '.in')
input = self.run_options['input']
prefix = input.get_prefix()
out_dir = self._get_outdir_path()
skipfile = (os.path.join(out_dir, prefix) + '.xml')
verbose = self.run_options.get('verbose')
if (not skip):
return True
elif os.path.isfile(skipfile):
if verbose:
print('Skip the run of the input file', name)
return False
else:
return True<|docstring|>The method evaluates if the computation can be skipped. This is done by
checking if the file $prefix.xml is already present in the out_dir.
Return:
:py:class:`bool` : the boolean is True if the computation needs to be run<|endoftext|> |
ba01690c4e5f332030fc8434051fc9b10a1d0e9b707d10d8ad8abd518a6844f1 | def run_job(self):
'\n Run the computation. The operations performed depend on the scheduler adopted.\n If the dry_run option is enabled the run is not performed but the slurm script\n is written on disk.\n\n Return:\n The type of the object depends on the chosen scheduler. For scheduler `direct`\n job is an instance of Popen, while for `slurm` scheduler job is the name of the\n slurm script.\n\n '
from subprocess import Popen
run_dir = self.run_options.get('run_dir', '.')
scheduler = self.run_options['scheduler']
dry_run = self.run_options.get('dry_run')
verbose = self.run_options.get('verbose')
if (scheduler == 'direct'):
os.environ['OMP_NUM_THREADS'] = str(self.run_options['omp'])
if (not dry_run):
comm_str = ('cd %s ; %s' % (run_dir, self.run_command()))
job = Popen(comm_str, shell=True)
else:
job = None
if verbose:
print('Dry_run option active. Computation not performed')
elif (scheduler == 'slurm'):
job = self.build_slurm_script()
if (not dry_run):
slurm_submit = ('cd %s ; sbatch %s.sh' % (run_dir, job))
if verbose:
print('slurm submit: ', slurm_submit)
slurm_run = Popen(slurm_submit, shell=True)
elif verbose:
print('Dry_run option active. Script not submitted')
else:
print('scheduler unknown')
return job | Run the computation. The operations performed depend on the scheduler adopted.
If the dry_run option is enabled the run is not performed but the slurm script
is written on disk.
Return:
The type of the object depends on the chosen scheduler. For scheduler `direct`
job is an instance of Popen, while for `slurm` scheduler job is the name of the
slurm script. | mppi/Calculators/QeCalculator.py | run_job | marcodalessandro76/MPPI | 1 | python | def run_job(self):
'\n Run the computation. The operations performed depend on the scheduler adopted.\n If the dry_run option is enabled the run is not performed but the slurm script\n is written on disk.\n\n Return:\n The type of the object depends on the chosen scheduler. For scheduler `direct`\n job is an instance of Popen, while for `slurm` scheduler job is the name of the\n slurm script.\n\n '
from subprocess import Popen
run_dir = self.run_options.get('run_dir', '.')
scheduler = self.run_options['scheduler']
dry_run = self.run_options.get('dry_run')
verbose = self.run_options.get('verbose')
if (scheduler == 'direct'):
os.environ['OMP_NUM_THREADS'] = str(self.run_options['omp'])
if (not dry_run):
comm_str = ('cd %s ; %s' % (run_dir, self.run_command()))
job = Popen(comm_str, shell=True)
else:
job = None
if verbose:
print('Dry_run option active. Computation not performed')
elif (scheduler == 'slurm'):
job = self.build_slurm_script()
if (not dry_run):
slurm_submit = ('cd %s ; sbatch %s.sh' % (run_dir, job))
if verbose:
print('slurm submit: ', slurm_submit)
slurm_run = Popen(slurm_submit, shell=True)
elif verbose:
print('Dry_run option active. Script not submitted')
else:
print('scheduler unknown')
return job | def run_job(self):
'\n Run the computation. The operations performed depend on the scheduler adopted.\n If the dry_run option is enabled the run is not performed but the slurm script\n is written on disk.\n\n Return:\n The type of the object depends on the chosen scheduler. For scheduler `direct`\n job is an instance of Popen, while for `slurm` scheduler job is the name of the\n slurm script.\n\n '
from subprocess import Popen
run_dir = self.run_options.get('run_dir', '.')
scheduler = self.run_options['scheduler']
dry_run = self.run_options.get('dry_run')
verbose = self.run_options.get('verbose')
if (scheduler == 'direct'):
os.environ['OMP_NUM_THREADS'] = str(self.run_options['omp'])
if (not dry_run):
comm_str = ('cd %s ; %s' % (run_dir, self.run_command()))
job = Popen(comm_str, shell=True)
else:
job = None
if verbose:
print('Dry_run option active. Computation not performed')
elif (scheduler == 'slurm'):
job = self.build_slurm_script()
if (not dry_run):
slurm_submit = ('cd %s ; sbatch %s.sh' % (run_dir, job))
if verbose:
print('slurm submit: ', slurm_submit)
slurm_run = Popen(slurm_submit, shell=True)
elif verbose:
print('Dry_run option active. Script not submitted')
else:
print('scheduler unknown')
return job<|docstring|>Run the computation. The operations performed depend on the scheduler adopted.
If the dry_run option is enabled the run is not performed but the slurm script
is written on disk.
Return:
The type of the object depends on the chosen scheduler. For scheduler `direct`
job is an instance of Popen, while for `slurm` scheduler job is the name of the
slurm script.<|endoftext|> |
9292f58debf3be8ec0aa6d5a644672372f9830b2cd679782806068c28f47ed05 | def wait(self, job):
'\n Wait the end of the job. If the dry_run option is enabled or wait_end_run is False\n the check is not performed.\n\n Args:\n jobs : The reference to the job to be executed. If the scheduler is `direct`\n jobs is an instance of Popen of the :py:class:subprocess package. If the\n scheduler is `slurm` jobs is a string with the name of the slurm script\n\n '
import time
dry_run = self.run_options.get('dry_run')
wait_end_run = self.run_options.get('wait_end_run')
name = self.run_options.get('name', 'default')
verbose = self.run_options.get('verbose')
delay = 1
if ((wait_end_run is True) and (dry_run is False)):
message_written = False
while (not self.run_ended(job)):
if (not message_written):
if verbose:
print(('computation %s is running...' % name))
message_written = True
time.sleep(delay)
if verbose:
print(('computation %s ended' % name))
elif verbose:
print('The wait_end_run is False or the dry_run option is active. The calculator proceedes to the postprocessing') | Wait the end of the job. If the dry_run option is enabled or wait_end_run is False
the check is not performed.
Args:
jobs : The reference to the job to be executed. If the scheduler is `direct`
jobs is an instance of Popen of the :py:class:subprocess package. If the
scheduler is `slurm` jobs is a string with the name of the slurm script | mppi/Calculators/QeCalculator.py | wait | marcodalessandro76/MPPI | 1 | python | def wait(self, job):
'\n Wait the end of the job. If the dry_run option is enabled or wait_end_run is False\n the check is not performed.\n\n Args:\n jobs : The reference to the job to be executed. If the scheduler is `direct`\n jobs is an instance of Popen of the :py:class:subprocess package. If the\n scheduler is `slurm` jobs is a string with the name of the slurm script\n\n '
import time
dry_run = self.run_options.get('dry_run')
wait_end_run = self.run_options.get('wait_end_run')
name = self.run_options.get('name', 'default')
verbose = self.run_options.get('verbose')
delay = 1
if ((wait_end_run is True) and (dry_run is False)):
message_written = False
while (not self.run_ended(job)):
if (not message_written):
if verbose:
print(('computation %s is running...' % name))
message_written = True
time.sleep(delay)
if verbose:
print(('computation %s ended' % name))
elif verbose:
print('The wait_end_run is False or the dry_run option is active. The calculator proceedes to the postprocessing') | def wait(self, job):
'\n Wait the end of the job. If the dry_run option is enabled or wait_end_run is False\n the check is not performed.\n\n Args:\n jobs : The reference to the job to be executed. If the scheduler is `direct`\n jobs is an instance of Popen of the :py:class:subprocess package. If the\n scheduler is `slurm` jobs is a string with the name of the slurm script\n\n '
import time
dry_run = self.run_options.get('dry_run')
wait_end_run = self.run_options.get('wait_end_run')
name = self.run_options.get('name', 'default')
verbose = self.run_options.get('verbose')
delay = 1
if ((wait_end_run is True) and (dry_run is False)):
message_written = False
while (not self.run_ended(job)):
if (not message_written):
if verbose:
print(('computation %s is running...' % name))
message_written = True
time.sleep(delay)
if verbose:
print(('computation %s ended' % name))
elif verbose:
print('The wait_end_run is False or the dry_run option is active. The calculator proceedes to the postprocessing')<|docstring|>Wait the end of the job. If the dry_run option is enabled or wait_end_run is False
the check is not performed.
Args:
jobs : The reference to the job to be executed. If the scheduler is `direct`
jobs is an instance of Popen of the :py:class:subprocess package. If the
scheduler is `slurm` jobs is a string with the name of the slurm script<|endoftext|> |
5eb786942340997fb2766659cdcaa4081c6d8278174ba4e11b33ad3b7ab26787 | def build_slurm_script(self):
'\n Create the slurm script associated to the run.\n\n Return:\n :py:class:`string`: string with the name of the slurm script\n\n '
omp = self.run_options.get('omp')
mpi = self.run_options.get('mpi')
input = self.run_options.get('input')
prefix = input.get_prefix()
out_dir = input.get_outdir()
name = self.run_options.get('name', 'default')
run_dir = self.run_options.get('run_dir', '.')
out_dir_path = self._get_outdir_path()
save_dir = (os.path.join(out_dir_path, prefix) + '.save')
job = ('job_' + name)
sbatch_options = self.run_options.get('sbatch_options')
activate_BeeOND = self.run_options.get('activate_BeeOND')
comm_str = self.run_command()
lines = []
lines.append('#!/bin/bash')
lines.append(('#SBATCH --ntasks=%s ### Number of tasks (MPI processes)' % mpi))
lines.append(('#SBATCH --cpus-per-task=%s ### Number of threads per task (OMP threads)' % omp))
for option in sbatch_options:
lines.append(('#SBATCH %s' % option))
lines.append(('#SBATCH --output=%s.out' % job))
lines.append('')
lines.append('export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK')
lines.append(('export OUT_DIR=%s' % out_dir))
lines.append(('export BEEOND_DIR=%s' % self.BeeOND_dir))
lines.append(('export OUT_DIR_PATH=%s' % out_dir_path))
lines.append(('export SAVE_DIR=%s' % save_dir))
lines.append('')
lines.append('echo "Cluster name $SLURM_CLUSTER_NAME"')
lines.append('echo "Job name $SLURM_JOB_NAME "')
lines.append('echo "Job id $SLURM_JOB_ID"')
lines.append('echo "Job nodelist $SLURM_JOB_NODELIST"')
lines.append('echo "Number of nodes $SLURM_JOB_NUM_NODES"')
lines.append('echo "Number of mpi $SLURM_NTASKS"')
lines.append('echo "Number of threads per task $SLURM_CPUS_PER_TASK"')
lines.append('echo "OUT_DIR input parameter is $OUT_DIR"')
lines.append('echo "BEEOND_DIR path is $BEEOND_DIR"')
lines.append('echo "OUT_DIR path is $OUT_DIR_PATH"')
lines.append('echo "SAVE_DIR path is $SAVE_DIR"')
lines.append('echo " "')
lines.append('')
if activate_BeeOND:
lines.append('echo "THe BeeOND option is activated. The I/O is performed in $BEEOND_DIR"')
lines.append('if [ ! -d $BEEOND_DIR ]; then')
lines.append('echo "$BEEOND_DIR not found!"')
lines.append('exit')
lines.append('fi')
lines.append('echo " "')
lines.append('')
lines.append('echo "Change the outdir key of the input from $OUT_DIR to $BEEOND_DIR"')
lines.append(('sed -i "/outdir/s:%s:%s:" %s.in' % (out_dir, self.BeeOND_dir, name)))
lines.append('echo " "')
lines.append('')
if os.path.isdir(save_dir):
lines.append('echo "found SAVE_DIR folder $SAVE_DIR. Copy the SAVE_DIR in the $BEEOND_DIR folder"')
lines.append('echo "rsync -azv $SAVE_DIR $BEEOND_DIR"')
lines.append('rsync -azv $SAVE_DIR $BEEOND_DIR')
lines.append('echo " "')
lines.append('')
lines.append(('echo "execute : %s"' % comm_str))
lines.append(comm_str)
lines.append('echo " "')
lines.append('')
if activate_BeeOND:
lines.append('echo "Change the outdir key of the input to its original value $OUT_DIR"')
lines.append(('sed -i "/outdir/s:%s:%s:" %s.in' % (self.BeeOND_dir, out_dir, name)))
lines.append('echo "rsync -azv $BEEOND_DIR/ $OUT_DIR_PATH"')
lines.append('rsync -azv $BEEOND_DIR/ $OUT_DIR_PATH')
lines.append('echo " "')
lines.append('')
lines.append('echo "JOB_DONE"')
f = open(os.path.join(run_dir, (job + '.sh')), 'w')
f.write('\n'.join(lines))
f.close()
return job | Create the slurm script associated to the run.
Return:
:py:class:`string`: string with the name of the slurm script | mppi/Calculators/QeCalculator.py | build_slurm_script | marcodalessandro76/MPPI | 1 | python | def build_slurm_script(self):
'\n Create the slurm script associated to the run.\n\n Return:\n :py:class:`string`: string with the name of the slurm script\n\n '
omp = self.run_options.get('omp')
mpi = self.run_options.get('mpi')
input = self.run_options.get('input')
prefix = input.get_prefix()
out_dir = input.get_outdir()
name = self.run_options.get('name', 'default')
run_dir = self.run_options.get('run_dir', '.')
out_dir_path = self._get_outdir_path()
save_dir = (os.path.join(out_dir_path, prefix) + '.save')
job = ('job_' + name)
sbatch_options = self.run_options.get('sbatch_options')
activate_BeeOND = self.run_options.get('activate_BeeOND')
comm_str = self.run_command()
lines = []
lines.append('#!/bin/bash')
lines.append(('#SBATCH --ntasks=%s ### Number of tasks (MPI processes)' % mpi))
lines.append(('#SBATCH --cpus-per-task=%s ### Number of threads per task (OMP threads)' % omp))
for option in sbatch_options:
lines.append(('#SBATCH %s' % option))
lines.append(('#SBATCH --output=%s.out' % job))
lines.append('')
lines.append('export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK')
lines.append(('export OUT_DIR=%s' % out_dir))
lines.append(('export BEEOND_DIR=%s' % self.BeeOND_dir))
lines.append(('export OUT_DIR_PATH=%s' % out_dir_path))
lines.append(('export SAVE_DIR=%s' % save_dir))
lines.append('')
lines.append('echo "Cluster name $SLURM_CLUSTER_NAME"')
lines.append('echo "Job name $SLURM_JOB_NAME "')
lines.append('echo "Job id $SLURM_JOB_ID"')
lines.append('echo "Job nodelist $SLURM_JOB_NODELIST"')
lines.append('echo "Number of nodes $SLURM_JOB_NUM_NODES"')
lines.append('echo "Number of mpi $SLURM_NTASKS"')
lines.append('echo "Number of threads per task $SLURM_CPUS_PER_TASK"')
lines.append('echo "OUT_DIR input parameter is $OUT_DIR"')
lines.append('echo "BEEOND_DIR path is $BEEOND_DIR"')
lines.append('echo "OUT_DIR path is $OUT_DIR_PATH"')
lines.append('echo "SAVE_DIR path is $SAVE_DIR"')
lines.append('echo " "')
lines.append('')
if activate_BeeOND:
lines.append('echo "THe BeeOND option is activated. The I/O is performed in $BEEOND_DIR"')
lines.append('if [ ! -d $BEEOND_DIR ]; then')
lines.append('echo "$BEEOND_DIR not found!"')
lines.append('exit')
lines.append('fi')
lines.append('echo " "')
lines.append('')
lines.append('echo "Change the outdir key of the input from $OUT_DIR to $BEEOND_DIR"')
lines.append(('sed -i "/outdir/s:%s:%s:" %s.in' % (out_dir, self.BeeOND_dir, name)))
lines.append('echo " "')
lines.append('')
if os.path.isdir(save_dir):
lines.append('echo "found SAVE_DIR folder $SAVE_DIR. Copy the SAVE_DIR in the $BEEOND_DIR folder"')
lines.append('echo "rsync -azv $SAVE_DIR $BEEOND_DIR"')
lines.append('rsync -azv $SAVE_DIR $BEEOND_DIR')
lines.append('echo " "')
lines.append('')
lines.append(('echo "execute : %s"' % comm_str))
lines.append(comm_str)
lines.append('echo " "')
lines.append('')
if activate_BeeOND:
lines.append('echo "Change the outdir key of the input to its original value $OUT_DIR"')
lines.append(('sed -i "/outdir/s:%s:%s:" %s.in' % (self.BeeOND_dir, out_dir, name)))
lines.append('echo "rsync -azv $BEEOND_DIR/ $OUT_DIR_PATH"')
lines.append('rsync -azv $BEEOND_DIR/ $OUT_DIR_PATH')
lines.append('echo " "')
lines.append('')
lines.append('echo "JOB_DONE"')
f = open(os.path.join(run_dir, (job + '.sh')), 'w')
f.write('\n'.join(lines))
f.close()
return job | def build_slurm_script(self):
'\n Create the slurm script associated to the run.\n\n Return:\n :py:class:`string`: string with the name of the slurm script\n\n '
omp = self.run_options.get('omp')
mpi = self.run_options.get('mpi')
input = self.run_options.get('input')
prefix = input.get_prefix()
out_dir = input.get_outdir()
name = self.run_options.get('name', 'default')
run_dir = self.run_options.get('run_dir', '.')
out_dir_path = self._get_outdir_path()
save_dir = (os.path.join(out_dir_path, prefix) + '.save')
job = ('job_' + name)
sbatch_options = self.run_options.get('sbatch_options')
activate_BeeOND = self.run_options.get('activate_BeeOND')
comm_str = self.run_command()
lines = []
lines.append('#!/bin/bash')
lines.append(('#SBATCH --ntasks=%s ### Number of tasks (MPI processes)' % mpi))
lines.append(('#SBATCH --cpus-per-task=%s ### Number of threads per task (OMP threads)' % omp))
for option in sbatch_options:
lines.append(('#SBATCH %s' % option))
lines.append(('#SBATCH --output=%s.out' % job))
lines.append('')
lines.append('export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK')
lines.append(('export OUT_DIR=%s' % out_dir))
lines.append(('export BEEOND_DIR=%s' % self.BeeOND_dir))
lines.append(('export OUT_DIR_PATH=%s' % out_dir_path))
lines.append(('export SAVE_DIR=%s' % save_dir))
lines.append('')
lines.append('echo "Cluster name $SLURM_CLUSTER_NAME"')
lines.append('echo "Job name $SLURM_JOB_NAME "')
lines.append('echo "Job id $SLURM_JOB_ID"')
lines.append('echo "Job nodelist $SLURM_JOB_NODELIST"')
lines.append('echo "Number of nodes $SLURM_JOB_NUM_NODES"')
lines.append('echo "Number of mpi $SLURM_NTASKS"')
lines.append('echo "Number of threads per task $SLURM_CPUS_PER_TASK"')
lines.append('echo "OUT_DIR input parameter is $OUT_DIR"')
lines.append('echo "BEEOND_DIR path is $BEEOND_DIR"')
lines.append('echo "OUT_DIR path is $OUT_DIR_PATH"')
lines.append('echo "SAVE_DIR path is $SAVE_DIR"')
lines.append('echo " "')
lines.append('')
if activate_BeeOND:
lines.append('echo "THe BeeOND option is activated. The I/O is performed in $BEEOND_DIR"')
lines.append('if [ ! -d $BEEOND_DIR ]; then')
lines.append('echo "$BEEOND_DIR not found!"')
lines.append('exit')
lines.append('fi')
lines.append('echo " "')
lines.append('')
lines.append('echo "Change the outdir key of the input from $OUT_DIR to $BEEOND_DIR"')
lines.append(('sed -i "/outdir/s:%s:%s:" %s.in' % (out_dir, self.BeeOND_dir, name)))
lines.append('echo " "')
lines.append('')
if os.path.isdir(save_dir):
lines.append('echo "found SAVE_DIR folder $SAVE_DIR. Copy the SAVE_DIR in the $BEEOND_DIR folder"')
lines.append('echo "rsync -azv $SAVE_DIR $BEEOND_DIR"')
lines.append('rsync -azv $SAVE_DIR $BEEOND_DIR')
lines.append('echo " "')
lines.append('')
lines.append(('echo "execute : %s"' % comm_str))
lines.append(comm_str)
lines.append('echo " "')
lines.append('')
if activate_BeeOND:
lines.append('echo "Change the outdir key of the input to its original value $OUT_DIR"')
lines.append(('sed -i "/outdir/s:%s:%s:" %s.in' % (self.BeeOND_dir, out_dir, name)))
lines.append('echo "rsync -azv $BEEOND_DIR/ $OUT_DIR_PATH"')
lines.append('rsync -azv $BEEOND_DIR/ $OUT_DIR_PATH')
lines.append('echo " "')
lines.append('')
lines.append('echo "JOB_DONE"')
f = open(os.path.join(run_dir, (job + '.sh')), 'w')
f.write('\n'.join(lines))
f.close()
return job<|docstring|>Create the slurm script associated to the run.
Return:
:py:class:`string`: string with the name of the slurm script<|endoftext|> |
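A minimal sketch of the sbatch header that build_slurm_script assembles, written as standalone Python so the directive layout can be checked without a calculator instance; the mpi, omp, name and sbatch_options values below are illustrative assumptions, not values taken from the record above.

# Illustrative only: rebuild the first lines of job_<name>.sh the same way
# build_slurm_script does, with made-up option values.
mpi, omp, name = 4, 2, 'scf'                                 # assumed run options
sbatch_options = ['--partition=debug', '--time=01:00:00']    # assumed extra directives
job = 'job_' + name

lines = ['#!/bin/bash',
         '#SBATCH --ntasks=%s    ### Number of tasks (MPI processes)' % mpi,
         '#SBATCH --cpus-per-task=%s    ### Number of threads per task (OMP threads)' % omp]
lines += ['#SBATCH %s' % option for option in sbatch_options]
lines.append('#SBATCH --output=%s.out' % job)
print('\n'.join(lines))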
54ab9d8a6fb39364b3ef17da81f802182018b26801280e70dd463d60c72fe701 | def run_command(self):
'\n Define the run command used to run the computation.\n\n Return:\n :py:class:`string` : command that runs the computation\n\n '
executable = self.run_options.get('executable')
mpi = self.run_options.get('mpi')
mpi_run = self.run_options.get('mpi_run')
run_dir = self.run_options.get('run_dir', '.')
name = self.run_options.get('name', 'default')
verbose = self.run_options.get('verbose')
command = ((((mpi_run + ' ') + str(mpi)) + ' ') + executable)
input_name = (name + '.in')
output_name = (name + '.log')
comm_str = (command + (' -inp %s > %s' % (input_name, output_name)))
if verbose:
print(('run command: %s' % comm_str))
return comm_str | Define the run command used to run the computation.
Return:
:py:class:`string` : command that runs the computation | mppi/Calculators/QeCalculator.py | run_command | marcodalessandro76/MPPI | 1 | python | def run_command(self):
'\n Define the run command used to run the computation.\n\n Return:\n :py:class:`string` : command that runs the computation\n\n '
executable = self.run_options.get('executable')
mpi = self.run_options.get('mpi')
mpi_run = self.run_options.get('mpi_run')
run_dir = self.run_options.get('run_dir', '.')
name = self.run_options.get('name', 'default')
verbose = self.run_options.get('verbose')
command = ((((mpi_run + ' ') + str(mpi)) + ' ') + executable)
input_name = (name + '.in')
output_name = (name + '.log')
comm_str = (command + (' -inp %s > %s' % (input_name, output_name)))
if verbose:
print(('run command: %s' % comm_str))
return comm_str | def run_command(self):
'\n Define the run command used to run the computation.\n\n Return:\n :py:class:`string` : command that runs the computation\n\n '
executable = self.run_options.get('executable')
mpi = self.run_options.get('mpi')
mpi_run = self.run_options.get('mpi_run')
run_dir = self.run_options.get('run_dir', '.')
name = self.run_options.get('name', 'default')
verbose = self.run_options.get('verbose')
command = ((((mpi_run + ' ') + str(mpi)) + ' ') + executable)
input_name = (name + '.in')
output_name = (name + '.log')
comm_str = (command + (' -inp %s > %s' % (input_name, output_name)))
if verbose:
print(('run command: %s' % comm_str))
return comm_str<|docstring|>Define the run command used to run the computation.
Return:
:py:class:`string` : command that runs the computation<|endoftext|> |
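To make the string layout concrete, a small worked example of the command that run_command returns; the mpi_run prefix, executable and name are assumptions chosen for illustration.

# Assumed option values, only to show the shape of the returned string.
mpi_run, mpi, executable, name = 'mpirun -np', 4, 'pw.x', 'scf'
command = mpi_run + ' ' + str(mpi) + ' ' + executable
comm_str = command + ' -inp %s > %s' % (name + '.in', name + '.log')
print(comm_str)   # -> mpirun -np 4 pw.x -inp scf.in > scf.log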
09a849d9d3fe58c7a5a53a6a447a58363d4e837e95e703f1185330ac5cf2f9bb | def run_ended(self, job):
'\n Check the status of the running job.\n\n Args:\n job : reference to the actual job. job is an istance of Popen for `direct` scheduler\n or a string for `slurm` scheduler\n\n Return:\n :py:class:`bool`: return True if the computation is ended and False if it is running\n\n '
scheduler = self.run_options.get('scheduler')
run_dir = self.run_options.get('run_dir', '.')
if (scheduler == 'direct'):
if (job.poll() is not None):
is_ended = True
else:
is_ended = False
if (scheduler == 'slurm'):
job_out = os.path.join(run_dir, (job + '.out'))
if (not os.path.isfile(job_out)):
is_ended = False
else:
with open(job_out, 'r') as f:
last_line = f.read().splitlines()[(- 1)]
if (last_line == 'JOB_DONE'):
is_ended = True
else:
is_ended = False
return is_ended | Check the status of the running job.
Args:
job : reference to the actual job. job is an instance of Popen for `direct` scheduler
or a string for `slurm` scheduler
Return:
:py:class:`bool`: return True if the computation is ended and False if it is running | mppi/Calculators/QeCalculator.py | run_ended | marcodalessandro76/MPPI | 1 | python | def run_ended(self, job):
'\n Check the status of the running job.\n\n Args:\n job : reference to the actual job. job is an istance of Popen for `direct` scheduler\n or a string for `slurm` scheduler\n\n Return:\n :py:class:`bool`: return True if the computation is ended and False if it is running\n\n '
scheduler = self.run_options.get('scheduler')
run_dir = self.run_options.get('run_dir', '.')
if (scheduler == 'direct'):
if (job.poll() is not None):
is_ended = True
else:
is_ended = False
if (scheduler == 'slurm'):
job_out = os.path.join(run_dir, (job + '.out'))
if (not os.path.isfile(job_out)):
is_ended = False
else:
with open(job_out, 'r') as f:
last_line = f.read().splitlines()[(- 1)]
if (last_line == 'JOB_DONE'):
is_ended = True
else:
is_ended = False
return is_ended | def run_ended(self, job):
'\n Check the status of the running job.\n\n Args:\n job : reference to the actual job. job is an istance of Popen for `direct` scheduler\n or a string for `slurm` scheduler\n\n Return:\n :py:class:`bool`: return True if the computation is ended and False if it is running\n\n '
scheduler = self.run_options.get('scheduler')
run_dir = self.run_options.get('run_dir', '.')
if (scheduler == 'direct'):
if (job.poll() is not None):
is_ended = True
else:
is_ended = False
if (scheduler == 'slurm'):
job_out = os.path.join(run_dir, (job + '.out'))
if (not os.path.isfile(job_out)):
is_ended = False
else:
with open(job_out, 'r') as f:
last_line = f.read().splitlines()[(- 1)]
if (last_line == 'JOB_DONE'):
is_ended = True
else:
is_ended = False
return is_ended<|docstring|>Check the status of the running job.
Args:
job : reference to the actual job. job is an instance of Popen for `direct` scheduler
or a string for `slurm` scheduler
Return:
:py:class:`bool`: return True if the computation is ended and False if it is running<|endoftext|> |
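A possible polling loop built on top of run_ended; the calc and job objects are assumed to come from an earlier submission, and the 5 second interval is arbitrary.

import time

def wait_for_job(calc, job, delay=5):
    # job is a Popen handle (direct scheduler) or the job name string (slurm);
    # run_ended hides that difference, so the loop only has to poll and sleep.
    while not calc.run_ended(job):
        time.sleep(delay)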
24f0acb7e754888ac84c0dbb1fce6977a6bd0b3fb2ce1876e90efc8950a5b608 | def clean_run(self):
'\n Clean the run before performing the computation. Delete the $name.log and\n the job_$name.out file, located in the `run_dir`, and the $prefix.xml file\n and the $prefix.save folder located in the `out_dir`. Finally, if the\n `out_dir` is empty it is deleted.\n\n '
run_dir = self.run_options.get('run_dir', '.')
name = self.run_options.get('name', 'default')
input = self.run_options.get('input')
verbose = self.run_options.get('verbose')
prefix = input.get_prefix()
out_dir = self._get_outdir_path()
logfile = (os.path.join(run_dir, name) + '.log')
job_out = os.path.join(run_dir, (('job_' + name) + '.out'))
xmlfile = (os.path.join(out_dir, prefix) + '.xml')
save_dir = (os.path.join(out_dir, prefix) + '.save')
if os.path.isfile(logfile):
if verbose:
print('delete log file:', logfile)
os.system(('rm %s' % logfile))
if os.path.isfile(job_out):
if verbose:
print('delete job_out script:', job_out)
os.system(('rm %s' % job_out))
if os.path.isfile(xmlfile):
if verbose:
print('delete xml file:', xmlfile)
os.system(('rm %s' % xmlfile))
if os.path.isdir(save_dir):
if verbose:
print('delete folder:', save_dir)
os.system(('rm -r %s' % save_dir))
if (os.path.isdir(out_dir) and (not os.listdir(out_dir))):
if verbose:
print('delete the out_dir:', out_dir)
os.system(('rm -r %s' % out_dir)) | Clean the run before performing the computation. Delete the $name.log and
the job_$name.out file, located in the `run_dir`, and the $prefix.xml file
and the $prefix.save folder located in the `out_dir`. Finally, if the
`out_dir` is empty it is deleted. | mppi/Calculators/QeCalculator.py | clean_run | marcodalessandro76/MPPI | 1 | python | def clean_run(self):
'\n Clean the run before performing the computation. Delete the $name.log and\n the job_$name.out file, located in the `run_dir`, and the $prefix.xml file\n and the $prefix.save folder located in the `out_dir`. Finally, if the\n `out_dir` is empty it is deleted.\n\n '
run_dir = self.run_options.get('run_dir', '.')
name = self.run_options.get('name', 'default')
input = self.run_options.get('input')
verbose = self.run_options.get('verbose')
prefix = input.get_prefix()
out_dir = self._get_outdir_path()
logfile = (os.path.join(run_dir, name) + '.log')
job_out = os.path.join(run_dir, (('job_' + name) + '.out'))
xmlfile = (os.path.join(out_dir, prefix) + '.xml')
save_dir = (os.path.join(out_dir, prefix) + '.save')
if os.path.isfile(logfile):
if verbose:
print('delete log file:', logfile)
os.system(('rm %s' % logfile))
if os.path.isfile(job_out):
if verbose:
print('delete job_out script:', job_out)
os.system(('rm %s' % job_out))
if os.path.isfile(xmlfile):
if verbose:
print('delete xml file:', xmlfile)
os.system(('rm %s' % xmlfile))
if os.path.isdir(save_dir):
if verbose:
print('delete folder:', save_dir)
os.system(('rm -r %s' % save_dir))
if (os.path.isdir(out_dir) and (not os.listdir(out_dir))):
if verbose:
print('delete the out_dir:', out_dir)
os.system(('rm -r %s' % out_dir)) | def clean_run(self):
'\n Clean the run before performing the computation. Delete the $name.log and\n the job_$name.out file, located in the `run_dir`, and the $prefix.xml file\n and the $prefix.save folder located in the `out_dir`. Finally, if the\n `out_dir` is empty it is deleted.\n\n '
run_dir = self.run_options.get('run_dir', '.')
name = self.run_options.get('name', 'default')
input = self.run_options.get('input')
verbose = self.run_options.get('verbose')
prefix = input.get_prefix()
out_dir = self._get_outdir_path()
logfile = (os.path.join(run_dir, name) + '.log')
job_out = os.path.join(run_dir, (('job_' + name) + '.out'))
xmlfile = (os.path.join(out_dir, prefix) + '.xml')
save_dir = (os.path.join(out_dir, prefix) + '.save')
if os.path.isfile(logfile):
if verbose:
print('delete log file:', logfile)
os.system(('rm %s' % logfile))
if os.path.isfile(job_out):
if verbose:
print('delete job_out script:', job_out)
os.system(('rm %s' % job_out))
if os.path.isfile(xmlfile):
if verbose:
print('delete xml file:', xmlfile)
os.system(('rm %s' % xmlfile))
if os.path.isdir(save_dir):
if verbose:
print('delete folder:', save_dir)
os.system(('rm -r %s' % save_dir))
if (os.path.isdir(out_dir) and (not os.listdir(out_dir))):
if verbose:
print('delete the out_dir:', out_dir)
os.system(('rm -r %s' % out_dir))<|docstring|>Clean the run before performing the computation. Delete the $name.log and
the job_$name.out file, located in the `run_dir`, and the $prefix.xml file
and the $prefix.save folder located in the `out_dir`. Finally, if the
`out_dir` is empty it is deleted.<|endoftext|> |
66bb41622780f11d9648e57a668d3aab7dfcc26c3b3fffbdbace9f6777bc058f | def _ensure_run_directory(self):
'\n Create the run_dir, if it does not exists\n\n '
run_dir = self.run_options.get('run_dir', '.')
verbose = self.run_options.get('verbose')
if (not os.path.exists(run_dir)):
os.makedirs(run_dir)
if verbose:
print(("create the run_dir folder : '%s'" % run_dir)) | Create the run_dir, if it does not exists | mppi/Calculators/QeCalculator.py | _ensure_run_directory | marcodalessandro76/MPPI | 1 | python | def _ensure_run_directory(self):
'\n \n\n '
run_dir = self.run_options.get('run_dir', '.')
verbose = self.run_options.get('verbose')
if (not os.path.exists(run_dir)):
os.makedirs(run_dir)
if verbose:
print(("create the run_dir folder : '%s'" % run_dir)) | def _ensure_run_directory(self):
'\n \n\n '
run_dir = self.run_options.get('run_dir', '.')
verbose = self.run_options.get('verbose')
if (not os.path.exists(run_dir)):
os.makedirs(run_dir)
if verbose:
print(("create the run_dir folder : '%s'" % run_dir))<|docstring|>Create the run_dir, if it does not exists<|endoftext|> |
a3f82c9434a510dff709de819ee92a7d1fc8fe6471cff173dc7102236c11bc6c | def copy_source_dir(self):
'\n Copy the source_dir (if provided) in the out_dir and atttibute to the copied folder\n the name $prefix.save.\n\n Args:\n source_dir: the name of the source_dir (tipically it is the .save folder\n of the scf calculation that contains the wave-functions of the ground state).\n\n '
from shutil import copytree
source_dir = self.run_options.get('source_dir', None)
input = self.run_options.get('input')
prefix = input.get_prefix()
out_dir = self._get_outdir_path()
verbose = self.run_options.get('verbose')
if (source_dir is not None):
dest_dir = (os.path.join(out_dir, prefix) + '.save')
if (not os.path.isdir(dest_dir)):
if verbose:
print(('copy source_dir %s in the %s' % (source_dir, dest_dir)))
copytree(source_dir, dest_dir)
elif verbose:
print(('The folder %s already exists. Source_dir % s not copied' % (dest_dir, source_dir))) | Copy the source_dir (if provided) in the out_dir and attribute to the copied folder
the name $prefix.save.
Args:
source_dir: the name of the source_dir (typically it is the .save folder
of the scf calculation that contains the wave-functions of the ground state). | mppi/Calculators/QeCalculator.py | copy_source_dir | marcodalessandro76/MPPI | 1 | python | def copy_source_dir(self):
'\n Copy the source_dir (if provided) in the out_dir and atttibute to the copied folder\n the name $prefix.save.\n\n Args:\n source_dir: the name of the source_dir (tipically it is the .save folder\n of the scf calculation that contains the wave-functions of the ground state).\n\n '
from shutil import copytree
source_dir = self.run_options.get('source_dir', None)
input = self.run_options.get('input')
prefix = input.get_prefix()
out_dir = self._get_outdir_path()
verbose = self.run_options.get('verbose')
if (source_dir is not None):
dest_dir = (os.path.join(out_dir, prefix) + '.save')
if (not os.path.isdir(dest_dir)):
if verbose:
print(('copy source_dir %s in the %s' % (source_dir, dest_dir)))
copytree(source_dir, dest_dir)
elif verbose:
print(('The folder %s already exists. Source_dir % s not copied' % (dest_dir, source_dir))) | def copy_source_dir(self):
'\n Copy the source_dir (if provided) in the out_dir and atttibute to the copied folder\n the name $prefix.save.\n\n Args:\n source_dir: the name of the source_dir (tipically it is the .save folder\n of the scf calculation that contains the wave-functions of the ground state).\n\n '
from shutil import copytree
source_dir = self.run_options.get('source_dir', None)
input = self.run_options.get('input')
prefix = input.get_prefix()
out_dir = self._get_outdir_path()
verbose = self.run_options.get('verbose')
if (source_dir is not None):
dest_dir = (os.path.join(out_dir, prefix) + '.save')
if (not os.path.isdir(dest_dir)):
if verbose:
print(('copy source_dir %s in the %s' % (source_dir, dest_dir)))
copytree(source_dir, dest_dir)
elif verbose:
print(('The folder %s already exists. Source_dir % s not copied' % (dest_dir, source_dir)))<|docstring|>Copy the source_dir (if provided) in the out_dir and attribute to the copied folder
the name $prefix.save.
Args:
source_dir: the name of the source_dir (typically it is the .save folder
of the scf calculation that contains the wave-functions of the ground state).<|endoftext|> |
bee6690e79aa4383dbbc710d3f22540b8522eb90f3e3b54b095689ddb7f9091b | def _get_outdir_path(self):
'\n Get the absolute out_dir path. The path is built using the ``outdir`` parameter\n of the input file. If ``outdir`` is provided as a relative address the path starts\n from the ``run_dir`` of the calculator.\n\n '
run_dir = self.run_options.get('run_dir', '.')
input = self.run_options.get('input')
out_dir = input.get_outdir()
if os.path.isabs(out_dir):
return out_dir
else:
return os.path.abspath(os.path.join(run_dir, out_dir)) | Get the absolute out_dir path. The path is built using the ``outdir`` parameter
of the input file. If ``outdir`` is provided as a relative address the path starts
from the ``run_dir`` of the calculator. | mppi/Calculators/QeCalculator.py | _get_outdir_path | marcodalessandro76/MPPI | 1 | python | def _get_outdir_path(self):
'\n Get the absolute out_dir path. The path is built using the ``outdir`` parameter\n of the input file. If ``outdir`` is provided as a relative address the path starts\n from the ``run_dir`` of the calculator.\n\n '
run_dir = self.run_options.get('run_dir', '.')
input = self.run_options.get('input')
out_dir = input.get_outdir()
if os.path.isabs(out_dir):
return out_dir
else:
return os.path.abspath(os.path.join(run_dir, out_dir)) | def _get_outdir_path(self):
'\n Get the absolute out_dir path. The path is built using the ``outdir`` parameter\n of the input file. If ``outdir`` is provided as a relative address the path starts\n from the ``run_dir`` of the calculator.\n\n '
run_dir = self.run_options.get('run_dir', '.')
input = self.run_options.get('input')
out_dir = input.get_outdir()
if os.path.isabs(out_dir):
return out_dir
else:
return os.path.abspath(os.path.join(run_dir, out_dir))<|docstring|>Get the absolute out_dir path. The path is built using the ``outdir`` parameter
of the input file. If ``outdir`` is provided as a relative address the path starts
from the ``run_dir`` of the calculator.<|endoftext|> |
07362466ba0333f06fd56fdc177e23c57dbd3f0554c927564d49eec13ac724ce | def addTrace(tracer, entity, signalName):
'\n Add a signal to a tracer\n '
signal = '{0}.{1}'.format(entity.name, signalName)
filename = '{0}-{1}'.format(entity.name, signalName)
tracer.add(signal, filename) | Add a signal to a tracer | python/dynamic_graph/sot/torque_control/talos/create_entities_utils_talos.py | addTrace | nim65s/talos-torque-control | 3 | python | def addTrace(tracer, entity, signalName):
'\n \n '
signal = '{0}.{1}'.format(entity.name, signalName)
filename = '{0}-{1}'.format(entity.name, signalName)
tracer.add(signal, filename) | def addTrace(tracer, entity, signalName):
'\n \n '
signal = '{0}.{1}'.format(entity.name, signalName)
filename = '{0}-{1}'.format(entity.name, signalName)
tracer.add(signal, filename)<|docstring|>Add a signal to a tracer<|endoftext|> |
60f81fa3ecda277a4cf7c81cfd2559ff151cf12fda40955968f525604fb32790 | def needs_loop(func):
"\n A safeguard decorator for methods that require a live event loop.\n Inner function is needed to capture the instance reference -\n when needs_loop() is executed, there is no instance yet (hence no 'self')\n "
@wraps(func)
def wrapper(*args, **kwargs):
self = args[0]
if (not self.loop.is_running()):
raise Exception('Cannot submit task to a stopped loop.')
return func(*args, **kwargs)
return wrapper | A safeguard decorator for methods that require a live event loop.
Inner function is needed to capture the instance reference -
when needs_loop() is executed, there is no instance yet (hence no 'self') | sshexec.py | needs_loop | bshakur8/asyncssh-pool | 0 | python | def needs_loop(func):
"\n A safeguard decorator for methods that require a live event loop.\n Inner function is needed to capture the instance reference -\n when needs_loop() is executed, there is no instance yet (hence no 'self')\n "
@wraps(func)
def wrapper(*args, **kwargs):
self = args[0]
if (not self.loop.is_running()):
raise Exception('Cannot submit task to a stopped loop.')
return func(*args, **kwargs)
return wrapper | def needs_loop(func):
"\n A safeguard decorator for methods that require a live event loop.\n Inner function is needed to capture the instance reference -\n when needs_loop() is executed, there is no instance yet (hence no 'self')\n "
@wraps(func)
def wrapper(*args, **kwargs):
self = args[0]
if (not self.loop.is_running()):
raise Exception('Cannot submit task to a stopped loop.')
return func(*args, **kwargs)
return wrapper<|docstring|>A safeguard decorator for methods that require a live event loop.
Inner function is needed to capture the instance reference -
when needs_loop() is executed, there is no instance yet (hence no 'self')<|endoftext|> |
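A toy illustration of what the decorator protects against; the Worker class is made up, only the decorator behaviour mirrors needs_loop above (the loop exists but was never started, so the guard raises instead of queueing work that would never run).

import asyncio

class Worker:
    def __init__(self):
        self.loop = asyncio.new_event_loop()   # created but never started

    @needs_loop
    def submit(self, coro):
        return asyncio.run_coroutine_threadsafe(coro, loop=self.loop)

try:
    Worker().submit(None)   # never reaches run_coroutine_threadsafe
except Exception as err:
    print(err)              # Cannot submit task to a stopped loop.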
8abb1ef82cbdf185b69f6742cdab381e2ece0a9e3dde1a73ee7b7fdb28140261 | def log_debug(*args, **kwargs):
'\n By default - outputs nothing. Uncomment one of the lines below as needed.\n '
pass | By default - outputs nothing. Uncomment one of the lines below as needed. | sshexec.py | log_debug | bshakur8/asyncssh-pool | 0 | python | def log_debug(*args, **kwargs):
'\n \n '
pass | def log_debug(*args, **kwargs):
'\n \n '
pass<|docstring|>By default - outputs nothing. Uncomment one of the lines below as needed.<|endoftext|> |
e3da9cd2cd8cbbf96fc9014db96b67d84a95cd8e3ca552c9f08d4f25495e2c65 | def __init__(self, hostname=None, username=None, password=None, port=22):
'\n :param hostname: str, IP to server to run the command on\n :param username: str, username\n :param password: str, password\n :param port: int, SSH Port (Default 22)\n '
self.hostname = hostname
self.username = username
self.password = password
self.port = port | :param hostname: str, IP to server to run the command on
:param username: str, username
:param password: str, password
:param port: int, SSH Port (Default 22) | sshexec.py | __init__ | bshakur8/asyncssh-pool | 0 | python | def __init__(self, hostname=None, username=None, password=None, port=22):
'\n :param hostname: str, IP to server to run the command on\n :param username: str, username\n :param password: str, password\n :param port: int, SSH Port (Default 22)\n '
self.hostname = hostname
self.username = username
self.password = password
self.port = port | def __init__(self, hostname=None, username=None, password=None, port=22):
'\n :param hostname: str, IP to server to run the command on\n :param username: str, username\n :param password: str, password\n :param port: int, SSH Port (Default 22)\n '
self.hostname = hostname
self.username = username
self.password = password
self.port = port<|docstring|>:param hostname: str, IP to server to run the command on
:param username: str, username
:param password: str, password
:param port: int, SSH Port (Default 22)<|endoftext|> |
86baa899f9f4d53c9b9e7f8728f3555c02f3302b4c4fcc4518aa5d2a28d69191 | def __init__(self, cmd_string=None, timeout=None, response=None, has_banner=False, **kwargs):
'\n :param cmd_string: str, Command to run\n :param timeout: int, Timeout for the command\n :param response: str, Response to answer in case on interactive command\n :param has_banner: bool, True iff command has banner before getting the result\n :param kwargs: kwargs\n '
super().__init__(**kwargs)
self.has_banner = has_banner
self.cmd_string = cmd_string
self.timeout = timeout
self.response = response | :param cmd_string: str, Command to run
:param timeout: int, Timeout for the command
:param response: str, Response to answer in case of an interactive command
:param has_banner: bool, True iff command has banner before getting the result
:param kwargs: kwargs | sshexec.py | __init__ | bshakur8/asyncssh-pool | 0 | python | def __init__(self, cmd_string=None, timeout=None, response=None, has_banner=False, **kwargs):
'\n :param cmd_string: str, Command to run\n :param timeout: int, Timeout for the command\n :param response: str, Response to answer in case on interactive command\n :param has_banner: bool, True iff command has banner before getting the result\n :param kwargs: kwargs\n '
super().__init__(**kwargs)
self.has_banner = has_banner
self.cmd_string = cmd_string
self.timeout = timeout
self.response = response | def __init__(self, cmd_string=None, timeout=None, response=None, has_banner=False, **kwargs):
'\n :param cmd_string: str, Command to run\n :param timeout: int, Timeout for the command\n :param response: str, Response to answer in case on interactive command\n :param has_banner: bool, True iff command has banner before getting the result\n :param kwargs: kwargs\n '
super().__init__(**kwargs)
self.has_banner = has_banner
self.cmd_string = cmd_string
self.timeout = timeout
self.response = response<|docstring|>:param cmd_string: str, Command to run
:param timeout: int, Timeout for the command
:param response: str, Response to answer in case of an interactive command
:param has_banner: bool, True iff command has banner before getting the result
:param kwargs: kwargs<|endoftext|> |
b4230fda59c29e5bfc2070e0aecf8a5ed8f6f8550850d578d3bfdee29ba0dbb6 | def __init__(self, *, stdout=None, stderr=None, rc=None, cmd_string=None, timed_out=False):
'\n :param stdout: str, output stream\n :param stderr: str, stderr\n :param rc: int, RC\n :param cmd_string: str, Sent command\n :param timed_out: bool, True iff command was timed out\n '
self.stdout = stdout
self.stderr = stderr
self.rc = rc
self.cmd = cmd_string
self.timed_out = timed_out | :param stdout: str, output stream
:param stderr: str, stderr
:param rc: int, RC
:param cmd_string: str, Sent command
:param timed_out: bool, True iff command was timed out | sshexec.py | __init__ | bshakur8/asyncssh-pool | 0 | python | def __init__(self, *, stdout=None, stderr=None, rc=None, cmd_string=None, timed_out=False):
'\n :param stdout: str, output stream\n :param stderr: str, stderr\n :param rc: int, RC\n :param cmd_string: str, Sent command\n :param timed_out: bool, True iff command was timed out\n '
self.stdout = stdout
self.stderr = stderr
self.rc = rc
self.cmd = cmd_string
self.timed_out = timed_out | def __init__(self, *, stdout=None, stderr=None, rc=None, cmd_string=None, timed_out=False):
'\n :param stdout: str, output stream\n :param stderr: str, stderr\n :param rc: int, RC\n :param cmd_string: str, Sent command\n :param timed_out: bool, True iff command was timed out\n '
self.stdout = stdout
self.stderr = stderr
self.rc = rc
self.cmd = cmd_string
self.timed_out = timed_out<|docstring|>:param stdout: str, output stream
:param stderr: str, stderr
:param rc: int, RC
:param cmd_string: str, Sent command
:param timed_out: bool, True iff command was timed out<|endoftext|> |
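A small usage sketch tying the three containers together; the host, credentials and command are hypothetical. CmdInfo appears to extend AuthInfo (its __init__ forwards the unused keyword arguments through **kwargs, and async_send_cmd below hands it straight to get_connection), so a single object can carry both the command and the connection details, while ResultInfo is what a caller eventually reads back.

# Hypothetical host and credentials, for illustration only.
cmd = CmdInfo(cmd_string='uname -a', timeout=30,
              hostname='192.168.1.10', username='root', password='secret')

# A ResultInfo built by hand just to show the fields callers inspect.
res = ResultInfo(stdout='Linux\n', stderr='', rc=0, cmd_string=cmd.cmd_string)
print(res.rc, res.stdout.strip())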
27c585ff8c13c291277949ec85198645c77470dc6076a6562ce76b7642ba4cf7 | def __init__(self, connection, client, semaphore):
'\n :param connection: Connection object\n :param client: Client object\n :param semaphore: Semaphore\n '
self.connection = connection
self.client = client
self.semaphore = semaphore | :param connection: Connection object
:param client: Client object
:param semaphore: Semaphore | sshexec.py | __init__ | bshakur8/asyncssh-pool | 0 | python | def __init__(self, connection, client, semaphore):
'\n :param connection: Connection object\n :param client: Client object\n :param semaphore: Semaphore\n '
self.connection = connection
self.client = client
self.semaphore = semaphore | def __init__(self, connection, client, semaphore):
'\n :param connection: Connection object\n :param client: Client object\n :param semaphore: Semaphore\n '
self.connection = connection
self.client = client
self.semaphore = semaphore<|docstring|>:param connection: Connection object
:param client: Client object
:param semaphore: Semaphore<|endoftext|> |
9ab2bf656df21f69a9d40b1fb0b6d968fc4e069fdb91899bf09d3d88f9ef82d7 | def connection_made(self, connection: asyncssh.SSHClientConnection):
'\n Function that runs after a connection was made\n\n :param connection: Connection was made\n :type connection: asyncssh.SSHClientConnection\n :return: None\n '
self.host = connection._host
log_debug('Made TCP connection to: {}'.format(self.host)) | Function that runs after a connection was made
:param connection: Connection was made
:type connection: asyncssh.SSHClientConnection
:return: None | sshexec.py | connection_made | bshakur8/asyncssh-pool | 0 | python | def connection_made(self, connection: asyncssh.SSHClientConnection):
'\n Function that runs after a connection was made\n\n :param connection: Connection was made\n :type connection: asyncssh.SSHClientConnection\n :return: None\n '
self.host = connection._host
log_debug('Made TCP connection to: {}'.format(self.host)) | def connection_made(self, connection: asyncssh.SSHClientConnection):
'\n Function that runs after a connection was made\n\n :param connection: Connection was made\n :type connection: asyncssh.SSHClientConnection\n :return: None\n '
self.host = connection._host
log_debug('Made TCP connection to: {}'.format(self.host))<|docstring|>Function that runs after a connection was made
:param connection: Connection was made
:type connection: asyncssh.SSHClientConnection
:return: None<|endoftext|> |
60930508bbdc5e0d3ca6bdeda03f150ac9f0a37ee9fff997c215dd4cff5b5cb2 | def connection_lost(self, exc: Exception):
'\n Function that runs after a connection was lost\n\n :param exc: Exception thrown after lost connection\n :type exc: Exception\n :return: Noneloglog\n '
log_debug('Lost connection to: {}, reason: {}'.format(self.host, exc))
self.connected = False | Function that runs after a connection was lost
:param exc: Exception thrown after lost connection
:type exc: Exception
:return: None | sshexec.py | connection_lost | bshakur8/asyncssh-pool | 0 | python | def connection_lost(self, exc: Exception):
'\n Function that runs after a connection was lost\n\n :param exc: Exception thrown after lost connection\n :type exc: Exception\n :return: Noneloglog\n '
log_debug('Lost connection to: {}, reason: {}'.format(self.host, exc))
self.connected = False | def connection_lost(self, exc: Exception):
'\n Function that runs after a connection was lost\n\n :param exc: Exception thrown after lost connection\n :type exc: Exception\n :return: Noneloglog\n '
log_debug('Lost connection to: {}, reason: {}'.format(self.host, exc))
self.connected = False<|docstring|>Function that runs after a connection was lost
:param exc: Exception thrown after lost connection
:type exc: Exception
:return: None<|endoftext|>
ead5b4bb1fbda26bb2764ecd966d8e506cc8deb59eabaa0f349416ad4b1004dd | def auth_completed(self):
'\n Function that after authentication was completed\n\n :return: None\n '
self.connected = True
log_debug('Connected to : {}'.format(self.host)) | Function that runs after authentication was completed
:return: None | sshexec.py | auth_completed | bshakur8/asyncssh-pool | 0 | python | def auth_completed(self):
'\n Function that after authentication was completed\n\n :return: None\n '
self.connected = True
log_debug('Connected to : {}'.format(self.host)) | def auth_completed(self):
'\n Function that after authentication was completed\n\n :return: None\n '
self.connected = True
log_debug('Connected to : {}'.format(self.host))<|docstring|>Function that runs after authentication was completed
:return: None<|endoftext|> |
188eba935abf66a47d0cadf5878cf9e08e72d72ba3895b5690fbdfa9c5829f6d | def run(self):
'\n These actions take place on the event loop thread\n not on the main (calling) thread\n '
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
asyncio.BaseEventLoop.set_debug(self.loop, enabled=self.debug_flag)
self.coro_conn_locks = defaultdict(partial(asyncio.Lock, loop=self.loop))
self.loop.call_soon(self.is_running.set)
self.loop.run_forever() | These actions take place on the event loop thread
not on the main (calling) thread | sshexec.py | run | bshakur8/asyncssh-pool | 0 | python | def run(self):
'\n These actions take place on the event loop thread\n not on the main (calling) thread\n '
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
asyncio.BaseEventLoop.set_debug(self.loop, enabled=self.debug_flag)
self.coro_conn_locks = defaultdict(partial(asyncio.Lock, loop=self.loop))
self.loop.call_soon(self.is_running.set)
self.loop.run_forever() | def run(self):
'\n These actions take place on the event loop thread\n not on the main (calling) thread\n '
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
asyncio.BaseEventLoop.set_debug(self.loop, enabled=self.debug_flag)
self.coro_conn_locks = defaultdict(partial(asyncio.Lock, loop=self.loop))
self.loop.call_soon(self.is_running.set)
self.loop.run_forever()<|docstring|>These actions take place on the event loop thread
not on the main (calling) thread<|endoftext|> |
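A stripped-down, self-contained version of the pattern these run/stop methods implement: an event loop owned by a worker thread, with the main thread handing it coroutines through run_coroutine_threadsafe. All names here are illustrative and not part of sshexec.py.

import asyncio
import threading

class LoopThread(threading.Thread):
    def __init__(self):
        super().__init__(daemon=True)
        self.is_running = threading.Event()

    def run(self):
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)
        self.loop.call_soon(self.is_running.set)   # signal readiness to the caller
        self.loop.run_forever()

    def stop(self):
        self.loop.call_soon_threadsafe(self.loop.stop)

lt = LoopThread()
lt.start()
lt.is_running.wait()                               # don't submit before the loop is up
fut = asyncio.run_coroutine_threadsafe(asyncio.sleep(0, result='done'), loop=lt.loop)
print(fut.result())                                # 'done'
lt.stop()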
e335a3d6ba60830e00ef8d787473a4cdebdff8236754c65764d137fc755814af | def stop(self):
' Stop SSHExec '
log_debug('Stopping {}'.format(self.name))
self.is_running.clear()
self.loop.call_soon_threadsafe(self.loop.stop) | Stop SSHExec | sshexec.py | stop | bshakur8/asyncssh-pool | 0 | python | def stop(self):
' '
log_debug('Stopping {}'.format(self.name))
self.is_running.clear()
self.loop.call_soon_threadsafe(self.loop.stop) | def stop(self):
' '
log_debug('Stopping {}'.format(self.name))
self.is_running.clear()
self.loop.call_soon_threadsafe(self.loop.stop)<|docstring|>Stop SSHExec<|endoftext|> |
b30c2e8f53adcda9bd8705d42b20364d5375840e6cf9850fc85c9d9c51241add | @needs_loop
def sftp(self, auth_info: AuthInfo):
'\n An sftp_proxy factory, each sftp_proxy instance has the connection\n credentials and the event loop thread (self above)\n baked into __getattr__ on instantiation.\n This allows the OSL layer to provide the credentials in\n a way that is transparent to the test writer who only needs to\n provide the arguments that are specific to the sftp method he wants\n to execute.\n Verification of required sftp parameters/correct sftp method name\n is performed inside __getattr__, before forwarding the actual\n execution to the event loop so that param/name related exceptions\n are raised in the calling thread and not in the event loop thread.\n '
class SFTPProxy(object):
@staticmethod
def __getattr__(sftp_method_name: str):
def sftp_proxy_cmd(**kwargs):
sftp_method_obj = getattr(asyncssh.SFTPClient, sftp_method_name)
param_val_pairs = {param_name: kwargs[param_name] for param_name in signature(sftp_method_obj).parameters if (param_name in kwargs)}
sftp_func = partial(sftp_method_obj, **param_val_pairs)
asftp_cmd = self.async_sftp_cmd(sftp_func, auth_info)
fut = asyncio.run_coroutine_threadsafe(asftp_cmd, loop=self.loop)
return fut.result(timeout=DEFAULT_SFTP_TIMEOUT)
return sftp_proxy_cmd
return SFTPProxy() | An sftp_proxy factory, each sftp_proxy instance has the connection
credentials and the event loop thread (self above)
baked into __getattr__ on instantiation.
This allows the OSL layer to provide the credentials in
a way that is transparent to the test writer who only needs to
provide the arguments that are specific to the sftp method he wants
to execute.
Verification of required sftp parameters/correct sftp method name
is performed inside __getattr__, before forwarding the actual
execution to the event loop so that param/name related exceptions
are raised in the calling thread and not in the event loop thread. | sshexec.py | sftp | bshakur8/asyncssh-pool | 0 | python | @needs_loop
def sftp(self, auth_info: AuthInfo):
'\n An sftp_proxy factory, each sftp_proxy instance has the connection\n credentials and the event loop thread (self above)\n baked into __getattr__ on instantiation.\n This allows the OSL layer to provide the credentials in\n a way that is transparent to the test writer who only needs to\n provide the arguments that are specific to the sftp method he wants\n to execute.\n Verification of required sftp parameters/correct sftp method name\n is performed inside __getattr__, before forwarding the actual\n execution to the event loop so that param/name related exceptions\n are raised in the calling thread and not in the event loop thread.\n '
class SFTPProxy(object):
@staticmethod
def __getattr__(sftp_method_name: str):
def sftp_proxy_cmd(**kwargs):
sftp_method_obj = getattr(asyncssh.SFTPClient, sftp_method_name)
param_val_pairs = {param_name: kwargs[param_name] for param_name in signature(sftp_method_obj).parameters if (param_name in kwargs)}
sftp_func = partial(sftp_method_obj, **param_val_pairs)
asftp_cmd = self.async_sftp_cmd(sftp_func, auth_info)
fut = asyncio.run_coroutine_threadsafe(asftp_cmd, loop=self.loop)
return fut.result(timeout=DEFAULT_SFTP_TIMEOUT)
return sftp_proxy_cmd
return SFTPProxy() | @needs_loop
def sftp(self, auth_info: AuthInfo):
'\n An sftp_proxy factory, each sftp_proxy instance has the connection\n credentials and the event loop thread (self above)\n baked into __getattr__ on instantiation.\n This allows the OSL layer to provide the credentials in\n a way that is transparent to the test writer who only needs to\n provide the arguments that are specific to the sftp method he wants\n to execute.\n Verification of required sftp parameters/correct sftp method name\n is performed inside __getattr__, before forwarding the actual\n execution to the event loop so that param/name related exceptions\n are raised in the calling thread and not in the event loop thread.\n '
class SFTPProxy(object):
@staticmethod
def __getattr__(sftp_method_name: str):
def sftp_proxy_cmd(**kwargs):
sftp_method_obj = getattr(asyncssh.SFTPClient, sftp_method_name)
param_val_pairs = {param_name: kwargs[param_name] for param_name in signature(sftp_method_obj).parameters if (param_name in kwargs)}
sftp_func = partial(sftp_method_obj, **param_val_pairs)
asftp_cmd = self.async_sftp_cmd(sftp_func, auth_info)
fut = asyncio.run_coroutine_threadsafe(asftp_cmd, loop=self.loop)
return fut.result(timeout=DEFAULT_SFTP_TIMEOUT)
return sftp_proxy_cmd
return SFTPProxy()<|docstring|>An sftp_proxy factory, each sftp_proxy instance has the connection
credentials and the event loop thread (self above)
baked into __getattr__ on instantiation.
This allows the OSL layer to provide the credentials in
a way that is transparent to the test writer who only needs to
provide the arguments that are specific to the sftp method he wants
to execute.
Verification of required sftp parameters/correct sftp method name
is performed inside __getattr__, before forwarding the actual
execution to the event loop so that param/name related exceptions
are raised in the calling thread and not in the event loop thread.<|endoftext|> |
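A possible call site for the proxy, assuming executor is a running SSHExec and the credentials are made up; the keyword names must match the target asyncssh.SFTPClient method (here get, which accepts remotepaths and localpath), since __getattr__ filters kwargs against that signature and silently drops anything else.

# Hypothetical credentials and paths, for illustration only.
auth = AuthInfo(hostname='192.168.1.10', username='root', password='secret')

# Resolves asyncssh.SFTPClient.get, keeps only the matching kwargs, and runs it
# on the event loop thread; blocks up to DEFAULT_SFTP_TIMEOUT for the result.
executor.sftp(auth).get(remotepaths='/var/log/messages', localpath='./messages')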
51040d40efbe139f2e9011c6b0634b0f44ccab1bf415429668c20458bcd9e8db | def is_connected(self, auth_info: AuthInfo, timeout: int=5) -> bool:
'\n :param auth_info: Authentication information\n :type auth_info: AuthInfo\n :param timeout: Command timeout\n :type timeout: int\n :return: True iff connection is alive and server is connected\n :rtype bool\n '
async def heartbeat():
cmd = 'echo {}'.format(auth_info.hostname)
with (await self.get_connection(auth_info)) as conn_info:
return (await self.execute_ssh(conn_info.connection, cmd))
try:
'\n Get connection to hostname ( create if needed) and then attempt\n to run a dummy command. Dummy is needed because sometimes the SSH daemon will open\n a connection but till not have enough resources to to execute incoming commands.\n '
log_debug('heartbeat {}'.format(auth_info.hostname))
asyncio.run_coroutine_threadsafe(heartbeat(), loop=self.loop).result(timeout=timeout)
return True
except Exception:
return False | :param auth_info: Authentication information
:type auth_info: AuthInfo
:param timeout: Command timeout
:type timeout: int
:return: True iff connection is alive and server is connected
:rtype: bool | sshexec.py | is_connected | bshakur8/asyncssh-pool | 0 | python | def is_connected(self, auth_info: AuthInfo, timeout: int=5) -> bool:
'\n :param auth_info: Authentication information\n :type auth_info: AuthInfo\n :param timeout: Command timeout\n :type timeout: int\n :return: True iff connection is alive and server is connected\n :rtype bool\n '
async def heartbeat():
cmd = 'echo {}'.format(auth_info.hostname)
with (await self.get_connection(auth_info)) as conn_info:
return (await self.execute_ssh(conn_info.connection, cmd))
try:
'\n Get connection to hostname ( create if needed) and then attempt\n to run a dummy command. Dummy is needed because sometimes the SSH daemon will open\n a connection but till not have enough resources to to execute incoming commands.\n '
log_debug('heartbeat {}'.format(auth_info.hostname))
asyncio.run_coroutine_threadsafe(heartbeat(), loop=self.loop).result(timeout=timeout)
return True
except Exception:
return False | def is_connected(self, auth_info: AuthInfo, timeout: int=5) -> bool:
'\n :param auth_info: Authentication information\n :type auth_info: AuthInfo\n :param timeout: Command timeout\n :type timeout: int\n :return: True iff connection is alive and server is connected\n :rtype bool\n '
async def heartbeat():
cmd = 'echo {}'.format(auth_info.hostname)
with (await self.get_connection(auth_info)) as conn_info:
return (await self.execute_ssh(conn_info.connection, cmd))
try:
'\n Get connection to hostname ( create if needed) and then attempt\n to run a dummy command. Dummy is needed because sometimes the SSH daemon will open\n a connection but till not have enough resources to to execute incoming commands.\n '
log_debug('heartbeat {}'.format(auth_info.hostname))
asyncio.run_coroutine_threadsafe(heartbeat(), loop=self.loop).result(timeout=timeout)
return True
except Exception:
return False<|docstring|>:param auth_info: Authentication information
:type auth_info: AuthInfo
:param timeout: Command timeout
:type timeout: int
:return: True iff connection is alive and server is connected
:rtype: bool<|endoftext|>
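One way this check might be used as a guard before dispatching work; executor is assumed to be a running SSHExec instance and the address is made up.

auth = AuthInfo(hostname='192.168.1.10', username='root', password='secret')
if not executor.is_connected(auth, timeout=5):
    # the dummy 'echo' never came back, so don't bother queueing real commands
    raise OSError('host 192.168.1.10 is not reachable over SSH')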
2d75de5ccab197fd2d009040515b3602b1f700e4e8d5bcc0d170f3225f7d9221 | async def get_connection(self, auth_info: AuthInfo) -> AsyncConnInfo:
'\n Get the connection of the given authentication info\n\n :param auth_info: AuthInfo, Authentication information object\n :return: AsyncConnInfo, Saved connection\n '
hostname = auth_info.hostname
log_debug('Requested connection to {}'.format(hostname))
async with self.coro_conn_locks[hostname]:
log_debug('\t\t {} Entered lock for {}'.format(threading.currentThread().name, hostname))
"\n A thread level lock is not needed since get_conn can only be called\n by the thread in which the event loop is running.\n A coroutine-level lock is needed because we await on create_connection\n If the lock was not here, then it would be possible for multiple coroutines to\n attempt to create a connection to the same hostname simultaneously.\n coro_conn_locks is a defaultdict but we don't need to worry about thread safety -\n only the thread in which the SSHExec loop is running can access it.\n "
if ((hostname not in self.conn_dict) or (not self.conn_dict[hostname].client.connected)):
create_conn_params = dict(host=hostname, username=auth_info.username, password=auth_info.password, port=auth_info.port, known_hosts=None)
(conn, conn_client) = (await asyncssh.create_connection(SSHClient, **create_conn_params))
access_semaphore = asyncio.Semaphore(value=self.connections_per_host, loop=self.loop)
self.conn_dict[hostname] = AsyncConnInfo(conn, conn_client, access_semaphore)
log_debug('\t Created connection to {}'.format(hostname))
log_debug('\t\t exited lock for {}'.format(hostname))
log_debug('Returned cached connection to {}'.format(hostname))
return self.conn_dict[hostname] | Get the connection of the given authentication info
:param auth_info: AuthInfo, Authentication information object
:return: AsyncConnInfo, Saved connection | sshexec.py | get_connection | bshakur8/asyncssh-pool | 0 | python | async def get_connection(self, auth_info: AuthInfo) -> AsyncConnInfo:
'\n Get the connection of the given authentication info\n\n :param auth_info: AuthInfo, Authentication information object\n :return: AsyncConnInfo, Saved connection\n '
hostname = auth_info.hostname
log_debug('Requested connection to {}'.format(hostname))
async with self.coro_conn_locks[hostname]:
log_debug('\t\t {} Entered lock for {}'.format(threading.currentThread().name, hostname))
"\n A thread level lock is not needed since get_conn can only be called\n by the thread in which the event loop is running.\n A coroutine-level lock is needed because we await on create_connection\n If the lock was not here, then it would be possible for multiple coroutines to\n attempt to create a connection to the same hostname simultaneously.\n coro_conn_locks is a defaultdict but we don't need to worry about thread safety -\n only the thread in which the SSHExec loop is running can access it.\n "
if ((hostname not in self.conn_dict) or (not self.conn_dict[hostname].client.connected)):
create_conn_params = dict(host=hostname, username=auth_info.username, password=auth_info.password, port=auth_info.port, known_hosts=None)
(conn, conn_client) = (await asyncssh.create_connection(SSHClient, **create_conn_params))
access_semaphore = asyncio.Semaphore(value=self.connections_per_host, loop=self.loop)
self.conn_dict[hostname] = AsyncConnInfo(conn, conn_client, access_semaphore)
log_debug('\t Created connection to {}'.format(hostname))
log_debug('\t\t exited lock for {}'.format(hostname))
log_debug('Returned cached connection to {}'.format(hostname))
return self.conn_dict[hostname] | async def get_connection(self, auth_info: AuthInfo) -> AsyncConnInfo:
'\n Get the connection of the given authentication info\n\n :param auth_info: AuthInfo, Authentication information object\n :return: AsyncConnInfo, Saved connection\n '
hostname = auth_info.hostname
log_debug('Requested connection to {}'.format(hostname))
async with self.coro_conn_locks[hostname]:
log_debug('\t\t {} Entered lock for {}'.format(threading.currentThread().name, hostname))
"\n A thread level lock is not needed since get_conn can only be called\n by the thread in which the event loop is running.\n A coroutine-level lock is needed because we await on create_connection\n If the lock was not here, then it would be possible for multiple coroutines to\n attempt to create a connection to the same hostname simultaneously.\n coro_conn_locks is a defaultdict but we don't need to worry about thread safety -\n only the thread in which the SSHExec loop is running can access it.\n "
if ((hostname not in self.conn_dict) or (not self.conn_dict[hostname].client.connected)):
create_conn_params = dict(host=hostname, username=auth_info.username, password=auth_info.password, port=auth_info.port, known_hosts=None)
(conn, conn_client) = (await asyncssh.create_connection(SSHClient, **create_conn_params))
access_semaphore = asyncio.Semaphore(value=self.connections_per_host, loop=self.loop)
self.conn_dict[hostname] = AsyncConnInfo(conn, conn_client, access_semaphore)
log_debug('\t Created connection to {}'.format(hostname))
log_debug('\t\t exited lock for {}'.format(hostname))
log_debug('Returned cached connection to {}'.format(hostname))
return self.conn_dict[hostname]<|docstring|>Get the connection of the given authentication info
:param auth_info: AuthInfo, Authentication information object
:return: AsyncConnInfo, Saved connection<|endoftext|> |
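The caching idea in get_connection, reduced to a generic self-contained sketch: one asyncio.Lock per key, so concurrent coroutines asking for the same resource create it only once. Names here are illustrative and not part of sshexec.py.

import asyncio
from collections import defaultdict

locks = defaultdict(asyncio.Lock)   # one lock per key, created on first use
cache = {}

async def get_resource(key, factory):
    async with locks[key]:          # serialize creation per key only
        if key not in cache:
            cache[key] = await factory(key)
    return cache[key]

async def fake_factory(key):
    await asyncio.sleep(0)          # stand-in for asyncssh.create_connection
    return object()

async def demo():
    items = await asyncio.gather(*(get_resource('hostA', fake_factory) for _ in range(3)))
    print(len({id(item) for item in items}))   # 1 -> all callers share one object

asyncio.run(demo())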
2720c9a98848e016a5a93a0c2f87dad48a511363964e456660fd9b0f5adc0c6a | async def async_send_cmd(self, cmd_info: CmdInfo) -> ResultInfo:
'\n Send the given command asynchronously\n\n :param cmd_info: Command Info object\n :type cmd_info: CmdInfo\n :return: Result inform ation\n :type: ResultInfo\n '
conn_info = (await self.get_connection(cmd_info))
async with conn_info.semaphore:
return (await self.execute_ssh(conn_info.connection, cmd_info.cmd_string, response=cmd_info.response)) | Send the given command asynchronously
:param cmd_info: Command Info object
:type cmd_info: CmdInfo
:return: Result information
:type: ResultInfo | sshexec.py | async_send_cmd | bshakur8/asyncssh-pool | 0 | python | async def async_send_cmd(self, cmd_info: CmdInfo) -> ResultInfo:
'\n Send the given command asynchronously\n\n :param cmd_info: Command Info object\n :type cmd_info: CmdInfo\n :return: Result inform ation\n :type: ResultInfo\n '
conn_info = (await self.get_connection(cmd_info))
async with conn_info.semaphore:
return (await self.execute_ssh(conn_info.connection, cmd_info.cmd_string, response=cmd_info.response)) | async def async_send_cmd(self, cmd_info: CmdInfo) -> ResultInfo:
'\n Send the given command asynchronously\n\n :param cmd_info: Command Info object\n :type cmd_info: CmdInfo\n :return: Result inform ation\n :type: ResultInfo\n '
conn_info = (await self.get_connection(cmd_info))
async with conn_info.semaphore:
return (await self.execute_ssh(conn_info.connection, cmd_info.cmd_string, response=cmd_info.response))<|docstring|>Send the given command asynchronously
:param cmd_info: Command Info object
:type cmd_info: CmdInfo
:return: Result information
:type: ResultInfo<|endoftext|> |
c190c8bc216aba2cc6728e088bd71c4e6b12d05bd002e324b5c93ec0f0a49944 | @needs_loop
def send_cmd(self, cmd: CmdInfo) -> ResultInfo:
'\n Function to call when sending a command\n\n :param cmd: str, Command to run\n :return: ResultInfo, Result information\n :raise OSError: Failure in sending the command\n '
log_debug('Executing {}'.format(cmd.cmd_string))
"\n run_coroutine_threadsafe returns a concurrent.futures.future (not an asyncio.future).\n This means that the calling thread will wait for the result, unlike asyncio.future\n which raises an exception if the result is not yet available.\n Note that async_send_cmd(cmd) does not execute anything yet - it's only\n a coro object and will only be executed when the loop schedules it.\n "
'\n Event loop batch mode is disabled for this version,\n threadpool is used instead.\n ----\n Place the future in the currently active parallel context,\n do not wait for it to finish\n if self.in_batch_mode:\n self.thread_local.batch_commands[-1].append(FutureInfo(cmd, fut))\n return fut\n else:\n ----\n '
fut = asyncio.run_coroutine_threadsafe(self.async_send_cmd(cmd), loop=self.loop)
try:
if (cmd.timeout is not None):
cmd.timeout = max(cmd.timeout, DEFAULT_SSH_TIMEOUT)
return fut.result(timeout=cmd.timeout)
except Exception as e:
log_debug('{} occurred when executing future {}, cancelling it'.format(type(e), fut))
raise OSError(e) | Function to call when sending a command
:param cmd: CmdInfo, Command to run
:return: ResultInfo, Result information
:raise OSError: Failure in sending the command | sshexec.py | send_cmd | bshakur8/asyncssh-pool | 0 | python | @needs_loop
def send_cmd(self, cmd: CmdInfo) -> ResultInfo:
'\n Function to call when sending a command\n\n :param cmd: str, Command to run\n :return: ResultInfo, Result information\n :raise OSError: Failure in sending the command\n '
log_debug('Executing {}'.format(cmd.cmd_string))
"\n run_coroutine_threadsafe returns a concurrent.futures.future (not an asyncio.future).\n This means that the calling thread will wait for the result, unlike asyncio.future\n which raises an exception if the result is not yet available.\n Note that async_send_cmd(cmd) does not execute anything yet - it's only\n a coro object and will only be executed when the loop schedules it.\n "
'\n Event loop batch mode is disabled for this version,\n threadpool is used instead.\n ----\n Place the future in the currently active parallel context,\n do not wait for it to finish\n if self.in_batch_mode:\n self.thread_local.batch_commands[-1].append(FutureInfo(cmd, fut))\n return fut\n else:\n ----\n '
fut = asyncio.run_coroutine_threadsafe(self.async_send_cmd(cmd), loop=self.loop)
try:
if (cmd.timeout is not None):
cmd.timeout = max(cmd.timeout, DEFAULT_SSH_TIMEOUT)
return fut.result(timeout=cmd.timeout)
except Exception as e:
log_debug('{} occurred when executing future {}, cancelling it'.format(type(e), fut))
raise OSError(e) | @needs_loop
def send_cmd(self, cmd: CmdInfo) -> ResultInfo:
'\n Function to call when sending a command\n\n :param cmd: str, Command to run\n :return: ResultInfo, Result information\n :raise OSError: Failure in sending the command\n '
log_debug('Executing {}'.format(cmd.cmd_string))
"\n run_coroutine_threadsafe returns a concurrent.futures.future (not an asyncio.future).\n This means that the calling thread will wait for the result, unlike asyncio.future\n which raises an exception if the result is not yet available.\n Note that async_send_cmd(cmd) does not execute anything yet - it's only\n a coro object and will only be executed when the loop schedules it.\n "
'\n Event loop batch mode is disabled for this version,\n threadpool is used instead.\n ----\n Place the future in the currently active parallel context,\n do not wait for it to finish\n if self.in_batch_mode:\n self.thread_local.batch_commands[-1].append(FutureInfo(cmd, fut))\n return fut\n else:\n ----\n '
fut = asyncio.run_coroutine_threadsafe(self.async_send_cmd(cmd), loop=self.loop)
try:
if (cmd.timeout is not None):
cmd.timeout = max(cmd.timeout, DEFAULT_SSH_TIMEOUT)
return fut.result(timeout=cmd.timeout)
except Exception as e:
log_debug('{} occurred when executing future {}, cancelling it'.format(type(e), fut))
raise OSError(e)<|docstring|>Function to call when sending a command
:param cmd: CmdInfo, Command to run
:return: ResultInfo, Result information
:raise OSError: Failure in sending the command<|endoftext|> |
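A hypothetical end-to-end flow for this blocking API; the bare SSHExec() construction and the is_running event are inferred from the run/stop records above rather than shown in this excerpt, and the host details are invented.

executor = SSHExec()            # constructor arguments not shown in this excerpt
executor.start()                # Thread.start -> run() brings the loop up
executor.is_running.wait()      # needs_loop would raise if we raced the startup

cmd = CmdInfo(cmd_string='hostname', timeout=60,
              hostname='192.168.1.10', username='root', password='secret')
result = executor.send_cmd(cmd) # blocks for a ResultInfo or raises OSError
print(result.rc, result.stdout)

executor.stop()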
8c0e71a2eaf5488951e65a71a7d38b45e65a6e033eb4d397c021a1a7381861fb | async def execute_ssh(self, conn: asyncssh.SSHClientConnection, cmd_string: str, response: str=None) -> ResultInfo:
'\n The atomic function that runs the given command on the giving connection\n\n :param conn: Connection to run the command on\n :type conn: asyncssh.SSHClientConnection\n :param cmd_string: Command to run\n :type cmd_string: str\n :param response:\n :return:\n '
std_output = err_output = None
log_debug('Executing {}:{}'.format(conn._host, cmd_string))
try:
(stdin, stdout, stderr) = (await conn.open_session())
try:
(await asyncio.wait_for(stdout.read(), timeout=1, loop=self.loop).result())
except Exception:
response = ''
stdin.write((cmd_string + '\n'))
if (';' in response):
list_response = response.split(';')
for response in list_response:
if (not response):
continue
stdin.write((response + '\n'))
stdin.write_eof()
std_output = (await stdout.readline())
err_output = (await stderr.readline())
(await stdout.channel.wait_closed())
(await stdin.channel.wait_closed())
(await stderr.channel.wait_closed())
rc = stdout.channel.get_exit_status()
else:
if response:
stdin.write((response + '\n'))
stdin.write_eof()
std_output = (await stdout.read())
err_output = (await stderr.read())
(await stdout.channel.wait_closed())
(await stdin.channel.wait_closed())
(await stderr.channel.wait_closed())
rc = stdout.channel.get_exit_status()
except Exception as e:
log_debug(f'Error executing command: {cmd_string}, {type(e)}: {e}')
raise OSError(e)
return ResultInfo(stdout=std_output, stderr=err_output, rc=rc, cmd_string=cmd_string) | The atomic function that runs the given command on the giving connection
:param conn: Connection to run the command on
:type conn: asyncssh.SSHClientConnection
:param cmd_string: Command to run
:type cmd_string: str
:param response:
:return: | sshexec.py | execute_ssh | bshakur8/asyncssh-pool | 0 | python | async def execute_ssh(self, conn: asyncssh.SSHClientConnection, cmd_string: str, response: str=None) -> ResultInfo:
'\n The atomic function that runs the given command on the giving connection\n\n :param conn: Connection to run the command on\n :type conn: asyncssh.SSHClientConnection\n :param cmd_string: Command to run\n :type cmd_string: str\n :param response:\n :return:\n '
std_output = err_output = None
log_debug('Executing {}:{}'.format(conn._host, cmd_string))
try:
(stdin, stdout, stderr) = (await conn.open_session())
try:
(await asyncio.wait_for(stdout.read(), timeout=1, loop=self.loop).result())
except Exception:
response =
stdin.write((cmd_string + '\n'))
if (';' in response):
list_response = response.split(';')
for response in list_response:
if (not response):
continue
stdin.write((response + '\n'))
stdin.write_eof()
std_output = (await stdout.readline())
err_output = (await stderr.readline())
(await stdout.channel.wait_closed())
(await stdin.channel.wait_closed())
(await stderr.channel.wait_closed())
rc = stdout.channel.get_exit_status()
else:
if response:
stdin.write((response + '\n'))
stdin.write_eof()
std_output = (await stdout.read())
err_output = (await stderr.read())
(await stdout.channel.wait_closed())
(await stdin.channel.wait_closed())
(await stderr.channel.wait_closed())
rc = stdout.channel.get_exit_status()
except Exception as e:
log_debug(f'Error executing command: {cmd_string}, {type(e)}: {e}')
raise OSError(e)
return ResultInfo(stdout=std_output, stderr=err_output, rc=rc, cmd_string=cmd_string) | async def execute_ssh(self, conn: asyncssh.SSHClientConnection, cmd_string: str, response: str=None) -> ResultInfo:
'\n The atomic function that runs the given command on the giving connection\n\n :param conn: Connection to run the command on\n :type conn: asyncssh.SSHClientConnection\n :param cmd_string: Command to run\n :type cmd_string: str\n :param response:\n :return:\n '
std_output = err_output = None
log_debug('Executing {}:{}'.format(conn._host, cmd_string))
try:
(stdin, stdout, stderr) = (await conn.open_session())
try:
(await asyncio.wait_for(stdout.read(), timeout=1, loop=self.loop).result())
except Exception:
response =
stdin.write((cmd_string + '\n'))
if (';' in response):
list_response = response.split(';')
for response in list_response:
if (not response):
continue
stdin.write((response + '\n'))
stdin.write_eof()
std_output = (await stdout.readline())
err_output = (await stderr.readline())
(await stdout.channel.wait_closed())
(await stdin.channel.wait_closed())
(await stderr.channel.wait_closed())
rc = stdout.channel.get_exit_status()
else:
if response:
stdin.write((response + '\n'))
stdin.write_eof()
std_output = (await stdout.read())
err_output = (await stderr.read())
(await stdout.channel.wait_closed())
(await stdin.channel.wait_closed())
(await stderr.channel.wait_closed())
rc = stdout.channel.get_exit_status()
except Exception as e:
log_debug(f'Error executing command: {cmd_string}, {type(e)}: {e}')
raise OSError(e)
return ResultInfo(stdout=std_output, stderr=err_output, rc=rc, cmd_string=cmd_string)<|docstring|>The atomic function that runs the given command on the giving connection
:param conn: Connection to run the command on
:type conn: asyncssh.SSHClientConnection
:param cmd_string: Command to run
:type cmd_string: str
:param response:
:return:<|endoftext|> |
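execute_ssh drives an interactive session by hand (open_session, writing the command to stdin, reading stdout/stderr, then collecting the exit status). For comparison, asyncssh also exposes a higher-level run() call that avoids the manual stream plumbing; the snippet below is a rough sketch of that simpler route, not a rewrite of execute_ssh. The host, credentials and command are placeholder assumptions.

import asyncio
import asyncssh

async def run_once(host: str, command: str) -> None:
    # connect() and run() are standard asyncssh APIs; authentication is
    # assumed to come from the local SSH agent / known_hosts setup.
    async with asyncssh.connect(host) as conn:
        result = await conn.run(command, check=False)
        print("rc:", result.exit_status)
        print("stdout:", result.stdout)
        print("stderr:", result.stderr)

# asyncio.run(run_once("example.com", "uname -a"))  # placeholder host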
6864920066143d08c21eae9e2e2989630b6149969de709f36fcdd1e114984501 | def iter_manifests():
'Iterate over all available manifests.'
manifests = [json.loads(fil.read_text()) for fil in component_dir.glob('*/manifest.json')]
return sorted(manifests, key=(lambda man: man['domain'])) | Iterate over all available manifests. | script/hassfest/manifest_helper.py | iter_manifests | cristian-vescan/core | 30,023 | python | def iter_manifests():
manifests = [json.loads(fil.read_text()) for fil in component_dir.glob('*/manifest.json')]
return sorted(manifests, key=(lambda man: man['domain'])) | def iter_manifests():
manifests = [json.loads(fil.read_text()) for fil in component_dir.glob('*/manifest.json')]
return sorted(manifests, key=(lambda man: man['domain']))<|docstring|>Iterate over all available manifests.<|endoftext|> |
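iter_manifests is a small glob-and-sort over JSON files. A standalone version of the same idea, with the component directory passed in explicitly (since component_dir is defined elsewhere in that module), might look like this; the directory path in the usage comment is only an example.

import json
from pathlib import Path

def iter_manifests(component_dir: Path):
    # Load every <component>/manifest.json and sort by the "domain" key.
    manifests = [
        json.loads(path.read_text())
        for path in component_dir.glob("*/manifest.json")
    ]
    return sorted(manifests, key=lambda manifest: manifest["domain"])

# for manifest in iter_manifests(Path("homeassistant/components")):
#     print(manifest["domain"])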
26e0bc7b7a89183966fe39babb4de8e725fb2cc667838f899dd2d33be4c357c0 | def __init__(self, accumulation_steps=1, accumulation_type='mean', learning_rate=0.01, momentum=0.0, nesterov=False, name='SGD', **kwargs):
'Construct a new SGD optimizer.\n Args:\n accumulation_steps: An integer. Update gradient in every accumulation steps.\n learning_rate: A Tensor or a floating point value. The learning rate.\n beta_1: A float value or a constant float tensor. The exponential decay\n rate for the 1st moment estimates.\n beta_2: A float value or a constant float tensor. The exponential decay\n rate for the 2nd moment estimates.\n epsilon: A small constant for numerical stability. This epsilon is\n "epsilon hat" in the Kingma and Ba paper (in the formula just before\n Section 2.1), not the epsilon in Algorithm 1 of the paper.\n amsgrad: boolean. Whether to apply AMSGrad variant of this algorithm from\n the paper "On the Convergence of Adam and beyond".\n name: Optional name for the operations created when applying gradients.\n Defaults to "Adam". @compatibility(eager) When eager execution is\n enabled, `learning_rate`, `beta_1`, `beta_2`, and `epsilon` can each be\n a callable that takes no arguments and returns the actual value to use.\n This can be useful for changing these values across different\n invocations of optimizer functions. @end_compatibility\n **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,\n `decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip\n gradients by value, `decay` is included for backward compatibility to\n allow time inverse decay of learning rate. `lr` is included for backward\n compatibility, recommended to use `learning_rate` instead.\n '
super(SGDAccumulated, self).__init__(name, **kwargs)
self._set_hyper('accumulation_steps', tf.cast(accumulation_steps, tf.int32))
self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
self._set_hyper('decay', self._initial_decay)
self._momentum = False
if (isinstance(momentum, tf.Tensor) or callable(momentum) or (momentum > 0)):
self._momentum = True
if (isinstance(momentum, (int, float)) and ((momentum < 0) or (momentum > 1))):
raise ValueError('`momentum` must be between [0, 1].')
self._set_hyper('momentum', momentum)
self.nesterov = nesterov
self._accumulation_type = accumulation_type | Construct a new SGD optimizer.
Args:
accumulation_steps: An integer. Update gradient in every accumulation steps.
learning_rate: A Tensor or a floating point value. The learning rate.
beta_1: A float value or a constant float tensor. The exponential decay
rate for the 1st moment estimates.
beta_2: A float value or a constant float tensor. The exponential decay
rate for the 2nd moment estimates.
epsilon: A small constant for numerical stability. This epsilon is
"epsilon hat" in the Kingma and Ba paper (in the formula just before
Section 2.1), not the epsilon in Algorithm 1 of the paper.
amsgrad: boolean. Whether to apply AMSGrad variant of this algorithm from
the paper "On the Convergence of Adam and beyond".
name: Optional name for the operations created when applying gradients.
Defaults to "Adam". @compatibility(eager) When eager execution is
enabled, `learning_rate`, `beta_1`, `beta_2`, and `epsilon` can each be
a callable that takes no arguments and returns the actual value to use.
This can be useful for changing these values across different
invocations of optimizer functions. @end_compatibility
**kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,
`decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip
gradients by value, `decay` is included for backward compatibility to
allow time inverse decay of learning rate. `lr` is included for backward
compatibility, recommended to use `learning_rate` instead. | optimization/sgd_accum.py | __init__ | vishnubanna/TFAggregatedTraining | 0 | python | def __init__(self, accumulation_steps=1, accumulation_type='mean', learning_rate=0.01, momentum=0.0, nesterov=False, name='SGD', **kwargs):
'Construct a new SGD optimizer.\n Args:\n accumulation_steps: An integer. Update gradient in every accumulation steps.\n learning_rate: A Tensor or a floating point value. The learning rate.\n beta_1: A float value or a constant float tensor. The exponential decay\n rate for the 1st moment estimates.\n beta_2: A float value or a constant float tensor. The exponential decay\n rate for the 2nd moment estimates.\n epsilon: A small constant for numerical stability. This epsilon is\n "epsilon hat" in the Kingma and Ba paper (in the formula just before\n Section 2.1), not the epsilon in Algorithm 1 of the paper.\n amsgrad: boolean. Whether to apply AMSGrad variant of this algorithm from\n the paper "On the Convergence of Adam and beyond".\n name: Optional name for the operations created when applying gradients.\n Defaults to "Adam". @compatibility(eager) When eager execution is\n enabled, `learning_rate`, `beta_1`, `beta_2`, and `epsilon` can each be\n a callable that takes no arguments and returns the actual value to use.\n This can be useful for changing these values across different\n invocations of optimizer functions. @end_compatibility\n **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,\n `decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip\n gradients by value, `decay` is included for backward compatibility to\n allow time inverse decay of learning rate. `lr` is included for backward\n compatibility, recommended to use `learning_rate` instead.\n '
super(SGDAccumulated, self).__init__(name, **kwargs)
self._set_hyper('accumulation_steps', tf.cast(accumulation_steps, tf.int32))
self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
self._set_hyper('decay', self._initial_decay)
self._momentum = False
if (isinstance(momentum, tf.Tensor) or callable(momentum) or (momentum > 0)):
self._momentum = True
if (isinstance(momentum, (int, float)) and ((momentum < 0) or (momentum > 1))):
raise ValueError('`momentum` must be between [0, 1].')
self._set_hyper('momentum', momentum)
self.nesterov = nesterov
self._accumulation_type = accumulation_type | def __init__(self, accumulation_steps=1, accumulation_type='mean', learning_rate=0.01, momentum=0.0, nesterov=False, name='SGD', **kwargs):
'Construct a new SGD optimizer.\n Args:\n accumulation_steps: An integer. Update gradient in every accumulation steps.\n learning_rate: A Tensor or a floating point value. The learning rate.\n beta_1: A float value or a constant float tensor. The exponential decay\n rate for the 1st moment estimates.\n beta_2: A float value or a constant float tensor. The exponential decay\n rate for the 2nd moment estimates.\n epsilon: A small constant for numerical stability. This epsilon is\n "epsilon hat" in the Kingma and Ba paper (in the formula just before\n Section 2.1), not the epsilon in Algorithm 1 of the paper.\n amsgrad: boolean. Whether to apply AMSGrad variant of this algorithm from\n the paper "On the Convergence of Adam and beyond".\n name: Optional name for the operations created when applying gradients.\n Defaults to "Adam". @compatibility(eager) When eager execution is\n enabled, `learning_rate`, `beta_1`, `beta_2`, and `epsilon` can each be\n a callable that takes no arguments and returns the actual value to use.\n This can be useful for changing these values across different\n invocations of optimizer functions. @end_compatibility\n **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,\n `decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip\n gradients by value, `decay` is included for backward compatibility to\n allow time inverse decay of learning rate. `lr` is included for backward\n compatibility, recommended to use `learning_rate` instead.\n '
super(SGDAccumulated, self).__init__(name, **kwargs)
self._set_hyper('accumulation_steps', tf.cast(accumulation_steps, tf.int32))
self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
self._set_hyper('decay', self._initial_decay)
self._momentum = False
if (isinstance(momentum, tf.Tensor) or callable(momentum) or (momentum > 0)):
self._momentum = True
if (isinstance(momentum, (int, float)) and ((momentum < 0) or (momentum > 1))):
raise ValueError('`momentum` must be between [0, 1].')
self._set_hyper('momentum', momentum)
self.nesterov = nesterov
self._accumulation_type = accumulation_type<|docstring|>Construct a new SGD optimizer.
Args:
accumulation_steps: An integer. Update gradient in every accumulation steps.
learning_rate: A Tensor or a floating point value. The learning rate.
beta_1: A float value or a constant float tensor. The exponential decay
rate for the 1st moment estimates.
beta_2: A float value or a constant float tensor. The exponential decay
rate for the 2nd moment estimates.
epsilon: A small constant for numerical stability. This epsilon is
"epsilon hat" in the Kingma and Ba paper (in the formula just before
Section 2.1), not the epsilon in Algorithm 1 of the paper.
amsgrad: boolean. Whether to apply AMSGrad variant of this algorithm from
the paper "On the Convergence of Adam and beyond".
name: Optional name for the operations created when applying gradients.
Defaults to "Adam". @compatibility(eager) When eager execution is
enabled, `learning_rate`, `beta_1`, `beta_2`, and `epsilon` can each be
a callable that takes no arguments and returns the actual value to use.
This can be useful for changing these values across different
invocations of optimizer functions. @end_compatibility
**kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,
`decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip
gradients by value, `decay` is included for backward compatibility to
allow time inverse decay of learning rate. `lr` is included for backward
compatibility, recommended to use `learning_rate` instead.<|endoftext|> |
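The key idea behind accumulation_steps is that gradients from several micro-batches are combined before a single weight update, so the effective batch size becomes batch_size * accumulation_steps. Below is a minimal, framework-agnostic sketch of "mean"-type accumulation; it is not the optimizer's actual _resource_apply implementation (which is not shown in this row), and the parameter values are made up for illustration.

# Plain-Python sketch of "mean" gradient accumulation over N micro-batches.
def accumulated_update(weights, micro_batch_grads, learning_rate, accumulation_steps):
    assert len(micro_batch_grads) == accumulation_steps
    # Average the per-micro-batch gradients...
    mean_grad = [
        sum(step_grads[i] for step_grads in micro_batch_grads) / accumulation_steps
        for i in range(len(weights))
    ]
    # ...then apply one SGD step with the averaged gradient.
    return [w - learning_rate * g for w, g in zip(weights, mean_grad)]

# Example: two parameters, accumulation over 2 micro-batches.
print(accumulated_update([1.0, 2.0], [[0.2, 0.4], [0.0, 0.2]], 0.1, 2))
# -> [0.99, 1.97]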
c1a4d137ec0c39462b0b272377bf15cdb32461eb38956eec20177abb27f59072 | @pytest.mark.parametrize('comparators', [('first_name', 'second_name'), ('first_name', 1), (1, 'first_name')])
def test_non_literal(assert_errors, parse_ast_tree, simple_conditions, comparators, default_options):
'Testing that comparisons work well.'
tree = parse_ast_tree(simple_conditions.format(*comparators))
visitor = ComparisonSanityVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, []) | Testing that comparisons work well. | tests/test_visitors/test_ast/test_comparisons/test_literal.py | test_non_literal | phoolish-philomath/wemake-python-styleguide | 0 | python | @pytest.mark.parametrize('comparators', [('first_name', 'second_name'), ('first_name', 1), (1, 'first_name')])
def test_non_literal(assert_errors, parse_ast_tree, simple_conditions, comparators, default_options):
tree = parse_ast_tree(simple_conditions.format(*comparators))
visitor = ComparisonSanityVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, []) | @pytest.mark.parametrize('comparators', [('first_name', 'second_name'), ('first_name', 1), (1, 'first_name')])
def test_non_literal(assert_errors, parse_ast_tree, simple_conditions, comparators, default_options):
tree = parse_ast_tree(simple_conditions.format(*comparators))
visitor = ComparisonSanityVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [])<|docstring|>Testing that comparisons work well.<|endoftext|> |
6bb1aa43588edfd364f42fb33784437efb248db782055cd4d6ca318a9dcfeaa9 | @pytest.mark.parametrize('comparators', [(1, 2), ('"string1"', '"string2"'), ('[1, 2, 3]', '(1, 2, 3)'), ('{"key": 1}', '{"a", "b"}')])
def test_literal(assert_errors, parse_ast_tree, simple_conditions, comparators, default_options):
'Testing that violations are when using literal comparisons.'
tree = parse_ast_tree(simple_conditions.format(*comparators))
visitor = ComparisonSanityVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [ConstantComparisonViolation]) | Testing that violations are when using literal comparisons. | tests/test_visitors/test_ast/test_comparisons/test_literal.py | test_literal | phoolish-philomath/wemake-python-styleguide | 0 | python | @pytest.mark.parametrize('comparators', [(1, 2), ('"string1"', '"string2"'), ('[1, 2, 3]', '(1, 2, 3)'), ('{"key": 1}', '{"a", "b"}')])
def test_literal(assert_errors, parse_ast_tree, simple_conditions, comparators, default_options):
tree = parse_ast_tree(simple_conditions.format(*comparators))
visitor = ComparisonSanityVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [ConstantComparisonViolation]) | @pytest.mark.parametrize('comparators', [(1, 2), ('"string1"', '"string2"'), ('[1, 2, 3]', '(1, 2, 3)'), ('{"key": 1}', '{"a", "b"}')])
def test_literal(assert_errors, parse_ast_tree, simple_conditions, comparators, default_options):
tree = parse_ast_tree(simple_conditions.format(*comparators))
visitor = ComparisonSanityVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [ConstantComparisonViolation])<|docstring|>Testing that violations are when using literal comparisons.<|endoftext|> |
55e187e79d341742ad5464d1331da1ce47545954f0ce2c28310dca7a3b4b41ac | @pytest.mark.parametrize('code', [if_with_chained_comparisons1, if_with_chained_comparisons3])
@pytest.mark.parametrize('comparators', [(1, 'first_name'), (1, 1)])
def test_literal_special1(assert_errors, parse_ast_tree, code, comparators, default_options):
'Testing that special cases do work and raise warnings.'
tree = parse_ast_tree(code.format(*comparators))
visitor = ComparisonSanityVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [ConstantComparisonViolation]) | Testing that special cases do work and raise warnings. | tests/test_visitors/test_ast/test_comparisons/test_literal.py | test_literal_special1 | phoolish-philomath/wemake-python-styleguide | 0 | python | @pytest.mark.parametrize('code', [if_with_chained_comparisons1, if_with_chained_comparisons3])
@pytest.mark.parametrize('comparators', [(1, 'first_name'), (1, 1)])
def test_literal_special1(assert_errors, parse_ast_tree, code, comparators, default_options):
tree = parse_ast_tree(code.format(*comparators))
visitor = ComparisonSanityVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [ConstantComparisonViolation]) | @pytest.mark.parametrize('code', [if_with_chained_comparisons1, if_with_chained_comparisons3])
@pytest.mark.parametrize('comparators', [(1, 'first_name'), (1, 1)])
def test_literal_special1(assert_errors, parse_ast_tree, code, comparators, default_options):
tree = parse_ast_tree(code.format(*comparators))
visitor = ComparisonSanityVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [ConstantComparisonViolation])<|docstring|>Testing that special cases do work and raise warnings.<|endoftext|> |
2cb6c951cc0c5729c321e48728e628d627c7bc1bc907adafab93061194ee6502 | @pytest.mark.parametrize('code', [if_with_chained_comparisons2, if_with_chained_comparisons3])
@pytest.mark.parametrize('comparators', [('first_name', 1), (1, 1)])
def test_literal_special2(assert_errors, parse_ast_tree, code, comparators, default_options):
'Testing that special cases do work and raise warnings.'
tree = parse_ast_tree(code.format(*comparators))
visitor = ComparisonSanityVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [ConstantComparisonViolation]) | Testing that special cases do work and raise warnings. | tests/test_visitors/test_ast/test_comparisons/test_literal.py | test_literal_special2 | phoolish-philomath/wemake-python-styleguide | 0 | python | @pytest.mark.parametrize('code', [if_with_chained_comparisons2, if_with_chained_comparisons3])
@pytest.mark.parametrize('comparators', [('first_name', 1), (1, 1)])
def test_literal_special2(assert_errors, parse_ast_tree, code, comparators, default_options):
tree = parse_ast_tree(code.format(*comparators))
visitor = ComparisonSanityVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [ConstantComparisonViolation]) | @pytest.mark.parametrize('code', [if_with_chained_comparisons2, if_with_chained_comparisons3])
@pytest.mark.parametrize('comparators', [('first_name', 1), (1, 1)])
def test_literal_special2(assert_errors, parse_ast_tree, code, comparators, default_options):
tree = parse_ast_tree(code.format(*comparators))
visitor = ComparisonSanityVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [ConstantComparisonViolation])<|docstring|>Testing that special cases do work and raise warnings.<|endoftext|> |
00b8d598d759e5af4c3f4e1126ac12fc647a32365306d38d0d55c964c84babed | @pytest.mark.parametrize('code', [if_with_chained_comparisons1, if_with_chained_comparisons2, if_with_chained_comparisons3])
def test_literal_special_without_errors(assert_errors, parse_ast_tree, code, default_options):
'Testing that special cases do work and do not raise warnings.'
tree = parse_ast_tree(code.format('first_name', 'second_name'))
visitor = ComparisonSanityVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, []) | Testing that special cases do work and do not raise warnings. | tests/test_visitors/test_ast/test_comparisons/test_literal.py | test_literal_special_without_errors | phoolish-philomath/wemake-python-styleguide | 0 | python | @pytest.mark.parametrize('code', [if_with_chained_comparisons1, if_with_chained_comparisons2, if_with_chained_comparisons3])
def test_literal_special_without_errors(assert_errors, parse_ast_tree, code, default_options):
tree = parse_ast_tree(code.format('first_name', 'second_name'))
visitor = ComparisonSanityVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, []) | @pytest.mark.parametrize('code', [if_with_chained_comparisons1, if_with_chained_comparisons2, if_with_chained_comparisons3])
def test_literal_special_without_errors(assert_errors, parse_ast_tree, code, default_options):
tree = parse_ast_tree(code.format('first_name', 'second_name'))
visitor = ComparisonSanityVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [])<|docstring|>Testing that special cases do work and do not raise warnings.<|endoftext|> |
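The five test samples above all lean on the same pytest.mark.parametrize mechanism: the decorator expands one test function into one collected test case per parameter tuple. A self-contained illustration of that expansion, independent of wemake-python-styleguide's fixtures (the parameter values here are arbitrary), is:

import pytest

@pytest.mark.parametrize("comparators", [(1, 2), ("a", "b"), (3.0, 3.0)])
def test_pairs_have_two_items(comparators):
    # pytest generates three test cases, one per tuple above.
    assert len(comparators) == 2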
f47f92ba4658762d4c8ce01de4eb71b8e6c856dc00c925083c23ac287c069b46 | def forward(self, input_ids=None, token_type_ids=None, attention_mask=None, labels=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, adapter_names=None, head=None, **kwargs):
'\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,\n num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See\n :obj:`input_ids` above)\n '
return_dict = (return_dict if (return_dict is not None) else self.config.use_return_dict)
(batch_size, num_choices) = (input_ids.shape[:2] if (input_ids is not None) else inputs_embeds.shape[:2])
flat_input_ids = (input_ids.view((- 1), input_ids.size((- 1))) if (input_ids is not None) else None)
flat_position_ids = (position_ids.view((- 1), position_ids.size((- 1))) if (position_ids is not None) else None)
flat_token_type_ids = (token_type_ids.view((- 1), token_type_ids.size((- 1))) if (token_type_ids is not None) else None)
flat_attention_mask = (attention_mask.view((- 1), attention_mask.size((- 1))) if (attention_mask is not None) else None)
flat_inputs_embeds = (inputs_embeds.view((- 1), inputs_embeds.size((- 2)), inputs_embeds.size((- 1))) if (inputs_embeds is not None) else None)
past_key_values = self.get_prompt(batch_size=(batch_size * num_choices))
prefix_attention_mask = torch.ones((batch_size * num_choices), self.pre_seq_len).to(self.roberta.device)
flat_attention_mask = torch.cat((prefix_attention_mask, flat_attention_mask), dim=1)
outputs = self.roberta(flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids, attention_mask=flat_attention_mask, head_mask=head_mask, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, adapter_names=adapter_names, past_key_values=past_key_values)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view((- 1), num_choices)
loss = None
if (labels is not None):
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if (not return_dict):
output = ((reshaped_logits,) + outputs[2:])
return (((loss,) + output) if (loss is not None) else output)
return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) | labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
:obj:`input_ids` above) | model/multiple_choice.py | forward | guanzhchen/PETuning | 10 | python | def forward(self, input_ids=None, token_type_ids=None, attention_mask=None, labels=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, adapter_names=None, head=None, **kwargs):
'\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,\n num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See\n :obj:`input_ids` above)\n '
return_dict = (return_dict if (return_dict is not None) else self.config.use_return_dict)
(batch_size, num_choices) = (input_ids.shape[:2] if (input_ids is not None) else inputs_embeds.shape[:2])
flat_input_ids = (input_ids.view((- 1), input_ids.size((- 1))) if (input_ids is not None) else None)
flat_position_ids = (position_ids.view((- 1), position_ids.size((- 1))) if (position_ids is not None) else None)
flat_token_type_ids = (token_type_ids.view((- 1), token_type_ids.size((- 1))) if (token_type_ids is not None) else None)
flat_attention_mask = (attention_mask.view((- 1), attention_mask.size((- 1))) if (attention_mask is not None) else None)
flat_inputs_embeds = (inputs_embeds.view((- 1), inputs_embeds.size((- 2)), inputs_embeds.size((- 1))) if (inputs_embeds is not None) else None)
past_key_values = self.get_prompt(batch_size=(batch_size * num_choices))
prefix_attention_mask = torch.ones((batch_size * num_choices), self.pre_seq_len).to(self.roberta.device)
flat_attention_mask = torch.cat((prefix_attention_mask, flat_attention_mask), dim=1)
outputs = self.roberta(flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids, attention_mask=flat_attention_mask, head_mask=head_mask, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, adapter_names=adapter_names, past_key_values=past_key_values)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view((- 1), num_choices)
loss = None
if (labels is not None):
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if (not return_dict):
output = ((reshaped_logits,) + outputs[2:])
return (((loss,) + output) if (loss is not None) else output)
return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) | def forward(self, input_ids=None, token_type_ids=None, attention_mask=None, labels=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, adapter_names=None, head=None, **kwargs):
'\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,\n num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See\n :obj:`input_ids` above)\n '
return_dict = (return_dict if (return_dict is not None) else self.config.use_return_dict)
(batch_size, num_choices) = (input_ids.shape[:2] if (input_ids is not None) else inputs_embeds.shape[:2])
flat_input_ids = (input_ids.view((- 1), input_ids.size((- 1))) if (input_ids is not None) else None)
flat_position_ids = (position_ids.view((- 1), position_ids.size((- 1))) if (position_ids is not None) else None)
flat_token_type_ids = (token_type_ids.view((- 1), token_type_ids.size((- 1))) if (token_type_ids is not None) else None)
flat_attention_mask = (attention_mask.view((- 1), attention_mask.size((- 1))) if (attention_mask is not None) else None)
flat_inputs_embeds = (inputs_embeds.view((- 1), inputs_embeds.size((- 2)), inputs_embeds.size((- 1))) if (inputs_embeds is not None) else None)
past_key_values = self.get_prompt(batch_size=(batch_size * num_choices))
prefix_attention_mask = torch.ones((batch_size * num_choices), self.pre_seq_len).to(self.roberta.device)
flat_attention_mask = torch.cat((prefix_attention_mask, flat_attention_mask), dim=1)
outputs = self.roberta(flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids, attention_mask=flat_attention_mask, head_mask=head_mask, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, adapter_names=adapter_names, past_key_values=past_key_values)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view((- 1), num_choices)
loss = None
if (labels is not None):
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if (not return_dict):
output = ((reshaped_logits,) + outputs[2:])
return (((loss,) + output) if (loss is not None) else output)
return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)<|docstring|>labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
:obj:`input_ids` above)<|endoftext|> |
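Two details of this prefix-tuning forward pass are easy to miss: the prompt adds pre_seq_len virtual tokens per sequence, so an all-ones mask of that length is concatenated in front of the flattened attention mask, and everything happens at batch_size * num_choices granularity. The shape-only sketch below uses torch with made-up sizes; get_prompt and the RoBERTa model itself are not reproduced.

import torch

batch_size, num_choices, seq_len, pre_seq_len = 2, 4, 16, 8

# Flattened attention mask for batch_size * num_choices sequences.
flat_attention_mask = torch.ones(batch_size * num_choices, seq_len)

# One mask entry per virtual prefix token, prepended to each sequence.
prefix_attention_mask = torch.ones(batch_size * num_choices, pre_seq_len)
full_mask = torch.cat((prefix_attention_mask, flat_attention_mask), dim=1)

print(full_mask.shape)  # torch.Size([8, 24]) == (batch*choices, pre_seq_len + seq_len)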
5830274b49c986a3e69f5810bbc6f48936df488f7e8bc8e6a856080809d3f063 | def forward(self, input_ids=None, token_type_ids=None, attention_mask=None, labels=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, adapter_names=None, head=None, **kwargs):
'\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,\n num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See\n :obj:`input_ids` above)\n '
return_dict = (return_dict if (return_dict is not None) else self.config.use_return_dict)
(batch_size, num_choices) = (input_ids.shape[:2] if (input_ids is not None) else inputs_embeds.shape[:2])
flat_input_ids = (input_ids.view((- 1), input_ids.size((- 1))) if (input_ids is not None) else None)
flat_position_ids = (position_ids.view((- 1), position_ids.size((- 1))) if (position_ids is not None) else None)
flat_token_type_ids = (token_type_ids.view((- 1), token_type_ids.size((- 1))) if (token_type_ids is not None) else None)
flat_attention_mask = (attention_mask.view((- 1), attention_mask.size((- 1))) if (attention_mask is not None) else None)
flat_inputs_embeds = (inputs_embeds.view((- 1), inputs_embeds.size((- 2)), inputs_embeds.size((- 1))) if (inputs_embeds is not None) else None)
outputs = self.roberta(flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids, attention_mask=flat_attention_mask, head_mask=head_mask, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, adapter_names=adapter_names)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view((- 1), num_choices)
loss = None
if (labels is not None):
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if (not return_dict):
output = ((reshaped_logits,) + outputs[2:])
return (((loss,) + output) if (loss is not None) else output)
return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) | labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
:obj:`input_ids` above) | model/multiple_choice.py | forward | guanzhchen/PETuning | 10 | python | def forward(self, input_ids=None, token_type_ids=None, attention_mask=None, labels=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, adapter_names=None, head=None, **kwargs):
'\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,\n num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See\n :obj:`input_ids` above)\n '
return_dict = (return_dict if (return_dict is not None) else self.config.use_return_dict)
(batch_size, num_choices) = (input_ids.shape[:2] if (input_ids is not None) else inputs_embeds.shape[:2])
flat_input_ids = (input_ids.view((- 1), input_ids.size((- 1))) if (input_ids is not None) else None)
flat_position_ids = (position_ids.view((- 1), position_ids.size((- 1))) if (position_ids is not None) else None)
flat_token_type_ids = (token_type_ids.view((- 1), token_type_ids.size((- 1))) if (token_type_ids is not None) else None)
flat_attention_mask = (attention_mask.view((- 1), attention_mask.size((- 1))) if (attention_mask is not None) else None)
flat_inputs_embeds = (inputs_embeds.view((- 1), inputs_embeds.size((- 2)), inputs_embeds.size((- 1))) if (inputs_embeds is not None) else None)
outputs = self.roberta(flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids, attention_mask=flat_attention_mask, head_mask=head_mask, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, adapter_names=adapter_names)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view((- 1), num_choices)
loss = None
if (labels is not None):
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if (not return_dict):
output = ((reshaped_logits,) + outputs[2:])
return (((loss,) + output) if (loss is not None) else output)
return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) | def forward(self, input_ids=None, token_type_ids=None, attention_mask=None, labels=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, adapter_names=None, head=None, **kwargs):
'\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,\n num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See\n :obj:`input_ids` above)\n '
return_dict = (return_dict if (return_dict is not None) else self.config.use_return_dict)
(batch_size, num_choices) = (input_ids.shape[:2] if (input_ids is not None) else inputs_embeds.shape[:2])
flat_input_ids = (input_ids.view((- 1), input_ids.size((- 1))) if (input_ids is not None) else None)
flat_position_ids = (position_ids.view((- 1), position_ids.size((- 1))) if (position_ids is not None) else None)
flat_token_type_ids = (token_type_ids.view((- 1), token_type_ids.size((- 1))) if (token_type_ids is not None) else None)
flat_attention_mask = (attention_mask.view((- 1), attention_mask.size((- 1))) if (attention_mask is not None) else None)
flat_inputs_embeds = (inputs_embeds.view((- 1), inputs_embeds.size((- 2)), inputs_embeds.size((- 1))) if (inputs_embeds is not None) else None)
outputs = self.roberta(flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids, attention_mask=flat_attention_mask, head_mask=head_mask, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, adapter_names=adapter_names)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view((- 1), num_choices)
loss = None
if (labels is not None):
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if (not return_dict):
output = ((reshaped_logits,) + outputs[2:])
return (((loss,) + output) if (loss is not None) else output)
return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)<|docstring|>labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
:obj:`input_ids` above)<|endoftext|> |
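The non-prefix variant follows the standard multiple-choice recipe: inputs of shape (batch, num_choices, seq_len) are flattened to (batch * num_choices, seq_len), one score is produced per flattened sequence, and the scores are reshaped back to (batch, num_choices) before the cross-entropy loss. The tensor-shape sketch below uses dummy tensors and a bare linear head instead of RoBERTa; sizes are illustrative.

import torch
import torch.nn as nn

batch_size, num_choices, seq_len, hidden = 2, 4, 16, 32

input_ids = torch.randint(0, 100, (batch_size, num_choices, seq_len))
flat_input_ids = input_ids.view(-1, input_ids.size(-1))   # (8, 16)

# Pretend pooled encoder output, one vector per flattened sequence.
pooled = torch.randn(batch_size * num_choices, hidden)
classifier = nn.Linear(hidden, 1)

logits = classifier(pooled)                  # (8, 1)
reshaped_logits = logits.view(-1, num_choices)  # (2, 4)

labels = torch.tensor([1, 3])                # index of the correct choice per example
loss = nn.CrossEntropyLoss()(reshaped_logits, labels)
print(flat_input_ids.shape, reshaped_logits.shape, loss.item())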
3b8fc6846dc7a33ad042f465f616308ecfc4574f2d4c32c0de9d8cd943e9c101 | def test_latest_pages_build(self):
'Test the ability to retrieve the latest pages build for a repo.'
self.basic_login()
cassette_name = self.cassette_name('latest_pages_build')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert (repository is not None)
latest_build = repository.latest_pages_build()
assert isinstance(latest_build, github3.repos.pages.PagesBuild) | Test the ability to retrieve the latest pages build for a repo. | tests/integration/test_repos_pages.py | test_latest_pages_build | seveas/github3.py | 1 | python | def test_latest_pages_build(self):
self.basic_login()
cassette_name = self.cassette_name('latest_pages_build')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert (repository is not None)
latest_build = repository.latest_pages_build()
assert isinstance(latest_build, github3.repos.pages.PagesBuild) | def test_latest_pages_build(self):
self.basic_login()
cassette_name = self.cassette_name('latest_pages_build')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert (repository is not None)
latest_build = repository.latest_pages_build()
assert isinstance(latest_build, github3.repos.pages.PagesBuild)<|docstring|>Test the ability to retrieve the latest pages build for a repo.<|endoftext|> |
0cba7f7ee148021d81b50134b4e86b9722166133ebfb965e6f287c89e4624803 | def test_pages(self):
"\n Test the ability to retrieve information about a repository's pages.\n "
self.basic_login()
cassette_name = self.cassette_name('pages')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert (repository is not None)
pages_info = repository.pages()
assert isinstance(pages_info, github3.repos.pages.PagesInfo) | Test the ability to retrieve information about a repository's pages. | tests/integration/test_repos_pages.py | test_pages | seveas/github3.py | 1 | python | def test_pages(self):
"\n \n "
self.basic_login()
cassette_name = self.cassette_name('pages')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert (repository is not None)
pages_info = repository.pages()
assert isinstance(pages_info, github3.repos.pages.PagesInfo) | def test_pages(self):
"\n \n "
self.basic_login()
cassette_name = self.cassette_name('pages')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert (repository is not None)
pages_info = repository.pages()
assert isinstance(pages_info, github3.repos.pages.PagesInfo)<|docstring|>Test the ability to retrieve information about a repository's pages.<|endoftext|> |
45fc0defe7cb4dc987cd47bf101002d73a3b6e0db1fd272c995b7fb5c680d5f9 | def test_iter_pages_builds(self):
'Test the ability to list the pages builds.'
self.basic_login()
cassette_name = self.cassette_name('pages_builds')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert (repository is not None)
for build in repository.iter_pages_builds():
assert isinstance(build, github3.repos.pages.PagesBuild) | Test the ability to list the pages builds. | tests/integration/test_repos_pages.py | test_iter_pages_builds | seveas/github3.py | 1 | python | def test_iter_pages_builds(self):
self.basic_login()
cassette_name = self.cassette_name('pages_builds')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert (repository is not None)
for build in repository.iter_pages_builds():
assert isinstance(build, github3.repos.pages.PagesBuild) | def test_iter_pages_builds(self):
self.basic_login()
cassette_name = self.cassette_name('pages_builds')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
assert (repository is not None)
for build in repository.iter_pages_builds():
assert isinstance(build, github3.repos.pages.PagesBuild)<|docstring|>Test the ability to list the pages builds.<|endoftext|> |
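Outside the recorded-cassette test harness, the same github3.py Pages calls can be exercised directly. The sketch below assumes a personal access token and reuses exactly the methods the three tests above exercise (pages(), latest_pages_build(), iter_pages_builds()); the token value is a placeholder.

import github3

# Authenticate with a personal access token (placeholder value).
gh = github3.login(token="<your-token>")
repository = gh.repository("sigmavirus24", "github3.py")

print(repository.pages())               # PagesInfo for the repo's GitHub Pages site
print(repository.latest_pages_build())  # most recent PagesBuild
for build in repository.iter_pages_builds():
    print(build)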
084ded5550200dc4ece0b4fb72e8e254a0bb8d1773c8668bde5762e96ec76048 | def __init__(self, bottomLeft, topRight):
'\n :param bottomLeft: {lngMin, latMin}\n :param topRight: {lngMax, latMax}\n '
self.lngMin = bottomLeft[0]
self.lngMax = topRight[0]
self.latMin = bottomLeft[1]
self.latMax = topRight[1]
print('minimum longitude in network: ', self.lngMin)
print('maximum longitude in network: ', self.lngMax)
print('minimum latitude in network: ', self.latMin)
print('maximum latitude in network: ', self.latMax)
self.matchedReq = set()
self.matchedTax = set() | :param bottomLeft: {lngMin, latMin}
:param topRight: {lngMax, latMax} | DispatchingLogic_demo.py | __init__ | bsmsnd/AMoD | 0 | python | def __init__(self, bottomLeft, topRight):
'\n :param bottomLeft: {lngMin, latMin}\n :param topRight: {lngMax, latMax}\n '
self.lngMin = bottomLeft[0]
self.lngMax = topRight[0]
self.latMin = bottomLeft[1]
self.latMax = topRight[1]
print('minimum longitude in network: ', self.lngMin)
print('maximum longitude in network: ', self.lngMax)
print('minimum latitude in network: ', self.latMin)
print('maximum latitude in network: ', self.latMax)
self.matchedReq = set()
self.matchedTax = set() | def __init__(self, bottomLeft, topRight):
'\n :param bottomLeft: {lngMin, latMin}\n :param topRight: {lngMax, latMax}\n '
self.lngMin = bottomLeft[0]
self.lngMax = topRight[0]
self.latMin = bottomLeft[1]
self.latMax = topRight[1]
print('minimum longitude in network: ', self.lngMin)
print('maximum longitude in network: ', self.lngMax)
print('minimum latitude in network: ', self.latMin)
print('maximum latitude in network: ', self.latMax)
self.matchedReq = set()
self.matchedTax = set()<|docstring|>:param bottomLeft: {lngMin, latMin}
:param topRight: {lngMax, latMax}<|endoftext|> |
3f9809aeacb4fc34e7b8a981c80cf58c3a2e875e21b77b9a43f2e19e9ed37ac7 | def getRandomRebalanceLocation(self):
'\n ATTENTION: AMoDeus internally uses the convention (longitude, latitude) for a WGS:84 pair, not the other way\n around as in some other cases.\n '
return [random.uniform(self.lngMin, self.lngMax), random.uniform(self.latMin, self.latMax)] | ATTENTION: AMoDeus internally uses the convention (longitude, latitude) for a WGS:84 pair, not the other way
around as in some other cases. | DispatchingLogic_demo.py | getRandomRebalanceLocation | bsmsnd/AMoD | 0 | python | def getRandomRebalanceLocation(self):
'\n ATTENTION: AMoDeus internally uses the convention (longitude, latitude) for a WGS:84 pair, not the other way\n around as in some other cases.\n '
return [random.uniform(self.lngMin, self.lngMax), random.uniform(self.latMin, self.latMax)] | def getRandomRebalanceLocation(self):
'\n ATTENTION: AMoDeus internally uses the convention (longitude, latitude) for a WGS:84 pair, not the other way\n around as in some other cases.\n '
return [random.uniform(self.lngMin, self.lngMax), random.uniform(self.latMin, self.latMax)]<|docstring|>ATTENTION: AMoDeus internally uses the convention (longitude, latitude) for a WGS:84 pair, not the other way
around as in some other cases.<|endoftext|> |
1174a8742b4e600cc6b04e0f22732f5575cd4a892ff9fa08b2818b683c5a6e43 | def find_best_single_site_proposer(self, node: RVIdentifier):
'\n Finds the best proposer for a node which is\n SingleSiteUniformMetropolisHastingsProposer for\n SingleSiteUniformMetropolisHastings\n\n :param node: the node for which to return a proposer\n :returns: a proposer for the node\n '
return self.proposer_ | Finds the best proposer for a node which is
SingleSiteUniformMetropolisHastingsProposer for
SingleSiteUniformMetropolisHastings
:param node: the node for which to return a proposer
:returns: a proposer for the node | src/beanmachine/ppl/legacy/inference/single_site_uniform_mh.py | find_best_single_site_proposer | michaeltingley/beanmachine-1 | 1 | python | def find_best_single_site_proposer(self, node: RVIdentifier):
'\n Finds the best proposer for a node which is\n SingleSiteUniformMetropolisHastingsProposer for\n SingleSiteUniformMetropolisHastings\n\n :param node: the node for which to return a proposer\n :returns: a proposer for the node\n '
return self.proposer_ | def find_best_single_site_proposer(self, node: RVIdentifier):
'\n Finds the best proposer for a node which is\n SingleSiteUniformMetropolisHastingsProposer for\n SingleSiteUniformMetropolisHastings\n\n :param node: the node for which to return a proposer\n :returns: a proposer for the node\n '
return self.proposer_<|docstring|>Finds the best proposer for a node which is
SingleSiteUniformMetropolisHastingsProposer for
SingleSiteUniformMetropolisHastings
:param node: the node for which to return a proposer
:returns: a proposer for the node<|endoftext|> |
bf01606025efe3cafd8cc619b57d5d2cb3e720339a07cc7814f039fc00d13314 | def parse_arguments():
'Parse training arguments'
parser = argparse.ArgumentParser()
parser.add_argument('--trn-text-path', type=str, metavar='PATH', required=True, help='path to the training text file')
parser.add_argument('--trn-feat-path', type=str, metavar='PATH', required=True, help='path to the instance feature matrix (CSR matrix, nr_insts * nr_features)')
parser.add_argument('--trn-label-path', type=str, required=True, metavar='PATH', help='path to the training label matrix (CSR matrix, nr_insts * nr_labels)')
parser.add_argument('--model-dir', type=str, required=True, metavar='PATH', help='the output directory where the models will be saved.')
parser.add_argument('--tst-text-path', type=str, metavar='PATH', default='', help='path to the test text file')
parser.add_argument('--tst-feat-path', type=str, metavar='PATH', default='', help='path to the test instance feature matrix')
parser.add_argument('--tst-label-path', type=str, metavar='PATH', default='', help='path to the file of the test label matrix')
parser.add_argument('--code-path', type=str, default='', metavar='PATH', help='path to the clustering file (CSR matrix, nr_insts * nr_labels)')
parser.add_argument('--label-feat-path', type=str, default='', metavar='PATH', help='path to the CSR npz or Row-majored npy file of the label feature matrix (nr_labels * nr_label_feats)')
parser.add_argument('--nr-splits', type=int, default=32, metavar='INT', help='number of splits used to construct hierarchy (a power of 2 is recommended)')
parser.add_argument('--min-codes', type=int, default=None, metavar='INT', help='minimal number of codes, default None to use nr-splits')
parser.add_argument('--indexer', choices=Indexer.indexer_dict.keys(), default='hierarchicalkmeans', metavar='STR', help=f"Indexer algorithm (default hierarchicalkmeans). Available choices are {', '.join(Indexer.indexer_dict.keys())}")
parser.add_argument('--max-leaf-size', type=int, default=100, metavar='INT', help='The max size of the leaf nodes of hierarchical 2-means clustering. Default 100.')
parser.add_argument('--imbalanced-ratio', type=float, default=0.0, metavar='FLOAT', help='Value between 0.0 and 0.5 (inclusive). Indicates how relaxed the balancedness constraint of 2-means can be. Specifically, if an iteration of 2-means is clustering L labels, the size of the output 2 clusters will be within approx imbalanced_ratio * 2 * L of each other. (default 0.0)')
parser.add_argument('--imbalanced-depth', type=int, default=100, metavar='INT', help='After hierarchical 2-means clustering has reached this depth, it will continue clustering as if --imbalanced-ratio is set to 0.0. (default 100)')
parser.add_argument('--no-spherical', action='store_true', default=False, help='Do not l2-normalize cluster centers while clustering')
parser.add_argument('--max-iter', type=int, default=20, metavar='INT', help='max iterations for indexer (default 20)')
parser.add_argument('--max-match-clusters', type=int, default=(- 1), metavar='INT', help='max number of clusters on which to train matcher; if <0, set to number of leaf clusters. Default -1')
parser.add_argument('--no-fine-tune', action='store_true', help='whether do fine-tune on loaded/downloaded transformers')
parser.add_argument('--model-shortcut', type=str, metavar='STR', default='bert-base-uncased', help='pre-trained transformer model name shortcut for download (default bert-base-uncased)')
parser.add_argument('--init-model-dir', type=str, metavar='PATH', default='', help='path to load existing TransformerMatcher checkpoint from disk, overrides model-shortcut')
parser.add_argument('-b', '--beam-size', type=int, default=10, metavar='INT', help='the default size of beam search used in the prediction')
parser.add_argument('--only-topk', default=20, metavar='INT', type=int, help='the default number of top labels used in the prediction')
parser.add_argument('-pp', '--post-processor', type=str, choices=PostProcessor.valid_list(), default='noop', metavar='STR', help='the default post processor used in the prediction')
parser.add_argument('-ns', '--negative-sampling', type=str, choices=['tfn', 'man', 'tfn+man'], default='tfn', metavar='STR', help='Negative Sampling Schemes')
parser.add_argument('--ensemble-method', type=str, choices=['concat-only', 'transformer-only', 'average', 'rank_average', 'round_robin'], default='transformer-only', metavar='STR', help='ensemble method for transformer/concat prediction ensemble')
parser.add_argument('-t', '--threshold', type=float, default=0.1, metavar='VAL', help='threshold to sparsify the model weights (default 0.1)')
parser.add_argument('--loss-function', type=str, choices=TransformerMatcher.LOSS_FUNCTION_TYPES.keys(), default='squared-hinge', metavar='STR', help='loss function type for transformer training')
parser.add_argument('--cache-dir', default='', metavar='PATH', type=str, help='dir to store the pre-trained models downloaded from s3')
parser.add_argument('--saved-trn-pt', default='', metavar='PATH', type=str, help='dir to save/load tokenized train tensor')
parser.add_argument('--saved-val-pt', default='', metavar='PATH', type=str, help='dir to save/load tokenized validation tensor')
parser.add_argument('--truncate-length', default=128, metavar='INT', type=int, help='if given, truncate input text to this length, else use longest input length as truncate-length.')
parser.add_argument('--hidden-dropout-prob', default=0.1, metavar='VAL', type=float, help='hidden dropout prob in deep transformer models.')
parser.add_argument('--batch-size', default=32, metavar='INT', type=int, help='batch size per GPU.')
parser.add_argument('--gradient-accumulation-steps', type=int, metavar='INT', default=1, help='number of updates steps to accumulate before performing a backward/update pass.')
parser.add_argument('--learning-rate', default=0.0001, metavar='VAL', type=float, help='maximum learning rate for Adam.')
parser.add_argument('--weight-decay', default=0.0, metavar='VAL', type=float, help='weight decay rate for regularization')
parser.add_argument('--adam-epsilon', default=1e-08, metavar='VAL', type=float, help='epsilon for Adam optimizer.')
parser.add_argument('--max-grad-norm', default=1.0, metavar='VAL', type=float, help='max gradient norm.')
parser.add_argument('--num-train-epochs', default=5.0, metavar='INT', type=int, help='total number of training epochs to perform for each sub-task.')
parser.add_argument('--max-steps', default=(- 1), metavar='INT', type=int, help='if > 0: set total number of training steps to perform for each sub-task. Overrides num-train-epochs.')
parser.add_argument('--steps-scale', nargs='+', type=float, default=None, metavar='FLOAT', help='scale number of transformer fine-tuning steps for each layer. Default None to ignore')
parser.add_argument('--max-no-improve-cnt', type=int, default=(- 1), metavar='INT', help='if > 0, training will stop when this number of validation steps result in no improvment. Default -1 to ignore')
parser.add_argument('--lr-schedule', default='linear', metavar='STR', type=str, choices=['linear', 'cosine', 'cosine_with_restarts', 'polynomial', 'constant', 'constant_with_warmup'], help='learning rate schedule for transformer fine-tuning. See transformers.SchedulerType for details')
parser.add_argument('--warmup-steps', default=0, metavar='INT', type=int, help='Linear warmup over warmup-steps.')
parser.add_argument('--logging-steps', type=int, metavar='INT', default=50, help='log training information every NUM updates steps.')
parser.add_argument('--save-steps', type=int, metavar='INT', default=100, help='save checkpoint every NUM updates steps.')
parser.add_argument('--max-active-matching-labels', default=None, metavar='INT', type=int, help='max number of active matching labels, will subsample from existing negative samples if necessary. Default None to ignore.')
parser.add_argument('--max-num-labels-in-gpu', default=65536, metavar='INT', type=int, help='Upper limit on labels to put output layer in GPU. Default 65536')
parser.add_argument('--save-emb-dir', default='', metavar='PATH', type=str, help='dir to save instance embeddings.')
parser.add_argument('--disable-gpu', action='store_true', help="disable CUDA training even if it's available")
parser.add_argument('--bootstrap-method', type=str, default='linear', choices=['linear', 'inherit', None], help='initialization method for the text_model weights. Ignored if None is given. Default linear')
parser.add_argument('--batch-gen-workers', type=int, metavar='INT', default=4, help='number of CPUs to use for batch generation')
parser.add_argument('--seed', type=int, metavar='INT', default=0, help='random seed for initialization')
parser.add_argument('--verbose-level', type=int, choices=logging_util.log_levels.keys(), default=2, metavar='INT', help=f"the verbose level, {', '.join([((str(k) + ' for ') + logging.getLevelName(v)) for (k, v) in logging_util.log_levels.items()])}. Default 2")
return parser | Parse training arguments | pecos/xmc/xtransformer/train.py | parse_arguments | Xabilahu/pecos | 2 | python | def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--trn-text-path', type=str, metavar='PATH', required=True, help='path to the training text file')
parser.add_argument('--trn-feat-path', type=str, metavar='PATH', required=True, help='path to the instance feature matrix (CSR matrix, nr_insts * nr_features)')
parser.add_argument('--trn-label-path', type=str, required=True, metavar='PATH', help='path to the training label matrix (CSR matrix, nr_insts * nr_labels)')
parser.add_argument('--model-dir', type=str, required=True, metavar='PATH', help='the output directory where the models will be saved.')
parser.add_argument('--tst-text-path', type=str, metavar='PATH', default='', help='path to the test text file')
parser.add_argument('--tst-feat-path', type=str, metavar='PATH', default='', help='path to the test instance feature matrix')
parser.add_argument('--tst-label-path', type=str, metavar='PATH', default='', help='path to the file of the test label matrix')
parser.add_argument('--code-path', type=str, default='', metavar='PATH', help='path to the clustering file (CSR matrix, nr_insts * nr_labels)')
parser.add_argument('--label-feat-path', type=str, default='', metavar='PATH', help='path to the CSR npz or Row-majored npy file of the label feature matrix (nr_labels * nr_label_feats)')
parser.add_argument('--nr-splits', type=int, default=32, metavar='INT', help='number of splits used to construct hierarchy (a power of 2 is recommended)')
parser.add_argument('--min-codes', type=int, default=None, metavar='INT', help='minimal number of codes, default None to use nr-splits')
parser.add_argument('--indexer', choices=Indexer.indexer_dict.keys(), default='hierarchicalkmeans', metavar='STR', help=f"Indexer algorithm (default hierarchicalkmeans). Available choices are {', '.join(Indexer.indexer_dict.keys())}")
parser.add_argument('--max-leaf-size', type=int, default=100, metavar='INT', help='The max size of the leaf nodes of hierarchical 2-means clustering. Default 100.')
parser.add_argument('--imbalanced-ratio', type=float, default=0.0, metavar='FLOAT', help='Value between 0.0 and 0.5 (inclusive). Indicates how relaxed the balancedness constraint of 2-means can be. Specifically, if an iteration of 2-means is clustering L labels, the size of the output 2 clusters will be within approx imbalanced_ratio * 2 * L of each other. (default 0.0)')
parser.add_argument('--imbalanced-depth', type=int, default=100, metavar='INT', help='After hierarchical 2-means clustering has reached this depth, it will continue clustering as if --imbalanced-ratio is set to 0.0. (default 100)')
parser.add_argument('--no-spherical', action='store_true', default=False, help='Do not l2-normalize cluster centers while clustering')
parser.add_argument('--max-iter', type=int, default=20, metavar='INT', help='max iterations for indexer (default 20)')
parser.add_argument('--max-match-clusters', type=int, default=(- 1), metavar='INT', help='max number of clusters on which to train matcher; if <0, set to number of leaf clusters. Default -1')
parser.add_argument('--no-fine-tune', action='store_true', help='whether do fine-tune on loaded/downloaded transformers')
parser.add_argument('--model-shortcut', type=str, metavar='STR', default='bert-base-uncased', help='pre-trained transformer model name shortcut for download (default bert-base-uncased)')
parser.add_argument('--init-model-dir', type=str, metavar='PATH', default='', help='path to load existing TransformerMatcher checkpoint from disk, overrides model-shortcut')
parser.add_argument('-b', '--beam-size', type=int, default=10, metavar='INT', help='the default size of beam search used in the prediction')
parser.add_argument('--only-topk', default=20, metavar='INT', type=int, help='the default number of top labels used in the prediction')
parser.add_argument('-pp', '--post-processor', type=str, choices=PostProcessor.valid_list(), default='noop', metavar='STR', help='the default post processor used in the prediction')
parser.add_argument('-ns', '--negative-sampling', type=str, choices=['tfn', 'man', 'tfn+man'], default='tfn', metavar='STR', help='Negative Sampling Schemes')
parser.add_argument('--ensemble-method', type=str, choices=['concat-only', 'transformer-only', 'average', 'rank_average', 'round_robin'], default='transformer-only', metavar='STR', help='ensemble method for transformer/concat prediction ensemble')
parser.add_argument('-t', '--threshold', type=float, default=0.1, metavar='VAL', help='threshold to sparsify the model weights (default 0.1)')
parser.add_argument('--loss-function', type=str, choices=TransformerMatcher.LOSS_FUNCTION_TYPES.keys(), default='squared-hinge', metavar='STR', help='loss function type for transformer training')
parser.add_argument('--cache-dir', default='', metavar='PATH', type=str, help='dir to store the pre-trained models downloaded from s3')
parser.add_argument('--saved-trn-pt', default='', metavar='PATH', type=str, help='dir to save/load tokenized train tensor')
parser.add_argument('--saved-val-pt', default='', metavar='PATH', type=str, help='dir to save/load tokenized validation tensor')
parser.add_argument('--truncate-length', default=128, metavar='INT', type=int, help='if given, truncate input text to this length, else use longest input length as truncate-length.')
parser.add_argument('--hidden-dropout-prob', default=0.1, metavar='VAL', type=float, help='hidden dropout prob in deep transformer models.')
parser.add_argument('--batch-size', default=32, metavar='INT', type=int, help='batch size per GPU.')
parser.add_argument('--gradient-accumulation-steps', type=int, metavar='INT', default=1, help='number of updates steps to accumulate before performing a backward/update pass.')
parser.add_argument('--learning-rate', default=0.0001, metavar='VAL', type=float, help='maximum learning rate for Adam.')
parser.add_argument('--weight-decay', default=0.0, metavar='VAL', type=float, help='weight decay rate for regularization')
parser.add_argument('--adam-epsilon', default=1e-08, metavar='VAL', type=float, help='epsilon for Adam optimizer.')
parser.add_argument('--max-grad-norm', default=1.0, metavar='VAL', type=float, help='max gradient norm.')
parser.add_argument('--num-train-epochs', default=5.0, metavar='INT', type=int, help='total number of training epochs to perform for each sub-task.')
parser.add_argument('--max-steps', default=(- 1), metavar='INT', type=int, help='if > 0: set total number of training steps to perform for each sub-task. Overrides num-train-epochs.')
parser.add_argument('--steps-scale', nargs='+', type=float, default=None, metavar='FLOAT', help='scale number of transformer fine-tuning steps for each layer. Default None to ignore')
parser.add_argument('--max-no-improve-cnt', type=int, default=(- 1), metavar='INT', help='if > 0, training will stop when this number of validation steps result in no improvement. Default -1 to ignore')
parser.add_argument('--lr-schedule', default='linear', metavar='STR', type=str, choices=['linear', 'cosine', 'cosine_with_restarts', 'polynomial', 'constant', 'constant_with_warmup'], help='learning rate schedule for transformer fine-tuning. See transformers.SchedulerType for details')
parser.add_argument('--warmup-steps', default=0, metavar='INT', type=int, help='Linear warmup over warmup-steps.')
parser.add_argument('--logging-steps', type=int, metavar='INT', default=50, help='log training information every NUM updates steps.')
parser.add_argument('--save-steps', type=int, metavar='INT', default=100, help='save checkpoint every NUM updates steps.')
parser.add_argument('--max-active-matching-labels', default=None, metavar='INT', type=int, help='max number of active matching labels, will subsample from existing negative samples if necessary. Default None to ignore.')
parser.add_argument('--max-num-labels-in-gpu', default=65536, metavar='INT', type=int, help='Upper limit on labels to put output layer in GPU. Default 65536')
parser.add_argument('--save-emb-dir', default='', metavar='PATH', type=str, help='dir to save instance embeddings.')
parser.add_argument('--disable-gpu', action='store_true', help="disable CUDA training even if it's available")
parser.add_argument('--bootstrap-method', type=str, default='linear', choices=['linear', 'inherit', None], help='initialization method for the text_model weights. Ignored if None is given. Default linear')
parser.add_argument('--batch-gen-workers', type=int, metavar='INT', default=4, help='number of CPUs to use for batch generation')
parser.add_argument('--seed', type=int, metavar='INT', default=0, help='random seed for initialization')
parser.add_argument('--verbose-level', type=int, choices=logging_util.log_levels.keys(), default=2, metavar='INT', help=f"the verbose level, {', '.join([((str(k) + ' for ') + logging.getLevelName(v)) for (k, v) in logging_util.log_levels.items()])}. Default 2")
return parser | def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--trn-text-path', type=str, metavar='PATH', required=True, help='path to the training text file')
parser.add_argument('--trn-feat-path', type=str, metavar='PATH', required=True, help='path to the instance feature matrix (CSR matrix, nr_insts * nr_features)')
parser.add_argument('--trn-label-path', type=str, required=True, metavar='PATH', help='path to the training label matrix (CSR matrix, nr_insts * nr_labels)')
parser.add_argument('--model-dir', type=str, required=True, metavar='PATH', help='the output directory where the models will be saved.')
parser.add_argument('--tst-text-path', type=str, metavar='PATH', default='', help='path to the test text file')
parser.add_argument('--tst-feat-path', type=str, metavar='PATH', default='', help='path to the test instance feature matrix')
parser.add_argument('--tst-label-path', type=str, metavar='PATH', default='', help='path to the file of the test label matrix')
parser.add_argument('--code-path', type=str, default='', metavar='PATH', help='path to the clustering file (CSR matrix, nr_insts * nr_labels)')
parser.add_argument('--label-feat-path', type=str, default='', metavar='PATH', help='path to the CSR npz or Row-majored npy file of the label feature matrix (nr_labels * nr_label_feats)')
parser.add_argument('--nr-splits', type=int, default=32, metavar='INT', help='number of splits used to construct hierarchy (a power of 2 is recommended)')
parser.add_argument('--min-codes', type=int, default=None, metavar='INT', help='minimal number of codes, default None to use nr-splits')
parser.add_argument('--indexer', choices=Indexer.indexer_dict.keys(), default='hierarchicalkmeans', metavar='STR', help=f"Indexer algorithm (default hierarchicalkmeans). Available choices are {', '.join(Indexer.indexer_dict.keys())}")
parser.add_argument('--max-leaf-size', type=int, default=100, metavar='INT', help='The max size of the leaf nodes of hierarchical 2-means clustering. Default 100.')
parser.add_argument('--imbalanced-ratio', type=float, default=0.0, metavar='FLOAT', help='Value between 0.0 and 0.5 (inclusive). Indicates how relaxed the balancedness constraint of 2-means can be. Specifically, if an iteration of 2-means is clustering L labels, the size of the output 2 clusters will be within approx imbalanced_ratio * 2 * L of each other. (default 0.0)')
parser.add_argument('--imbalanced-depth', type=int, default=100, metavar='INT', help='After hierarchical 2-means clustering has reached this depth, it will continue clustering as if --imbalanced-ratio is set to 0.0. (default 100)')
parser.add_argument('--no-spherical', action='store_true', default=False, help='Do not l2-normalize cluster centers while clustering')
parser.add_argument('--max-iter', type=int, default=20, metavar='INT', help='max iterations for indexer (default 20)')
parser.add_argument('--max-match-clusters', type=int, default=(- 1), metavar='INT', help='max number of clusters on which to train matcher; if <0, set to number of leaf clusters. Default -1')
parser.add_argument('--no-fine-tune', action='store_true', help='whether do fine-tune on loaded/downloaded transformers')
parser.add_argument('--model-shortcut', type=str, metavar='STR', default='bert-base-uncased', help='pre-trained transformer model name shortcut for download (default bert-base-uncased)')
parser.add_argument('--init-model-dir', type=str, metavar='PATH', default='', help='path to load existing TransformerMatcher checkpoint from disk, overrides model-shortcut')
parser.add_argument('-b', '--beam-size', type=int, default=10, metavar='INT', help='the default size of beam search used in the prediction')
parser.add_argument('--only-topk', default=20, metavar='INT', type=int, help='the default number of top labels used in the prediction')
parser.add_argument('-pp', '--post-processor', type=str, choices=PostProcessor.valid_list(), default='noop', metavar='STR', help='the default post processor used in the prediction')
parser.add_argument('-ns', '--negative-sampling', type=str, choices=['tfn', 'man', 'tfn+man'], default='tfn', metavar='STR', help='Negative Sampling Schemes')
parser.add_argument('--ensemble-method', type=str, choices=['concat-only', 'transformer-only', 'average', 'rank_average', 'round_robin'], default='transformer-only', metavar='STR', help='ensemble method for transformer/concat prediction ensemble')
parser.add_argument('-t', '--threshold', type=float, default=0.1, metavar='VAL', help='threshold to sparsify the model weights (default 0.1)')
parser.add_argument('--loss-function', type=str, choices=TransformerMatcher.LOSS_FUNCTION_TYPES.keys(), default='squared-hinge', metavar='STR', help='loss function type for transformer training')
parser.add_argument('--cache-dir', default='', metavar='PATH', type=str, help='dir to store the pre-trained models downloaded from s3')
parser.add_argument('--saved-trn-pt', default='', metavar='PATH', type=str, help='dir to save/load tokenized train tensor')
parser.add_argument('--saved-val-pt', default='', metavar='PATH', type=str, help='dir to save/load tokenized validation tensor')
parser.add_argument('--truncate-length', default=128, metavar='INT', type=int, help='if given, truncate input text to this length, else use longest input length as truncate-length.')
parser.add_argument('--hidden-dropout-prob', default=0.1, metavar='VAL', type=float, help='hidden dropout prob in deep transformer models.')
parser.add_argument('--batch-size', default=32, metavar='INT', type=int, help='batch size per GPU.')
parser.add_argument('--gradient-accumulation-steps', type=int, metavar='INT', default=1, help='number of updates steps to accumulate before performing a backward/update pass.')
parser.add_argument('--learning-rate', default=0.0001, metavar='VAL', type=float, help='maximum learning rate for Adam.')
parser.add_argument('--weight-decay', default=0.0, metavar='VAL', type=float, help='weight decay rate for regularization')
parser.add_argument('--adam-epsilon', default=1e-08, metavar='VAL', type=float, help='epsilon for Adam optimizer.')
parser.add_argument('--max-grad-norm', default=1.0, metavar='VAL', type=float, help='max gradient norm.')
parser.add_argument('--num-train-epochs', default=5.0, metavar='INT', type=int, help='total number of training epochs to perform for each sub-task.')
parser.add_argument('--max-steps', default=(- 1), metavar='INT', type=int, help='if > 0: set total number of training steps to perform for each sub-task. Overrides num-train-epochs.')
parser.add_argument('--steps-scale', nargs='+', type=float, default=None, metavar='FLOAT', help='scale number of transformer fine-tuning steps for each layer. Default None to ignore')
parser.add_argument('--max-no-improve-cnt', type=int, default=(- 1), metavar='INT', help='if > 0, training will stop when this number of validation steps result in no improvement. Default -1 to ignore')
parser.add_argument('--lr-schedule', default='linear', metavar='STR', type=str, choices=['linear', 'cosine', 'cosine_with_restarts', 'polynomial', 'constant', 'constant_with_warmup'], help='learning rate schedule for transformer fine-tuning. See transformers.SchedulerType for details')
parser.add_argument('--warmup-steps', default=0, metavar='INT', type=int, help='Linear warmup over warmup-steps.')
parser.add_argument('--logging-steps', type=int, metavar='INT', default=50, help='log training information every NUM updates steps.')
parser.add_argument('--save-steps', type=int, metavar='INT', default=100, help='save checkpoint every NUM updates steps.')
parser.add_argument('--max-active-matching-labels', default=None, metavar='INT', type=int, help='max number of active matching labels, will subsample from existing negative samples if necessary. Default None to ignore.')
parser.add_argument('--max-num-labels-in-gpu', default=65536, metavar='INT', type=int, help='Upper limit on labels to put output layer in GPU. Default 65536')
parser.add_argument('--save-emb-dir', default='', metavar='PATH', type=str, help='dir to save instance embeddings.')
parser.add_argument('--disable-gpu', action='store_true', help="disable CUDA training even if it's available")
parser.add_argument('--bootstrap-method', type=str, default='linear', choices=['linear', 'inherit', None], help='initialization method for the text_model weights. Ignored if None is given. Default linear')
parser.add_argument('--batch-gen-workers', type=int, metavar='INT', default=4, help='number of CPUs to use for batch generation')
parser.add_argument('--seed', type=int, metavar='INT', default=0, help='random seed for initialization')
parser.add_argument('--verbose-level', type=int, choices=logging_util.log_levels.keys(), default=2, metavar='INT', help=f"the verbose level, {', '.join([((str(k) + ' for ') + logging.getLevelName(v)) for (k, v) in logging_util.log_levels.items()])}. Default 2")
return parser<|docstring|>Parse training arguments<|endoftext|> |
29734b70af423b06dfa2957f699829898bd8f0ff986c4a5e55a9cc5b5325dc74 | def do_train(args):
'Train and save X-Transformer model.\n\n Args:\n args (argparse.Namespace): Command line arguments parsed by `parser.parse_args()`\n '
torch_util.set_seed(args.seed)
LOGGER.info('Setting random seed {}'.format(args.seed))
X_trn = smat_util.load_matrix(args.trn_feat_path, dtype=np.float32)
LOGGER.info('Loaded training feature matrix with shape={}'.format(X_trn.shape))
Y_trn = smat_util.load_matrix(args.trn_label_path, dtype=np.float32)
LOGGER.info('Loaded training label matrix with shape={}'.format(Y_trn.shape))
if args.tst_feat_path:
X_tst = smat_util.load_matrix(args.tst_feat_path, dtype=np.float32)
LOGGER.info('Loaded test feature matrix with shape={}'.format(X_tst.shape))
else:
X_tst = None
if args.tst_label_path:
Y_tst = smat_util.load_matrix(args.tst_label_path, dtype=np.float32)
LOGGER.info('Loaded test label matrix with shape={}'.format(Y_tst.shape))
else:
Y_tst = None
(_, trn_corpus) = Preprocessor.load_data_from_file(args.trn_text_path, label_text_path=None, text_pos=0)
LOGGER.info('Loaded {} training sequences'.format(len(trn_corpus)))
if args.tst_text_path:
(_, tst_corpus) = Preprocessor.load_data_from_file(args.tst_text_path, label_text_path=None, text_pos=0)
LOGGER.info('Loaded {} test sequences'.format(len(tst_corpus)))
else:
tst_corpus = None
if os.path.exists(args.code_path):
cluster_chain = ClusterChain.from_partial_chain(smat_util.load_matrix(args.code_path), min_codes=args.min_codes, nr_splits=args.nr_splits)
LOGGER.info('Loaded from code-path: {}'.format(args.code_path))
else:
if os.path.isfile(args.label_feat_path):
label_feat = smat_util.load_matrix(args.label_feat_path, dtype=np.float32)
LOGGER.info('Loaded label feature matrix shape={}, from {}'.format(label_feat.shape, args.label_feat_path))
else:
label_feat = LabelEmbeddingFactory.pifa(Y_trn, X_trn)
if args.label_feat_path:
smat_util.save_matrix(args.label_feat_path, label_feat)
LOGGER.info('Created label feature matrix with shape={}, saved to {}'.format(label_feat.shape, args.label_feat_path))
cluster_chain = Indexer.gen(label_feat, args.indexer, nr_splits=args.nr_splits, min_codes=args.min_codes, max_leaf_size=args.max_leaf_size, imbalanced_depth=args.imbalanced_depth, imbalanced_ratio=args.imbalanced_ratio, seed=args.seed, max_iter=args.max_iter, spherical=(not args.no_spherical))
del label_feat
gc.collect()
if args.code_path:
cluster_chain.save(args.code_path)
LOGGER.info('Created clustering chain, saved to {}'.format(args.code_path))
LOGGER.info('Constructed clustering chain for ranker: {}'.format([cc.shape for cc in cluster_chain]))
nr_leaf_clusters = cluster_chain[(- 1)].shape[1]
if (args.max_match_clusters < 0):
args.max_match_clusters = nr_leaf_clusters
if (args.max_match_clusters < cluster_chain[(- 1)].shape[0]):
args.ranker_level = (len(cluster_chain) - next((level for (level, C) in enumerate(cluster_chain[:]) if (C.shape[1] >= args.max_match_clusters))))
LOGGER.info('Apply matcher at ranker-level {} with nr_labels={}'.format(args.ranker_level, cluster_chain[(- args.ranker_level)].shape[1]))
else:
args.ranker_level = 0
LOGGER.info('Apply matcher at ranker-level 0 with nr_labels={}'.format(cluster_chain[(- 1)].shape[0]))
trn_prob = MLProblemWithText(trn_corpus, X_trn, Y_trn)
if all(((v is not None) for v in [tst_corpus, X_tst, Y_tst])):
val_prob = MLProblemWithText(tst_corpus, X_tst, Y_tst)
else:
val_prob = None
if (not args.saved_trn_pt):
temp_trn_pt_dir = tempfile.TemporaryDirectory()
args.saved_trn_pt = f'{temp_trn_pt_dir.name}/X_trn.pt'
if (not args.saved_val_pt):
temp_val_pt_dir = tempfile.TemporaryDirectory()
args.saved_val_pt = f'{temp_val_pt_dir.name}/X_val.pt'
args.neg_mining_chain = args.negative_sampling
train_params = XTransformer.TrainParams.from_dict(vars(args), recursive=True)
pred_params = XTransformer.PredParams.from_dict(vars(args), recursive=True)
xtf = XTransformer.train(trn_prob, cluster_chain, val_prob=val_prob, train_params=train_params, pred_params=pred_params, beam_size=args.beam_size, steps_scale=args.steps_scale)
xtf.save(args.model_dir) | Train and save X-Transformer model.
Args:
args (argparse.Namespace): Command line arguments parsed by `parser.parse_args()` | pecos/xmc/xtransformer/train.py | do_train | Xabilahu/pecos | 2 | python | def do_train(args):
'Train and save X-Transformer model.\n\n Args:\n args (argparse.Namespace): Command line arguments parsed by `parser.parse_args()`\n '
torch_util.set_seed(args.seed)
LOGGER.info('Setting random seed {}'.format(args.seed))
X_trn = smat_util.load_matrix(args.trn_feat_path, dtype=np.float32)
LOGGER.info('Loaded training feature matrix with shape={}'.format(X_trn.shape))
Y_trn = smat_util.load_matrix(args.trn_label_path, dtype=np.float32)
LOGGER.info('Loaded training label matrix with shape={}'.format(Y_trn.shape))
if args.tst_feat_path:
X_tst = smat_util.load_matrix(args.tst_feat_path, dtype=np.float32)
LOGGER.info('Loaded test feature matrix with shape={}'.format(X_tst.shape))
else:
X_tst = None
if args.tst_label_path:
Y_tst = smat_util.load_matrix(args.tst_label_path, dtype=np.float32)
LOGGER.info('Loaded test label matrix with shape={}'.format(Y_tst.shape))
else:
Y_tst = None
(_, trn_corpus) = Preprocessor.load_data_from_file(args.trn_text_path, label_text_path=None, text_pos=0)
LOGGER.info('Loaded {} training sequences'.format(len(trn_corpus)))
if args.tst_text_path:
(_, tst_corpus) = Preprocessor.load_data_from_file(args.tst_text_path, label_text_path=None, text_pos=0)
LOGGER.info('Loaded {} test sequences'.format(len(tst_corpus)))
else:
tst_corpus = None
if os.path.exists(args.code_path):
cluster_chain = ClusterChain.from_partial_chain(smat_util.load_matrix(args.code_path), min_codes=args.min_codes, nr_splits=args.nr_splits)
LOGGER.info('Loaded from code-path: {}'.format(args.code_path))
else:
if os.path.isfile(args.label_feat_path):
label_feat = smat_util.load_matrix(args.label_feat_path, dtype=np.float32)
LOGGER.info('Loaded label feature matrix shape={}, from {}'.format(label_feat.shape, args.label_feat_path))
else:
label_feat = LabelEmbeddingFactory.pifa(Y_trn, X_trn)
if args.label_feat_path:
smat_util.save_matrix(args.label_feat_path, label_feat)
LOGGER.info('Created label feature matrix with shape={}, saved to {}'.format(label_feat.shape, args.label_feat_path))
cluster_chain = Indexer.gen(label_feat, args.indexer, nr_splits=args.nr_splits, min_codes=args.min_codes, max_leaf_size=args.max_leaf_size, imbalanced_depth=args.imbalanced_depth, imbalanced_ratio=args.imbalanced_ratio, seed=args.seed, max_iter=args.max_iter, spherical=(not args.no_spherical))
del label_feat
gc.collect()
if args.code_path:
cluster_chain.save(args.code_path)
LOGGER.info('Created clustering chain, saved to {}'.format(args.code_path))
LOGGER.info('Constructed clustering chain for ranker: {}'.format([cc.shape for cc in cluster_chain]))
nr_leaf_clusters = cluster_chain[(- 1)].shape[1]
if (args.max_match_clusters < 0):
args.max_match_clusters = nr_leaf_clusters
if (args.max_match_clusters < cluster_chain[(- 1)].shape[0]):
args.ranker_level = (len(cluster_chain) - next((level for (level, C) in enumerate(cluster_chain[:]) if (C.shape[1] >= args.max_match_clusters))))
LOGGER.info('Apply matcher at ranker-level {} with nr_labels={}'.format(args.ranker_level, cluster_chain[(- args.ranker_level)].shape[1]))
else:
args.ranker_level = 0
LOGGER.info('Apply matcher at ranker-level 0 with nr_labels={}'.format(cluster_chain[(- 1)].shape[0]))
trn_prob = MLProblemWithText(trn_corpus, X_trn, Y_trn)
if all(((v is not None) for v in [tst_corpus, X_tst, Y_tst])):
val_prob = MLProblemWithText(tst_corpus, X_tst, Y_tst)
else:
val_prob = None
if (not args.saved_trn_pt):
temp_trn_pt_dir = tempfile.TemporaryDirectory()
args.saved_trn_pt = f'{temp_trn_pt_dir.name}/X_trn.pt'
if (not args.saved_val_pt):
temp_val_pt_dir = tempfile.TemporaryDirectory()
args.saved_val_pt = f'{temp_val_pt_dir.name}/X_val.pt'
args.neg_mining_chain = args.negative_sampling
train_params = XTransformer.TrainParams.from_dict(vars(args), recursive=True)
pred_params = XTransformer.PredParams.from_dict(vars(args), recursive=True)
xtf = XTransformer.train(trn_prob, cluster_chain, val_prob=val_prob, train_params=train_params, pred_params=pred_params, beam_size=args.beam_size, steps_scale=args.steps_scale)
xtf.save(args.model_dir) | def do_train(args):
'Train and save X-Transformer model.\n\n Args:\n args (argparse.Namespace): Command line arguments parsed by `parser.parse_args()`\n '
torch_util.set_seed(args.seed)
LOGGER.info('Setting random seed {}'.format(args.seed))
X_trn = smat_util.load_matrix(args.trn_feat_path, dtype=np.float32)
LOGGER.info('Loaded training feature matrix with shape={}'.format(X_trn.shape))
Y_trn = smat_util.load_matrix(args.trn_label_path, dtype=np.float32)
LOGGER.info('Loaded training label matrix with shape={}'.format(Y_trn.shape))
if args.tst_feat_path:
X_tst = smat_util.load_matrix(args.tst_feat_path, dtype=np.float32)
LOGGER.info('Loaded test feature matrix with shape={}'.format(X_tst.shape))
else:
X_tst = None
if args.tst_label_path:
Y_tst = smat_util.load_matrix(args.tst_label_path, dtype=np.float32)
LOGGER.info('Loaded test label matrix with shape={}'.format(Y_tst.shape))
else:
Y_tst = None
(_, trn_corpus) = Preprocessor.load_data_from_file(args.trn_text_path, label_text_path=None, text_pos=0)
LOGGER.info('Loaded {} training sequences'.format(len(trn_corpus)))
if args.tst_text_path:
(_, tst_corpus) = Preprocessor.load_data_from_file(args.tst_text_path, label_text_path=None, text_pos=0)
LOGGER.info('Loaded {} test sequences'.format(len(tst_corpus)))
else:
tst_corpus = None
if os.path.exists(args.code_path):
cluster_chain = ClusterChain.from_partial_chain(smat_util.load_matrix(args.code_path), min_codes=args.min_codes, nr_splits=args.nr_splits)
LOGGER.info('Loaded from code-path: {}'.format(args.code_path))
else:
if os.path.isfile(args.label_feat_path):
label_feat = smat_util.load_matrix(args.label_feat_path, dtype=np.float32)
LOGGER.info('Loaded label feature matrix shape={}, from {}'.format(label_feat.shape, args.label_feat_path))
else:
label_feat = LabelEmbeddingFactory.pifa(Y_trn, X_trn)
if args.label_feat_path:
smat_util.save_matrix(args.label_feat_path, label_feat)
LOGGER.info('Created label feature matrix with shape={}, saved to {}'.format(label_feat.shape, args.label_feat_path))
cluster_chain = Indexer.gen(label_feat, args.indexer, nr_splits=args.nr_splits, min_codes=args.min_codes, max_leaf_size=args.max_leaf_size, imbalanced_depth=args.imbalanced_depth, imbalanced_ratio=args.imbalanced_ratio, seed=args.seed, max_iter=args.max_iter, spherical=(not args.no_spherical))
del label_feat
gc.collect()
if args.code_path:
cluster_chain.save(args.code_path)
LOGGER.info('Created clustering chain, saved to {}'.format(args.code_path))
LOGGER.info('Constructed clustering chain for ranker: {}'.format([cc.shape for cc in cluster_chain]))
nr_leaf_clusters = cluster_chain[(- 1)].shape[1]
if (args.max_match_clusters < 0):
args.max_match_clusters = nr_leaf_clusters
if (args.max_match_clusters < cluster_chain[(- 1)].shape[0]):
args.ranker_level = (len(cluster_chain) - next((level for (level, C) in enumerate(cluster_chain[:]) if (C.shape[1] >= args.max_match_clusters))))
LOGGER.info('Apply matcher at ranker-level {} with nr_labels={}'.format(args.ranker_level, cluster_chain[(- args.ranker_level)].shape[1]))
else:
args.ranker_level = 0
LOGGER.info('Apply matcher at ranker-level 0 with nr_labels={}'.format(cluster_chain[(- 1)].shape[0]))
trn_prob = MLProblemWithText(trn_corpus, X_trn, Y_trn)
if all(((v is not None) for v in [tst_corpus, X_tst, Y_tst])):
val_prob = MLProblemWithText(tst_corpus, X_tst, Y_tst)
else:
val_prob = None
if (not args.saved_trn_pt):
temp_trn_pt_dir = tempfile.TemporaryDirectory()
args.saved_trn_pt = f'{temp_trn_pt_dir.name}/X_trn.pt'
if (not args.saved_val_pt):
temp_val_pt_dir = tempfile.TemporaryDirectory()
args.saved_val_pt = f'{temp_val_pt_dir.name}/X_val.pt'
args.neg_mining_chain = args.negative_sampling
train_params = XTransformer.TrainParams.from_dict(vars(args), recursive=True)
pred_params = XTransformer.PredParams.from_dict(vars(args), recursive=True)
xtf = XTransformer.train(trn_prob, cluster_chain, val_prob=val_prob, train_params=train_params, pred_params=pred_params, beam_size=args.beam_size, steps_scale=args.steps_scale)
xtf.save(args.model_dir)<|docstring|>Train and save X-Transformer model.
Args:
args (argparse.Namespace): Command line arguments parsed by `parser.parse_args()`<|endoftext|> |
5c46df3f633fd3cd8f312bb55a44e717ce9dba824132cd8102b21cc9b7428800 | def test_output_contract(self):
'\n Test that the output complies to the established protocol\n that is used by the IaC pipeline and cf-name-check.\n\n Output should look like:\n {\n "valid": "true", # NOTE: this is a string and NOT a boolean\n "reason": ""\n "failed_rules": [] # Optional\n }\n '
event = {'stack_template_url': 'https://fake/bucket/key'}
mock_created_s3_adapter_object = Mock()
mock_created_s3_adapter_object.download_template_to_dictionary.return_value = {'Resources': {'sg': {'Type': 'AWS::EC2::SecurityGroup', 'Properties': {'GroupDescription': 'some_group_desc', 'SecurityGroupIngress': {'CidrIp': '10.1.2.3/32', 'FromPort': 34, 'ToPort': 36, 'IpProtocol': 'tcp'}, 'VpcId': 'vpc-9f8e9dfa'}}}}
mock_s3_adapter = Mock(return_value=mock_created_s3_adapter_object)
with patch('cfripper.main.S3Adapter', new=mock_s3_adapter):
from cfripper.main import handler
event_result = handler(event, None)
assert (event_result['valid'] == 'true')
assert isinstance(event_result['reason'], str)
assert isinstance(event_result.get('failed_rules'), list) | Test that the output complies to the established protocol
that is used by the IaC pipeline and cf-name-check.
Output should look like:
{
"valid": "true", # NOTE: this is a string and NOT a boolean
"reason": ""
"failed_rules": [] # Optional
} | tests/test_main.py | test_output_contract | ocrawford555/cfripper | 0 | python | def test_output_contract(self):
'\n Test that the output complies to the established protocol\n that is used by the IaC pipeline and cf-name-check.\n\n Output should look like:\n {\n "valid": "true", # NOTE: this is a string and NOT a boolean\n "reason": ""\n "failed_rules": [] # Optional\n }\n '
event = {'stack_template_url': 'https://fake/bucket/key'}
mock_created_s3_adapter_object = Mock()
mock_created_s3_adapter_object.download_template_to_dictionary.return_value = {'Resources': {'sg': {'Type': 'AWS::EC2::SecurityGroup', 'Properties': {'GroupDescription': 'some_group_desc', 'SecurityGroupIngress': {'CidrIp': '10.1.2.3/32', 'FromPort': 34, 'ToPort': 36, 'IpProtocol': 'tcp'}, 'VpcId': 'vpc-9f8e9dfa'}}}}
mock_s3_adapter = Mock(return_value=mock_created_s3_adapter_object)
with patch('cfripper.main.S3Adapter', new=mock_s3_adapter):
from cfripper.main import handler
event_result = handler(event, None)
assert (event_result['valid'] == 'true')
assert isinstance(event_result['reason'], str)
assert isinstance(event_result.get('failed_rules'), list) | def test_output_contract(self):
'\n Test that the output complies to the established protocol\n that is used by the IaC pipeline and cf-name-check.\n\n Output should look like:\n {\n "valid": "true", # NOTE: this is a string and NOT a boolean\n "reason": ""\n "failed_rules": [] # Optional\n }\n '
event = {'stack_template_url': 'https://fake/bucket/key'}
mock_created_s3_adapter_object = Mock()
mock_created_s3_adapter_object.download_template_to_dictionary.return_value = {'Resources': {'sg': {'Type': 'AWS::EC2::SecurityGroup', 'Properties': {'GroupDescription': 'some_group_desc', 'SecurityGroupIngress': {'CidrIp': '10.1.2.3/32', 'FromPort': 34, 'ToPort': 36, 'IpProtocol': 'tcp'}, 'VpcId': 'vpc-9f8e9dfa'}}}}
mock_s3_adapter = Mock(return_value=mock_created_s3_adapter_object)
with patch('cfripper.main.S3Adapter', new=mock_s3_adapter):
from cfripper.main import handler
event_result = handler(event, None)
assert (event_result['valid'] == 'true')
assert isinstance(event_result['reason'], str)
assert isinstance(event_result.get('failed_rules'), list)<|docstring|>Test that the output complies to the established protocol
that is used by the IaC pipeline and cf-name-check.
Output should look like:
{
"valid": "true", # NOTE: this is a string and NOT a boolean
"reason": ""
"failed_rules": [] # Optional
}<|endoftext|> |
9a43782cc4712cc3dcf7985adf03bcc41c4e154af36300b85f07e9531341ee04 | def MakeAlignment(SequenceAsStrings: str):
'\n\n This is used to find out which translation window sequence is most similar\n to a reference protein sequence.\n Bio.Align.PairwiseAligner somehow failed to achieve this.\n\n '
(MainSequence, QuerySequence) = sorted(SequenceAsStrings, key=(lambda s: (- len(s))))
FragmentSize = 3
FragmentCount = 50
MatchedFragments = 0
if (len(QuerySequence) < FragmentSize):
return 0
for w in range(FragmentCount):
PossibleIndexes = range(max((len(QuerySequence) - FragmentSize), 3))
F = random.choice(PossibleIndexes)
J = (F + FragmentSize)
MatchedFragments += (QuerySequence[F:J] in MainSequence)
return MatchedFragments | This is used to find out which translation window sequence is most similar
to a reference protein sequence.
Bio.Align.PairwiseAligner somehow failed to achieve this. | straintables/Executable/Protein.py | MakeAlignment | Gab0/linkageMapper | 0 | python | def MakeAlignment(SequenceAsStrings: str):
'\n\n This is used to find out which translation window sequence is most similar\n to a reference protein sequence.\n Bio.Align.PairwiseAligner somehow failed to achieve this.\n\n '
(MainSequence, QuerySequence) = sorted(SequenceAsStrings, key=(lambda s: (- len(s))))
FragmentSize = 3
FragmentCount = 50
MatchedFragments = 0
if (len(QuerySequence) < FragmentSize):
return 0
for w in range(FragmentCount):
PossibleIndexes = range(max((len(QuerySequence) - FragmentSize), 3))
F = random.choice(PossibleIndexes)
J = (F + FragmentSize)
MatchedFragments += (QuerySequence[F:J] in MainSequence)
return MatchedFragments | def MakeAlignment(SequenceAsStrings: str):
'\n\n This is used to find out which translation window sequence is most similar\n to a reference protein sequence.\n Bio.Align.PairwiseAligner somehow failed to achieve this.\n\n '
(MainSequence, QuerySequence) = sorted(SequenceAsStrings, key=(lambda s: (- len(s))))
FragmentSize = 3
FragmentCount = 50
MatchedFragments = 0
if (len(QuerySequence) < FragmentSize):
return 0
for w in range(FragmentCount):
PossibleIndexes = range(max((len(QuerySequence) - FragmentSize), 3))
F = random.choice(PossibleIndexes)
J = (F + FragmentSize)
MatchedFragments += (QuerySequence[F:J] in MainSequence)
return MatchedFragments<|docstring|>This is used to find out which translation window sequence is most similar
to a reference protein sequence.
Bio.Align.PairwiseAligner somehow failed to achieve this.<|endoftext|> |
c091130c8febefb1799df0a6e33cb12d7ba126b5a8a4b327f095a99077d9d657 | def establish_scp_conn(self):
'Establish the secure copy connection.'
self.scp_conn = paramiko.SSHClient()
self.scp_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.scp_conn.connect(hostname=self.ssh_ctl_chan.host, port=self.ssh_ctl_chan.port, username=self.ssh_ctl_chan.username, password=self.ssh_ctl_chan.password, key_filename=self.ssh_ctl_chan.key_file, look_for_keys=self.ssh_ctl_chan.use_keys, allow_agent=self.ssh_ctl_chan.allow_agent, timeout=self.ssh_ctl_chan.timeout)
self.scp_client = scp.SCPClient(self.scp_conn.get_transport()) | Establish the secure copy connection. | netmiko/scp_handler.py | establish_scp_conn | r2r-dev/netmiko | 1 | python | def establish_scp_conn(self):
self.scp_conn = paramiko.SSHClient()
self.scp_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.scp_conn.connect(hostname=self.ssh_ctl_chan.host, port=self.ssh_ctl_chan.port, username=self.ssh_ctl_chan.username, password=self.ssh_ctl_chan.password, key_filename=self.ssh_ctl_chan.key_file, look_for_keys=self.ssh_ctl_chan.use_keys, allow_agent=self.ssh_ctl_chan.allow_agent, timeout=self.ssh_ctl_chan.timeout)
self.scp_client = scp.SCPClient(self.scp_conn.get_transport()) | def establish_scp_conn(self):
self.scp_conn = paramiko.SSHClient()
self.scp_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.scp_conn.connect(hostname=self.ssh_ctl_chan.host, port=self.ssh_ctl_chan.port, username=self.ssh_ctl_chan.username, password=self.ssh_ctl_chan.password, key_filename=self.ssh_ctl_chan.key_file, look_for_keys=self.ssh_ctl_chan.use_keys, allow_agent=self.ssh_ctl_chan.allow_agent, timeout=self.ssh_ctl_chan.timeout)
self.scp_client = scp.SCPClient(self.scp_conn.get_transport())<|docstring|>Establish the secure copy connection.<|endoftext|> |
3e84399cb9dc5ffb96159aa1368ca0a480fb17a3bf09a6587a338ca66a0f24b9 | def scp_transfer_file(self, source_file, dest_file):
'Put file using SCP (for backwards compatibility).'
self.scp_client.put(source_file, dest_file) | Put file using SCP (for backwards compatibility). | netmiko/scp_handler.py | scp_transfer_file | r2r-dev/netmiko | 1 | python | def scp_transfer_file(self, source_file, dest_file):
self.scp_client.put(source_file, dest_file) | def scp_transfer_file(self, source_file, dest_file):
self.scp_client.put(source_file, dest_file)<|docstring|>Put file using SCP (for backwards compatibility).<|endoftext|> |
4f712963f8773efd4e1ed97977fc859528c07da52d928f186e6806ca28f21ec4 | def scp_get_file(self, source_file, dest_file):
'Get file using SCP.'
self.scp_client.get(source_file, dest_file) | Get file using SCP. | netmiko/scp_handler.py | scp_get_file | r2r-dev/netmiko | 1 | python | def scp_get_file(self, source_file, dest_file):
self.scp_client.get(source_file, dest_file) | def scp_get_file(self, source_file, dest_file):
self.scp_client.get(source_file, dest_file)<|docstring|>Get file using SCP.<|endoftext|> |
efecdb7cc4f7788405ffcbf1ecddcc69dcfa32bf0961d740b31b0df296cd9c88 | def scp_put_file(self, source_file, dest_file):
'Put file using SCP.'
self.scp_client.put(source_file, dest_file) | Put file using SCP. | netmiko/scp_handler.py | scp_put_file | r2r-dev/netmiko | 1 | python | def scp_put_file(self, source_file, dest_file):
self.scp_client.put(source_file, dest_file) | def scp_put_file(self, source_file, dest_file):
self.scp_client.put(source_file, dest_file)<|docstring|>Put file using SCP.<|endoftext|> |
4476a83513937c8bde9c52a8805646e95dfd7720701434dbe0690adfefad22e5 | def close(self):
'Close the SCP connection.'
self.scp_conn.close() | Close the SCP connection. | netmiko/scp_handler.py | close | r2r-dev/netmiko | 1 | python | def close(self):
self.scp_conn.close() | def close(self):
self.scp_conn.close()<|docstring|>Close the SCP connection.<|endoftext|> |
81e8f9804d4d076bbb2d2ab04952ed78fb69f9c9d9e2b2eb14599da2363e5f20 | def __enter__(self):
'Context manager setup'
self.establish_scp_conn()
return self | Context manager setup | netmiko/scp_handler.py | __enter__ | r2r-dev/netmiko | 1 | python | def __enter__(self):
self.establish_scp_conn()
return self | def __enter__(self):
self.establish_scp_conn()
return self<|docstring|>Context manager setup<|endoftext|> |
8f9a0909d4b57f3d9c465ba79f1a9d3bdca5287b2373298b35432d7440829345 | def __exit__(self, exc_type, exc_value, traceback):
'Context manager cleanup.'
self.close_scp_chan()
if (exc_type is not None):
raise exc_type(exc_value) | Context manager cleanup. | netmiko/scp_handler.py | __exit__ | r2r-dev/netmiko | 1 | python | def __exit__(self, exc_type, exc_value, traceback):
self.close_scp_chan()
if (exc_type is not None):
raise exc_type(exc_value) | def __exit__(self, exc_type, exc_value, traceback):
self.close_scp_chan()
if (exc_type is not None):
raise exc_type(exc_value)<|docstring|>Context manager cleanup.<|endoftext|> |
ab10f3ad6fb8fe94f5320b73a7c8109fc9791404638c462be3e12283a36b21f6 | def establish_scp_conn(self):
'Establish SCP connection.'
self.scp_conn = SCPConn(self.ssh_ctl_chan) | Establish SCP connection. | netmiko/scp_handler.py | establish_scp_conn | r2r-dev/netmiko | 1 | python | def establish_scp_conn(self):
self.scp_conn = SCPConn(self.ssh_ctl_chan) | def establish_scp_conn(self):
self.scp_conn = SCPConn(self.ssh_ctl_chan)<|docstring|>Establish SCP connection.<|endoftext|> |
7a5f732ff85492538b7a89927169bbff6f3ead96c71174b99795144ebdc6057a | def close_scp_chan(self):
'Close the SCP connection to the remote network device.'
self.scp_conn.close()
self.scp_conn = None | Close the SCP connection to the remote network device. | netmiko/scp_handler.py | close_scp_chan | r2r-dev/netmiko | 1 | python | def close_scp_chan(self):
self.scp_conn.close()
self.scp_conn = None | def close_scp_chan(self):
self.scp_conn.close()
self.scp_conn = None<|docstring|>Close the SCP connection to the remote network device.<|endoftext|> |
5bd18951f827cb53bbba69b56e374f2ab03978dc3101c3f9e6460377d76cf10d | def remote_space_available(self, search_pattern='bytes total \\((.*) bytes free\\)'):
'Return space available on remote device.'
remote_cmd = 'dir {0}'.format(self.file_system)
remote_output = self.ssh_ctl_chan.send_command_expect(remote_cmd)
match = re.search(search_pattern, remote_output)
return int(match.group(1)) | Return space available on remote device. | netmiko/scp_handler.py | remote_space_available | r2r-dev/netmiko | 1 | python | def remote_space_available(self, search_pattern='bytes total \\((.*) bytes free\\)'):
remote_cmd = 'dir {0}'.format(self.file_system)
remote_output = self.ssh_ctl_chan.send_command_expect(remote_cmd)
match = re.search(search_pattern, remote_output)
return int(match.group(1)) | def remote_space_available(self, search_pattern='bytes total \\((.*) bytes free\\)'):
remote_cmd = 'dir {0}'.format(self.file_system)
remote_output = self.ssh_ctl_chan.send_command_expect(remote_cmd)
match = re.search(search_pattern, remote_output)
return int(match.group(1))<|docstring|>Return space available on remote device.<|endoftext|> |
60808f83bfa025a61b6890a6b80db2138b0de2d587aac90c3f4b3f9fdb87c5bb | def local_space_available(self):
'Return space available on local filesystem.'
destination_stats = os.statvfs('.')
return (destination_stats.f_bsize * destination_stats.f_bavail) | Return space available on local filesystem. | netmiko/scp_handler.py | local_space_available | r2r-dev/netmiko | 1 | python | def local_space_available(self):
destination_stats = os.statvfs('.')
return (destination_stats.f_bsize * destination_stats.f_bavail) | def local_space_available(self):
destination_stats = os.statvfs('.')
return (destination_stats.f_bsize * destination_stats.f_bavail)<|docstring|>Return space available on local filesystem.<|endoftext|> |
618ee1cf8c06800de07789601d927594a172ad84c39a2bc51572e4ba2b3b97f3 | def verify_space_available(self, search_pattern='bytes total \\((.*) bytes free\\)'):
'Verify sufficient space is available on destination file system (return boolean).'
if (self.direction == 'put'):
space_avail = self.remote_space_available(search_pattern=search_pattern)
elif (self.direction == 'get'):
space_avail = self.local_space_available()
if (space_avail > self.file_size):
return True
return False | Verify sufficient space is available on destination file system (return boolean). | netmiko/scp_handler.py | verify_space_available | r2r-dev/netmiko | 1 | python | def verify_space_available(self, search_pattern='bytes total \\((.*) bytes free\\)'):
if (self.direction == 'put'):
space_avail = self.remote_space_available(search_pattern=search_pattern)
elif (self.direction == 'get'):
space_avail = self.local_space_available()
if (space_avail > self.file_size):
return True
return False | def verify_space_available(self, search_pattern='bytes total \\((.*) bytes free\\)'):
if (self.direction == 'put'):
space_avail = self.remote_space_available(search_pattern=search_pattern)
elif (self.direction == 'get'):
space_avail = self.local_space_available()
if (space_avail > self.file_size):
return True
return False<|docstring|>Verify sufficient space is available on destination file system (return boolean).<|endoftext|> |
68449950ac807a5fce7bd07dcae6c16c7c5abafa5ac4234476073abc7f593f28 | def check_file_exists(self, remote_cmd=''):
'Check if the dest_file already exists on the file system (return boolean).'
if (self.direction == 'put'):
if (not remote_cmd):
remote_cmd = 'dir {0}/{1}'.format(self.file_system, self.dest_file)
remote_out = self.ssh_ctl_chan.send_command_expect(remote_cmd)
search_string = 'Directory of .*{0}'.format(self.dest_file)
if ('Error opening' in remote_out):
return False
elif re.search(search_string, remote_out):
return True
else:
raise ValueError('Unexpected output from check_file_exists')
elif (self.direction == 'get'):
return os.path.exists(self.dest_file) | Check if the dest_file already exists on the file system (return boolean). | netmiko/scp_handler.py | check_file_exists | r2r-dev/netmiko | 1 | python | def check_file_exists(self, remote_cmd=):
if (self.direction == 'put'):
if (not remote_cmd):
remote_cmd = 'dir {0}/{1}'.format(self.file_system, self.dest_file)
remote_out = self.ssh_ctl_chan.send_command_expect(remote_cmd)
search_string = 'Directory of .*{0}'.format(self.dest_file)
if ('Error opening' in remote_out):
return False
elif re.search(search_string, remote_out):
return True
else:
raise ValueError('Unexpected output from check_file_exists')
elif (self.direction == 'get'):
return os.path.exists(self.dest_file) | def check_file_exists(self, remote_cmd=):
if (self.direction == 'put'):
if (not remote_cmd):
remote_cmd = 'dir {0}/{1}'.format(self.file_system, self.dest_file)
remote_out = self.ssh_ctl_chan.send_command_expect(remote_cmd)
search_string = 'Directory of .*{0}'.format(self.dest_file)
if ('Error opening' in remote_out):
return False
elif re.search(search_string, remote_out):
return True
else:
raise ValueError('Unexpected output from check_file_exists')
elif (self.direction == 'get'):
return os.path.exists(self.dest_file)<|docstring|>Check if the dest_file already exists on the file system (return boolean).<|endoftext|> |
c0d0a2d5ad284dbea61149933640691e7c4c1ad1fb344dece51aadb33e062b59 | def remote_file_size(self, remote_cmd='', remote_file=None):
'Get the file size of the remote file.'
if (remote_file is None):
remote_file = self.dest_file
if (not remote_cmd):
remote_cmd = 'dir {0}/{1}'.format(self.file_system, remote_file)
remote_out = self.ssh_ctl_chan.send_command_expect(remote_cmd)
remote_out = re.split('Directory of .*', remote_out)
remote_out = ''.join(remote_out)
escape_file_name = re.escape(remote_file)
pattern = '.*({0}).*'.format(escape_file_name)
match = re.search(pattern, remote_out)
if match:
line = match.group(0)
file_size = line.split()[2]
if ('Error opening' in remote_out):
raise IOError('Unable to find file on remote system')
else:
return int(file_size) | Get the file size of the remote file. | netmiko/scp_handler.py | remote_file_size | r2r-dev/netmiko | 1 | python | def remote_file_size(self, remote_cmd=, remote_file=None):
if (remote_file is None):
remote_file = self.dest_file
if (not remote_cmd):
remote_cmd = 'dir {0}/{1}'.format(self.file_system, remote_file)
remote_out = self.ssh_ctl_chan.send_command_expect(remote_cmd)
remote_out = re.split('Directory of .*', remote_out)
remote_out = ''.join(remote_out)
escape_file_name = re.escape(remote_file)
pattern = '.*({0}).*'.format(escape_file_name)
match = re.search(pattern, remote_out)
if match:
line = match.group(0)
file_size = line.split()[2]
if ('Error opening' in remote_out):
raise IOError('Unable to find file on remote system')
else:
return int(file_size) | def remote_file_size(self, remote_cmd=, remote_file=None):
if (remote_file is None):
remote_file = self.dest_file
if (not remote_cmd):
remote_cmd = 'dir {0}/{1}'.format(self.file_system, remote_file)
remote_out = self.ssh_ctl_chan.send_command_expect(remote_cmd)
remote_out = re.split('Directory of .*', remote_out)
remote_out = ''.join(remote_out)
escape_file_name = re.escape(remote_file)
pattern = '.*({0}).*'.format(escape_file_name)
match = re.search(pattern, remote_out)
if match:
line = match.group(0)
file_size = line.split()[2]
if ('Error opening' in remote_out):
raise IOError('Unable to find file on remote system')
else:
return int(file_size)<|docstring|>Get the file size of the remote file.<|endoftext|> |
695deeebeff9150dd575590b0cfa79f5cc80a94d65d51dcc9f39dd6f4317f5f7 | def file_md5(self, file_name):
'Compute MD5 hash of file.'
with open(file_name, 'rb') as f:
file_contents = f.read()
file_hash = hashlib.md5(file_contents).hexdigest()
return file_hash | Compute MD5 hash of file. | netmiko/scp_handler.py | file_md5 | r2r-dev/netmiko | 1 | python | def file_md5(self, file_name):
with open(file_name, 'rb') as f:
file_contents = f.read()
file_hash = hashlib.md5(file_contents).hexdigest()
return file_hash | def file_md5(self, file_name):
with open(file_name, 'rb') as f:
file_contents = f.read()
file_hash = hashlib.md5(file_contents).hexdigest()
return file_hash<|docstring|>Compute MD5 hash of file.<|endoftext|> |
0b0f61711124ab7d9914b53b1360e2aab4d968fc517f9100841d73ae9c4911fd | @staticmethod
def process_md5(md5_output, pattern='= (.*)'):
'\n Process the string to retrieve the MD5 hash\n\n Output from Cisco IOS (ASA is similar)\n .MD5 of flash:file_name Done!\n verify /md5 (flash:file_name) = 410db2a7015eaa42b1fe71f1bf3d59a2\n '
match = re.search(pattern, md5_output)
if match:
return match.group(1)
else:
raise ValueError('Invalid output from MD5 command: {0}'.format(md5_output)) | Process the string to retrieve the MD5 hash
Output from Cisco IOS (ASA is similar)
.MD5 of flash:file_name Done!
verify /md5 (flash:file_name) = 410db2a7015eaa42b1fe71f1bf3d59a2 | netmiko/scp_handler.py | process_md5 | r2r-dev/netmiko | 1 | python | @staticmethod
def process_md5(md5_output, pattern='= (.*)'):
'\n Process the string to retrieve the MD5 hash\n\n Output from Cisco IOS (ASA is similar)\n .MD5 of flash:file_name Done!\n verify /md5 (flash:file_name) = 410db2a7015eaa42b1fe71f1bf3d59a2\n '
match = re.search(pattern, md5_output)
if match:
return match.group(1)
else:
raise ValueError('Invalid output from MD5 command: {0}'.format(md5_output)) | @staticmethod
def process_md5(md5_output, pattern='= (.*)'):
'\n Process the string to retrieve the MD5 hash\n\n Output from Cisco IOS (ASA is similar)\n .MD5 of flash:file_name Done!\n verify /md5 (flash:file_name) = 410db2a7015eaa42b1fe71f1bf3d59a2\n '
match = re.search(pattern, md5_output)
if match:
return match.group(1)
else:
raise ValueError('Invalid output from MD5 command: {0}'.format(md5_output))<|docstring|>Process the string to retrieve the MD5 hash
Output from Cisco IOS (ASA is similar)
.MD5 of flash:file_name Done!
verify /md5 (flash:file_name) = 410db2a7015eaa42b1fe71f1bf3d59a2<|endoftext|> |
10a04cba62a654df2c1f4223b87a40889113bae5d3205c4dd42487c9e4f9afda | def compare_md5(self, base_cmd='verify /md5'):
'Compare md5 of file on network device to md5 of local file'
if (self.direction == 'put'):
remote_md5 = self.remote_md5(base_cmd=base_cmd)
print(remote_md5)
print(self.source_md5)
return (self.source_md5 == remote_md5)
elif (self.direction == 'get'):
local_md5 = self.file_md5(self.dest_file)
return (self.source_md5 == local_md5) | Compare md5 of file on network device to md5 of local file | netmiko/scp_handler.py | compare_md5 | r2r-dev/netmiko | 1 | python | def compare_md5(self, base_cmd='verify /md5'):
if (self.direction == 'put'):
remote_md5 = self.remote_md5(base_cmd=base_cmd)
print(remote_md5)
print(self.source_md5)
return (self.source_md5 == remote_md5)
elif (self.direction == 'get'):
local_md5 = self.file_md5(self.dest_file)
return (self.source_md5 == local_md5) | def compare_md5(self, base_cmd='verify /md5'):
if (self.direction == 'put'):
remote_md5 = self.remote_md5(base_cmd=base_cmd)
print(remote_md5)
print(self.source_md5)
return (self.source_md5 == remote_md5)
elif (self.direction == 'get'):
local_md5 = self.file_md5(self.dest_file)
return (self.source_md5 == local_md5)<|docstring|>Compare md5 of file on network device to md5 of local file<|endoftext|> |
2fa935813428b2a6bd780b860087bc8c96dffcb12ecfe37b7068cec03c8de13a | def remote_md5(self, base_cmd='verify /md5', remote_file=None):
'\n Calculate remote MD5 and return the checksum.\n\n This command can be CPU intensive on the remote device.\n '
if (remote_file is None):
remote_file = self.dest_file
remote_md5_cmd = '{0} {1}{2}'.format(base_cmd, self.file_system, remote_file)
dest_md5 = self.ssh_ctl_chan.send_command_expect(remote_md5_cmd, delay_factor=3.0)
print(dest_md5)
dest_md5 = self.process_md5(dest_md5)
return dest_md5 | Calculate remote MD5 and return the checksum.
This command can be CPU intensive on the remote device. | netmiko/scp_handler.py | remote_md5 | r2r-dev/netmiko | 1 | python | def remote_md5(self, base_cmd='verify /md5', remote_file=None):
'\n Calculate remote MD5 and return the checksum.\n\n This command can be CPU intensive on the remote device.\n '
if (remote_file is None):
remote_file = self.dest_file
remote_md5_cmd = '{0} {1}{2}'.format(base_cmd, self.file_system, remote_file)
dest_md5 = self.ssh_ctl_chan.send_command_expect(remote_md5_cmd, delay_factor=3.0)
print(dest_md5)
dest_md5 = self.process_md5(dest_md5)
return dest_md5 | def remote_md5(self, base_cmd='verify /md5', remote_file=None):
'\n Calculate remote MD5 and return the checksum.\n\n This command can be CPU intensive on the remote device.\n '
if (remote_file is None):
remote_file = self.dest_file
remote_md5_cmd = '{0} {1}{2}'.format(base_cmd, self.file_system, remote_file)
dest_md5 = self.ssh_ctl_chan.send_command_expect(remote_md5_cmd, delay_factor=3.0)
print(dest_md5)
dest_md5 = self.process_md5(dest_md5)
return dest_md5<|docstring|>Calculate remote MD5 and return the checksum.
This command can be CPU intensive on the remote device.<|endoftext|> |
ba59b72c1b5e780f7df0a234743446298dba4723dd2f604ecf92c24a3e1a3f83 | def transfer_file(self):
'SCP transfer file.'
if (self.direction == 'put'):
self.put_file()
elif (self.direction == 'get'):
self.get_file() | SCP transfer file. | netmiko/scp_handler.py | transfer_file | r2r-dev/netmiko | 1 | python | def transfer_file(self):
if (self.direction == 'put'):
self.put_file()
elif (self.direction == 'get'):
self.get_file() | def transfer_file(self):
if (self.direction == 'put'):
self.put_file()
elif (self.direction == 'get'):
self.get_file()<|docstring|>SCP transfer file.<|endoftext|> |
3738853de3c310e260c2322a0acb56df4192096caf5d7ce7614f973207664e0e | def get_file(self):
'SCP copy the file from the remote device to local system.'
self.scp_conn.scp_get_file(self.source_file, self.dest_file)
self.scp_conn.close() | SCP copy the file from the remote device to local system. | netmiko/scp_handler.py | get_file | r2r-dev/netmiko | 1 | python | def get_file(self):
self.scp_conn.scp_get_file(self.source_file, self.dest_file)
self.scp_conn.close() | def get_file(self):
self.scp_conn.scp_get_file(self.source_file, self.dest_file)
self.scp_conn.close()<|docstring|>SCP copy the file from the remote device to local system.<|endoftext|> |
4cfcf3d2b0c870b0b215bee2263bf048a885342234e47c2ebb291fd5ff7e588e | def put_file(self):
'SCP copy the file from the local system to the remote device.'
destination = '{0}{1}'.format(self.file_system, self.dest_file)
if (':' not in destination):
raise ValueError('Invalid destination file system specified')
self.scp_conn.scp_transfer_file(self.source_file, destination)
self.scp_conn.close() | SCP copy the file from the local system to the remote device. | netmiko/scp_handler.py | put_file | r2r-dev/netmiko | 1 | python | def put_file(self):
destination = '{0}{1}'.format(self.file_system, self.dest_file)
if (':' not in destination):
raise ValueError('Invalid destination file system specified')
self.scp_conn.scp_transfer_file(self.source_file, destination)
self.scp_conn.close() | def put_file(self):
destination = '{0}{1}'.format(self.file_system, self.dest_file)
if (':' not in destination):
raise ValueError('Invalid destination file system specified')
self.scp_conn.scp_transfer_file(self.source_file, destination)
self.scp_conn.close()<|docstring|>SCP copy the file from the local system to the remote device.<|endoftext|> |
9fbd8ef9d6c194fec503715cb85f220f094c39ff6f2851b05e6641fa4175a932 | def verify_file(self):
'Verify the file has been transferred correctly.'
return self.compare_md5() | Verify the file has been transferred correctly. | netmiko/scp_handler.py | verify_file | r2r-dev/netmiko | 1 | python | def verify_file(self):
return self.compare_md5() | def verify_file(self):
return self.compare_md5()<|docstring|>Verify the file has been transferred correctly.<|endoftext|> |
93940ca9417f272ee50e309eaec8f58dc76ae9d0c77a5178eb1b533118e33a7d | def enable_scp(self, cmd=None):
'\n Enable SCP on remote device.\n\n Defaults to Cisco IOS command\n '
if (cmd is None):
cmd = ['ip scp server enable']
elif (not hasattr(cmd, '__iter__')):
cmd = [cmd]
self.ssh_ctl_chan.send_config_set(cmd) | Enable SCP on remote device.
Defaults to Cisco IOS command | netmiko/scp_handler.py | enable_scp | r2r-dev/netmiko | 1 | python | def enable_scp(self, cmd=None):
'\n Enable SCP on remote device.\n\n Defaults to Cisco IOS command\n '
if (cmd is None):
cmd = ['ip scp server enable']
elif (not hasattr(cmd, '__iter__')):
cmd = [cmd]
self.ssh_ctl_chan.send_config_set(cmd) | def enable_scp(self, cmd=None):
'\n Enable SCP on remote device.\n\n Defaults to Cisco IOS command\n '
if (cmd is None):
cmd = ['ip scp server enable']
elif (not hasattr(cmd, '__iter__')):
cmd = [cmd]
self.ssh_ctl_chan.send_config_set(cmd)<|docstring|>Enable SCP on remote device.
Defaults to Cisco IOS command<|endoftext|> |
49eb585aa9a0d44a3d15fab1a587b74d5f03a2ae006195d50e9fa613d99dd0cc | def disable_scp(self, cmd=None):
'\n Disable SCP on remote device.\n\n Defaults to Cisco IOS command\n '
if (cmd is None):
cmd = ['no ip scp server enable']
elif (not hasattr(cmd, '__iter__')):
cmd = [cmd]
self.ssh_ctl_chan.send_config_set(cmd) | Disable SCP on remote device.
Defaults to Cisco IOS command | netmiko/scp_handler.py | disable_scp | r2r-dev/netmiko | 1 | python | def disable_scp(self, cmd=None):
'\n Disable SCP on remote device.\n\n Defaults to Cisco IOS command\n '
if (cmd is None):
cmd = ['no ip scp server enable']
elif (not hasattr(cmd, '__iter__')):
cmd = [cmd]
self.ssh_ctl_chan.send_config_set(cmd) | def disable_scp(self, cmd=None):
'\n Disable SCP on remote device.\n\n Defaults to Cisco IOS command\n '
if (cmd is None):
cmd = ['no ip scp server enable']
elif (not hasattr(cmd, '__iter__')):
cmd = [cmd]
self.ssh_ctl_chan.send_config_set(cmd)<|docstring|>Disable SCP on remote device.
Defaults to Cisco IOS command<|endoftext|> |
b969b2724300e8370bf2cb84b6157ea652ac0542a2ed4b7a62e31db647ee9d86 | @staticmethod
def _tcl_newline_rationalize(tcl_string):
'\n When using put inside a TCL {} section the newline is considered a new TCL\n statement and causes a missing curly-brace message. Convert "\n" to "\r". TCL\n will convert the "\r" to a "\n" i.e. you will see a "\n" inside the file on the\n Cisco IOS device.\n '
NEWLINE = '\\n'
CARRIAGE_RETURN = '\\r'
tmp_string = re.sub(NEWLINE, CARRIAGE_RETURN, tcl_string)
if re.search('[{}]', tmp_string):
msg = 'Curly brace detected in string; TCL requires this be escaped.'
raise ValueError(msg)
return tmp_string | When using put inside a TCL {} section the newline is considered a new TCL
statement and causes a missing curly-brace message. Convert "\n" to "\r". TCL
will convert the "\r" to a "\n" i.e. you will see a "\n" inside the file on the
Cisco IOS device. | netmiko/scp_handler.py | _tcl_newline_rationalize | r2r-dev/netmiko | 1 | python | @staticmethod
def _tcl_newline_rationalize(tcl_string):
'\n When using put inside a TCL {} section the newline is considered a new TCL\n statement and causes a missing curly-brace message. Convert "\n" to "\r". TCL\n will convert the "\r" to a "\n" i.e. you will see a "\n" inside the file on the\n Cisco IOS device.\n '
NEWLINE = '\\n'
CARRIAGE_RETURN = '\\r'
tmp_string = re.sub(NEWLINE, CARRIAGE_RETURN, tcl_string)
if re.search('[{}]', tmp_string):
msg = 'Curly brace detected in string; TCL requires this be escaped.'
raise ValueError(msg)
return tmp_string | @staticmethod
def _tcl_newline_rationalize(tcl_string):
'\n When using put inside a TCL {} section the newline is considered a new TCL\n statement and causes a missing curly-brace message. Convert "\n" to "\r". TCL\n will convert the "\r" to a "\n" i.e. you will see a "\n" inside the file on the\n Cisco IOS device.\n '
NEWLINE = '\\n'
CARRIAGE_RETURN = '\\r'
tmp_string = re.sub(NEWLINE, CARRIAGE_RETURN, tcl_string)
if re.search('[{}]', tmp_string):
msg = 'Curly brace detected in string; TCL requires this be escaped.'
raise ValueError(msg)
return tmp_string<|docstring|>When using put inside a TCL {} section the newline is considered a new TCL
statement and causes a missing curly-brace message. Convert "\n" to "\r". TCL
will convert the "\r" to a "\n" i.e. you will see a "\n" inside the file on the
Cisco IOS device.<|endoftext|> |
5458e595bc893a874975c78d94b3e6113f990c7293237a42401e1ac2287848b8 | def file_md5(self, file_name):
'Compute MD5 hash of file.'
file_contents = self._read_file(file_name)
file_contents = (file_contents + '\n')
file_contents = file_contents.encode('UTF-8')
return hashlib.md5(file_contents).hexdigest() | Compute MD5 hash of file. | netmiko/scp_handler.py | file_md5 | r2r-dev/netmiko | 1 | python | def file_md5(self, file_name):
file_contents = self._read_file(file_name)
file_contents = (file_contents + '\n')
file_contents = file_contents.encode('UTF-8')
return hashlib.md5(file_contents).hexdigest() | def file_md5(self, file_name):
file_contents = self._read_file(file_name)
file_contents = (file_contents + '\n')
file_contents = file_contents.encode('UTF-8')
return hashlib.md5(file_contents).hexdigest()<|docstring|>Compute MD5 hash of file.<|endoftext|> |
f0ed6674461f2c075faa351b0012f21ef3d08a653f85c313d74ebdedcfe8ce41 | @staticmethod
def empty_table():
'\n Delete all content from this table. Use carefully !\n '
CoexpressionClusterSimilarity.query.delete() | Delete all content from this table. Use carefully ! | conekt/models/relationships/cluster_similarity.py | empty_table | legumeinfo/CoNekT | 14 | python | @staticmethod
def empty_table():
'\n \n '
CoexpressionClusterSimilarity.query.delete() | @staticmethod
def empty_table():
'\n \n '
CoexpressionClusterSimilarity.query.delete()<|docstring|>Delete all content from this table. Use carefully !<|endoftext|> |
ebab4e26424d221bfba3c0e888566c9b1004c2bfc6e1e2bcdfc807375ccc853a | def get_time_factors(td):
'\n Get different time factor such as days, hours, minutes and seconds\n :param td: timedelta\n :return: tuple(days, hours, minutes, seconds)\n '
if (not td):
return (0, 0, 0, 0)
return (td.days, (td.seconds // 3600), ((td.seconds // 60) % 60), (td.seconds % 60)) | Get different time factor such as days, hours, minutes and seconds
:param td: timedelta
:return: tuple(days, hours, minutes, seconds) | bluebottle/utils/widgets.py | get_time_factors | terrameijar/bluebottle | 10 | python | def get_time_factors(td):
'\n Get different time factor such as days, hours, minutes and seconds\n :param td: timedelta\n :return: tuple(days, hours, minutes, seconds)\n '
if (not td):
return (0, 0, 0, 0)
return (td.days, (td.seconds // 3600), ((td.seconds // 60) % 60), (td.seconds % 60)) | def get_time_factors(td):
'\n Get different time factor such as days, hours, minutes and seconds\n :param td: timedelta\n :return: tuple(days, hours, minutes, seconds)\n '
if (not td):
return (0, 0, 0, 0)
return (td.days, (td.seconds // 3600), ((td.seconds // 60) % 60), (td.seconds % 60))<|docstring|>Get different time factor such as days, hours, minutes and seconds
:param td: timedelta
:return: tuple(days, hours, minutes, seconds)<|endoftext|> |
ddf9fc66d2ac1355bc9ed47af40f08c39fe5609c1f4875969dd1eaa573935a96 | def bounded_ed(a, b, currentDistance, lowerBound, insert_table, delete_table, replace_table, table, i):
'\n Edit distance - but bounded.\n '
global j
j += 1
n = len(a)
m = len(b)
if (a == b):
return currentDistance
if (currentDistance >= lowerBound):
return lowerBound
if (n == 0):
return (m + currentDistance)
if (m == 0):
return (n + currentDistance)
if (a[(- 1)] == b[(- 1)]):
return bounded_ed(a[:(- 1)], b[:(- 1)], currentDistance, lowerBound, insert_table, delete_table, replace_table, table, i)
else:
insertionBranch = bounded_ed(a[:(n - 1)], b, (currentDistance + 1), lowerBound, insert_table, delete_table, replace_table, table, i)
deletionBranch = bounded_ed(a, b[:(m - 1)], (currentDistance + 1), min(insertionBranch, lowerBound), insert_table, delete_table, replace_table, table, i)
replaceBranch = bounded_ed(a[:(n - 1)], b[:(m - 1)], (currentDistance + 1), min(insertionBranch, deletionBranch, lowerBound), insert_table, delete_table, replace_table, table, i)
"\n as we need to find what branch will minimize our cost,\n we need to store that operation before calling next recursion\n that's what is done here\n "
if (min(insertionBranch, deletionBranch, replaceBranch) == insertionBranch):
table[currentDistance] = 1
insert_table[currentDistance] = 1
elif (min(insertionBranch, deletionBranch, replaceBranch) == deletionBranch):
delete_table[currentDistance] = 1
table[currentDistance] = 2
elif (min(insertionBranch, deletionBranch, replaceBranch) == replaceBranch):
replace_table[currentDistance] = 1
table[currentDistance] = 3
return min(insertionBranch, deletionBranch, replaceBranch) | Edit distance - but bounded. | branch_and_bound.py | bounded_ed | alaabenfatma/Edit_Distance | 0 | python | def bounded_ed(a, b, currentDistance, lowerBound, insert_table, delete_table, replace_table, table, i):
'\n \n '
global j
j += 1
n = len(a)
m = len(b)
if (a == b):
return currentDistance
if (currentDistance >= lowerBound):
return lowerBound
if (n == 0):
return (m + currentDistance)
if (m == 0):
return (n + currentDistance)
if (a[(- 1)] == b[(- 1)]):
return bounded_ed(a[:(- 1)], b[:(- 1)], currentDistance, lowerBound, insert_table, delete_table, replace_table, table, i)
else:
insertionBranch = bounded_ed(a[:(n - 1)], b, (currentDistance + 1), lowerBound, insert_table, delete_table, replace_table, table, i)
deletionBranch = bounded_ed(a, b[:(m - 1)], (currentDistance + 1), min(insertionBranch, lowerBound), insert_table, delete_table, replace_table, table, i)
replaceBranch = bounded_ed(a[:(n - 1)], b[:(m - 1)], (currentDistance + 1), min(insertionBranch, deletionBranch, lowerBound), insert_table, delete_table, replace_table, table, i)
"\n as we need to find what branch will minimize our cost,\n we need to store that operation before calling next recursion\n that's what is done here\n "
if (min(insertionBranch, deletionBranch, replaceBranch) == insertionBranch):
table[currentDistance] = 1
insert_table[currentDistance] = 1
elif (min(insertionBranch, deletionBranch, replaceBranch) == deletionBranch):
delete_table[currentDistance] = 1
table[currentDistance] = 2
elif (min(insertionBranch, deletionBranch, replaceBranch) == replaceBranch):
replace_table[currentDistance] = 1
table[currentDistance] = 3
return min(insertionBranch, deletionBranch, replaceBranch) | def bounded_ed(a, b, currentDistance, lowerBound, insert_table, delete_table, replace_table, table, i):
'\n \n '
global j
j += 1
n = len(a)
m = len(b)
if (a == b):
return currentDistance
if (currentDistance >= lowerBound):
return lowerBound
if (n == 0):
return (m + currentDistance)
if (m == 0):
return (n + currentDistance)
if (a[(- 1)] == b[(- 1)]):
return bounded_ed(a[:(- 1)], b[:(- 1)], currentDistance, lowerBound, insert_table, delete_table, replace_table, table, i)
else:
insertionBranch = bounded_ed(a[:(n - 1)], b, (currentDistance + 1), lowerBound, insert_table, delete_table, replace_table, table, i)
deletionBranch = bounded_ed(a, b[:(m - 1)], (currentDistance + 1), min(insertionBranch, lowerBound), insert_table, delete_table, replace_table, table, i)
replaceBranch = bounded_ed(a[:(n - 1)], b[:(m - 1)], (currentDistance + 1), min(insertionBranch, deletionBranch, lowerBound), insert_table, delete_table, replace_table, table, i)
"\n as we need to find what branch will minimize our cost,\n we need to store that operation before calling next recursion\n that's what is done here\n "
if (min(insertionBranch, deletionBranch, replaceBranch) == insertionBranch):
table[currentDistance] = 1
insert_table[currentDistance] = 1
elif (min(insertionBranch, deletionBranch, replaceBranch) == deletionBranch):
delete_table[currentDistance] = 1
table[currentDistance] = 2
elif (min(insertionBranch, deletionBranch, replaceBranch) == replaceBranch):
replace_table[currentDistance] = 1
table[currentDistance] = 3
return min(insertionBranch, deletionBranch, replaceBranch)<|docstring|>Edit distance - but bounded.<|endoftext|> |
f40b8690c5f25edfe80958f96b01bc67b5d1caf56c483602ad0fb51f35d35d20 | def get_displays():
' Get display information and return width and height of desktop.\n\n :return: (tuple) width, height\n '
all_displays = EnumDisplayMonitors(None)
(w, h) = (0, 0)
for display in all_displays:
r = display[2]
w = (r[2] if (r[2] > w) else w)
h = (r[3] if (r[3] > h) else h)
return (w, h) | Get display information and return width and height of desktop.
:return: (tuple) width, height | pyWindowPositionSaver.py | get_displays | syncon303/pyWindowPositionSaver | 0 | python | def get_displays():
' Get display information and return width and height of desktop.\n\n :return: (tuple) width, height\n '
all_displays = EnumDisplayMonitors(None)
(w, h) = (0, 0)
for display in all_displays:
r = display[2]
w = (r[2] if (r[2] > w) else w)
h = (r[3] if (r[3] > h) else h)
return (w, h) | def get_displays():
' Get display information and return width and height of desktop.\n\n :return: (tuple) width, height\n '
all_displays = EnumDisplayMonitors(None)
(w, h) = (0, 0)
for display in all_displays:
r = display[2]
w = (r[2] if (r[2] > w) else w)
h = (r[3] if (r[3] > h) else h)
return (w, h)<|docstring|>Get display information and return width and height of desktop.
:return: (tuple) width, height<|endoftext|> |
409e6d92525baa61deb2a9d7909c1f04a76319565e1444a924e5745f1d4b5558 | def test_returns_200_on_existing_bundle_id(self):
'`ApplicationBundleDetailView` return `OK` for existing bundle\n\n create an `ApplicationBundle`,\n try to access `ApplicationBundleDetailView` using `id`\n assert that 200 OK is returned\n '
self.be_apubdef_user()
with self.assertLogs('project.services.logging_service', logging.INFO) as logs:
result = self.client.get(reverse('intake-app_bundle_detail', kwargs=dict(bundle_id=self.a_pubdef_bundle.id)))
self.assertEqual(result.status_code, 200)
assertInLogsCount(logs, {'app_bundle_opened': self.a_pubdef_bundle.submissions.count()}) | `ApplicationBundleDetailView` return `OK` for existing bundle
create an `ApplicationBundle`,
try to access `ApplicationBundleDetailView` using `id`
assert that 200 OK is returned | intake/tests/views/test_admin_views.py | test_returns_200_on_existing_bundle_id | dane-king/intake | 51 | python | def test_returns_200_on_existing_bundle_id(self):
'`ApplicationBundleDetailView` return `OK` for existing bundle\n\n create an `ApplicationBundle`,\n try to access `ApplicationBundleDetailView` using `id`\n assert that 200 OK is returned\n '
self.be_apubdef_user()
with self.assertLogs('project.services.logging_service', logging.INFO) as logs:
result = self.client.get(reverse('intake-app_bundle_detail', kwargs=dict(bundle_id=self.a_pubdef_bundle.id)))
self.assertEqual(result.status_code, 200)
assertInLogsCount(logs, {'app_bundle_opened': self.a_pubdef_bundle.submissions.count()}) | def test_returns_200_on_existing_bundle_id(self):
'`ApplicationBundleDetailView` return `OK` for existing bundle\n\n create an `ApplicationBundle`,\n try to access `ApplicationBundleDetailView` using `id`\n assert that 200 OK is returned\n '
self.be_apubdef_user()
with self.assertLogs('project.services.logging_service', logging.INFO) as logs:
result = self.client.get(reverse('intake-app_bundle_detail', kwargs=dict(bundle_id=self.a_pubdef_bundle.id)))
self.assertEqual(result.status_code, 200)
assertInLogsCount(logs, {'app_bundle_opened': self.a_pubdef_bundle.submissions.count()})<|docstring|>`ApplicationBundleDetailView` return `OK` for existing bundle
create an `ApplicationBundle`,
try to access `ApplicationBundleDetailView` using `id`
assert that 200 OK is returned<|endoftext|> |
8a175b193103ed2df263a0f0e8c3ea1efbe3355e5fa78b52909c8334f25eff24 | def test_returns_404_on_nonexisting_bundle_id(self):
'ApplicationBundleDetailView return 404 if not found\n\n with no existing `ApplicationBundle`\n try to access `ApplicationBundleDetailView` using a made up `id`\n assert that 404 is returned\n '
self.be_ccpubdef_user()
result = self.client.get(reverse('intake-app_bundle_detail', kwargs=dict(bundle_id=20909872435)))
self.assertEqual(result.status_code, 404) | ApplicationBundleDetailView return 404 if not found
with no existing `ApplicationBundle`
try to access `ApplicationBundleDetailView` using a made up `id`
assert that 404 is returned | intake/tests/views/test_admin_views.py | test_returns_404_on_nonexisting_bundle_id | dane-king/intake | 51 | python | def test_returns_404_on_nonexisting_bundle_id(self):
'ApplicationBundleDetailView return 404 if not found\n\n with no existing `ApplicationBundle`\n try to access `ApplicationBundleDetailView` using a made up `id`\n assert that 404 is returned\n '
self.be_ccpubdef_user()
result = self.client.get(reverse('intake-app_bundle_detail', kwargs=dict(bundle_id=20909872435)))
self.assertEqual(result.status_code, 404) | def test_returns_404_on_nonexisting_bundle_id(self):
'ApplicationBundleDetailView return 404 if not found\n\n with no existing `ApplicationBundle`\n try to access `ApplicationBundleDetailView` using a made up `id`\n assert that 404 is returned\n '
self.be_ccpubdef_user()
result = self.client.get(reverse('intake-app_bundle_detail', kwargs=dict(bundle_id=20909872435)))
self.assertEqual(result.status_code, 404)<|docstring|>ApplicationBundleDetailView return 404 if not found
with no existing `ApplicationBundle`
try to access `ApplicationBundleDetailView` using a made up `id`
assert that 404 is returned<|endoftext|> |
b8182b08df47a5fbf995422e414878e50f359b0f4bcc689c80b3588960ee2431 | def test_user_from_wrong_org_is_redirected_to_profile(self):
'ApplicationBundleDetailView redirects unpermitted users\n\n with existing `ApplicationBundle`\n try to access `ApplicationBundleDetailView` as a user from another org\n assert that redirects to `ApplicationIdex`\n '
self.be_sfpubdef_user()
result = self.client.get(reverse('intake-app_bundle_detail', kwargs=dict(bundle_id=self.a_pubdef_bundle.id)))
self.assertRedirects(result, reverse('user_accounts-profile'), fetch_redirect_response=False) | ApplicationBundleDetailView redirects unpermitted users
with existing `ApplicationBundle`
try to access `ApplicationBundleDetailView` as a user from another org
assert that redirects to `ApplicationIdex` | intake/tests/views/test_admin_views.py | test_user_from_wrong_org_is_redirected_to_profile | dane-king/intake | 51 | python | def test_user_from_wrong_org_is_redirected_to_profile(self):
'ApplicationBundleDetailView redirects unpermitted users\n\n with existing `ApplicationBundle`\n try to access `ApplicationBundleDetailView` as a user from another org\n assert that redirects to `ApplicationIdex`\n '
self.be_sfpubdef_user()
result = self.client.get(reverse('intake-app_bundle_detail', kwargs=dict(bundle_id=self.a_pubdef_bundle.id)))
self.assertRedirects(result, reverse('user_accounts-profile'), fetch_redirect_response=False) | def test_user_from_wrong_org_is_redirected_to_profile(self):
'ApplicationBundleDetailView redirects unpermitted users\n\n with existing `ApplicationBundle`\n try to access `ApplicationBundleDetailView` as a user from another org\n assert that redirects to `ApplicationIdex`\n '
self.be_sfpubdef_user()
result = self.client.get(reverse('intake-app_bundle_detail', kwargs=dict(bundle_id=self.a_pubdef_bundle.id)))
self.assertRedirects(result, reverse('user_accounts-profile'), fetch_redirect_response=False)<|docstring|>ApplicationBundleDetailView redirects unpermitted users
with existing `ApplicationBundle`
try to access `ApplicationBundleDetailView` as a user from another org
assert that redirects to `ApplicationIdex`<|endoftext|> |
8867ee5e5908184b51a0d242cdf3e5cda717f452ec9b3b9976500177c341264a | def test_has_pdf_bundle_url_if_needed(self):
'ApplicationBundleDetailView return pdf url if needed\n\n create an `ApplicationBundle` that needs a pdf\n try to access `ApplicationBundleDetailView` using `id`\n assert that the url for `FilledPDFBundle` is in the template.\n '
self.be_sfpubdef_user()
mock_pdf = SimpleUploadedFile('a.pdf', b'things', content_type='application/pdf')
bundle = BundlesService.create_bundle_from_submissions(organization=self.sf_pubdef, submissions=self.sf_pubdef_submissions, bundled_pdf=mock_pdf)
url = bundle.get_pdf_bundle_url()
result = self.client.get(reverse('intake-app_bundle_detail', kwargs=dict(bundle_id=bundle.id)))
self.assertContains(result, url) | ApplicationBundleDetailView return pdf url if needed
create an `ApplicationBundle` that needs a pdf
try to access `ApplicationBundleDetailView` using `id`
assert that the url for `FilledPDFBundle` is in the template. | intake/tests/views/test_admin_views.py | test_has_pdf_bundle_url_if_needed | dane-king/intake | 51 | python | def test_has_pdf_bundle_url_if_needed(self):
'ApplicationBundleDetailView return pdf url if needed\n\n create an `ApplicationBundle` that needs a pdf\n try to access `ApplicationBundleDetailView` using `id`\n assert that the url for `FilledPDFBundle` is in the template.\n '
self.be_sfpubdef_user()
mock_pdf = SimpleUploadedFile('a.pdf', b'things', content_type='application/pdf')
bundle = BundlesService.create_bundle_from_submissions(organization=self.sf_pubdef, submissions=self.sf_pubdef_submissions, bundled_pdf=mock_pdf)
url = bundle.get_pdf_bundle_url()
result = self.client.get(reverse('intake-app_bundle_detail', kwargs=dict(bundle_id=bundle.id)))
self.assertContains(result, url) | def test_has_pdf_bundle_url_if_needed(self):
'ApplicationBundleDetailView return pdf url if needed\n\n create an `ApplicationBundle` that needs a pdf\n try to access `ApplicationBundleDetailView` using `id`\n assert that the url for `FilledPDFBundle` is in the template.\n '
self.be_sfpubdef_user()
mock_pdf = SimpleUploadedFile('a.pdf', b'things', content_type='application/pdf')
bundle = BundlesService.create_bundle_from_submissions(organization=self.sf_pubdef, submissions=self.sf_pubdef_submissions, bundled_pdf=mock_pdf)
url = bundle.get_pdf_bundle_url()
result = self.client.get(reverse('intake-app_bundle_detail', kwargs=dict(bundle_id=bundle.id)))
self.assertContains(result, url)<|docstring|>ApplicationBundleDetailView return pdf url if needed
create an `ApplicationBundle` that needs a pdf
try to access `ApplicationBundleDetailView` using `id`
assert that the url for `FilledPDFBundle` is in the template.<|endoftext|> |
a91941e5120e1e56e0e624bbcf239c39ccdc4d70309bc44be8f151f734e01abe | def simplify_dataset_name(dataset_name):
'In a couple of cases (BraTS and MURA) the dataset name is not quite correct\n because of a mistake made earlier in the pipeline. \n This function transforms the dataset names into a more readable format.\n\n Args:\n dataset_name (string): name of dataset to simplify\n\n Returns:\n string: simplified dataset name\n '
if ('BraTS20' in dataset_name):
return 'BraTS20'
elif ('study' in dataset_name):
return 'MURA'
else:
return dataset_name | In a couple of cases (BraTS and MURA) the dataset name is not quite correct
because of a mistake made earlier in the pipeline.
This function transforms the dataset names into a more readable format.
Args:
dataset_name (string): name of dataset to simplify
Returns:
string: simplified dataset name | src/test/visualise_test_results.py | simplify_dataset_name | cdmacfadyen/classify-modality | 1 | python | def simplify_dataset_name(dataset_name):
'In a couple of cases (BraTS and MURA) the dataset name is not quite correct\n because of a mistake made earlier in the pipeline. \n This function transforms the dataset names into a more readable format.\n\n Args:\n dataset_name (string): name of dataset to simplify\n\n Returns:\n string: simplified dataset name\n '
if ('BraTS20' in dataset_name):
return 'BraTS20'
elif ('study' in dataset_name):
return 'MURA'
else:
return dataset_name | def simplify_dataset_name(dataset_name):
'In a couple of cases (BraTS and MURA) the dataset name is not quite correct\n because of a mistake made earlier in the pipeline. \n This function transforms the dataset names into a more readable format.\n\n Args:\n dataset_name (string): name of dataset to simplify\n\n Returns:\n string: simplified dataset name\n '
if ('BraTS20' in dataset_name):
return 'BraTS20'
elif ('study' in dataset_name):
return 'MURA'
else:
return dataset_name<|docstring|>In a couple of cases (BraTS and MURA) the dataset name is not quite correct
because of a mistake made earlier in the pipeline.
This function transforms the dataset names into a more readable format.
Args:
dataset_name (string): name of dataset to simplify
Returns:
string: simplified dataset name<|endoftext|> |
6a39a54ca50f35e4bc9f3250cc888656fcea6ef3dd37f98803d96de8a17637d1 | @block
def ram_instance(self, porta, portb, clock):
'\n porta: RamPort32 instance, port A\n portb: RamPort32 instance port B\n '
i_a = port_instance(self.ram, porta)
i_b = port_instance(self.ram, portb)
return instances() | porta: RamPort32 instance, port A
portb: RamPort32 instance port B | uncore/ram_dp.py | ram_instance | bonfireprocessor/bonfire-core | 0 | python | @block
def ram_instance(self, porta, portb, clock):
'\n porta: RamPort32 instance, port A\n portb: RamPort32 instance port B\n '
i_a = port_instance(self.ram, porta)
i_b = port_instance(self.ram, portb)
return instances() | @block
def ram_instance(self, porta, portb, clock):
'\n porta: RamPort32 instance, port A\n portb: RamPort32 instance port B\n '
i_a = port_instance(self.ram, porta)
i_b = port_instance(self.ram, portb)
return instances()<|docstring|>porta: RamPort32 instance, port A
portb: RamPort32 instance port B<|endoftext|> |
a865e41ea855d4b74e7b1acaa4d438302e913938247efb6dbca56b5b331de154 | @block
def ram_instance_dbus(self, db_a, db_b, clock):
'\n db_a: dbus instance, port A\n db_b: dbus instance port B\n \n '
porta = RamPort32(self.adrwidth, db_a.readOnly)
portb = RamPort32(self.adrwidth, db_b.readOnly)
p1 = dbusToRamPort(db_a, porta, clock, db_a.readOnly)
p2 = dbusToRamPort(db_b, portb, clock, db_b.readOnly)
ram = self.ram_instance(porta, portb, clock)
return instances() | db_a: dbus instance, port A
db_b: dbus instance port B | uncore/ram_dp.py | ram_instance_dbus | bonfireprocessor/bonfire-core | 0 | python | @block
def ram_instance_dbus(self, db_a, db_b, clock):
'\n db_a: dbus instance, port A\n db_b: dbus instance port B\n \n '
porta = RamPort32(self.adrwidth, db_a.readOnly)
portb = RamPort32(self.adrwidth, db_b.readOnly)
p1 = dbusToRamPort(db_a, porta, clock, db_a.readOnly)
p2 = dbusToRamPort(db_b, portb, clock, db_b.readOnly)
ram = self.ram_instance(porta, portb, clock)
return instances() | @block
def ram_instance_dbus(self, db_a, db_b, clock):
'\n db_a: dbus instance, port A\n db_b: dbus instance port B\n \n '
porta = RamPort32(self.adrwidth, db_a.readOnly)
portb = RamPort32(self.adrwidth, db_b.readOnly)
p1 = dbusToRamPort(db_a, porta, clock, db_a.readOnly)
p2 = dbusToRamPort(db_b, portb, clock, db_b.readOnly)
ram = self.ram_instance(porta, portb, clock)
return instances()<|docstring|>db_a: dbus instance, port A
db_b: dbus instance port B<|endoftext|> |
31b76f119b828510a39c96ab8e1d1aff00a35778dfd2ccd4a00579cc579c376d | def _get_coef_(self, pipeline: Pipeline=None) -> np.array:
'\n Interface function to get `coef\\_` from classifier used in the pipeline specified\n this might be useful if we switch the classifier, most of them already have a `coef\\_` attribute\n\n\n :param pipeline: pipeline from which the classifier should be used\n :return: `coef\\_` for feature weight report\n '
if (not pipeline):
pipeline = self.pipeline
clf = pipeline.named_steps['clf']
if hasattr(clf, 'coef_'):
return_weights = clf.coef_
else:
weights = np.array([c.base_estimator.coef_[0] for c in clf.calibrated_classifiers_])
return_weights = np.median(weights, axis=0)
return return_weights | Interface function to get `coef\_` from classifier used in the pipeline specified
this might be useful if we switch the classifier, most of them already have a `coef\_` attribute
:param pipeline: pipeline from which the classifier should be used
:return: `coef\_` for feature weight report | phenotrex/ml/clf/svm.py | _get_coef_ | univieCUBE/PICA2 | 2 | python | def _get_coef_(self, pipeline: Pipeline=None) -> np.array:
'\n Interface function to get `coef\\_` from classifier used in the pipeline specified\n this might be useful if we switch the classifier, most of them already have a `coef\\_` attribute\n\n\n :param pipeline: pipeline from which the classifier should be used\n :return: `coef\\_` for feature weight report\n '
if (not pipeline):
pipeline = self.pipeline
clf = pipeline.named_steps['clf']
if hasattr(clf, 'coef_'):
return_weights = clf.coef_
else:
weights = np.array([c.base_estimator.coef_[0] for c in clf.calibrated_classifiers_])
return_weights = np.median(weights, axis=0)
return return_weights | def _get_coef_(self, pipeline: Pipeline=None) -> np.array:
'\n Interface function to get `coef\\_` from classifier used in the pipeline specified\n this might be useful if we switch the classifier, most of them already have a `coef\\_` attribute\n\n\n :param pipeline: pipeline from which the classifier should be used\n :return: `coef\\_` for feature weight report\n '
if (not pipeline):
pipeline = self.pipeline
clf = pipeline.named_steps['clf']
if hasattr(clf, 'coef_'):
return_weights = clf.coef_
else:
weights = np.array([c.base_estimator.coef_[0] for c in clf.calibrated_classifiers_])
return_weights = np.median(weights, axis=0)
return return_weights<|docstring|>Interface function to get `coef\_` from classifier used in the pipeline specified
this might be useful if we switch the classifier, most of them already have a `coef\_` attribute
:param pipeline: pipeline from which the classifier should be used
:return: `coef\_` for feature weight report<|endoftext|> |
aafe26afd26937b7da0c6a93dfed0455deb3c276372d05233c17a0b1cf0a4287 | def get_feature_weights(self) -> Dict:
'\n Extract the weights for features from pipeline.\n\n :return: sorted Dict of feature name: weight\n '
if (self.trait_name is None):
self.logger.error('Pipeline is not fitted. Cannot retrieve weights.')
return {}
names = self.pipeline.named_steps['vec'].get_feature_names()
weights = self._get_coef_()
sorted_weights = {f: w for (f, w) in sorted(zip(names, weights), key=(lambda kv: abs(kv[1])), reverse=True)}
return sorted_weights | Extract the weights for features from pipeline.
:return: sorted Dict of feature name: weight | phenotrex/ml/clf/svm.py | get_feature_weights | univieCUBE/PICA2 | 2 | python | def get_feature_weights(self) -> Dict:
'\n Extract the weights for features from pipeline.\n\n :return: sorted Dict of feature name: weight\n '
if (self.trait_name is None):
self.logger.error('Pipeline is not fitted. Cannot retrieve weights.')
return {}
names = self.pipeline.named_steps['vec'].get_feature_names()
weights = self._get_coef_()
sorted_weights = {f: w for (f, w) in sorted(zip(names, weights), key=(lambda kv: abs(kv[1])), reverse=True)}
return sorted_weights | def get_feature_weights(self) -> Dict:
'\n Extract the weights for features from pipeline.\n\n :return: sorted Dict of feature name: weight\n '
if (self.trait_name is None):
self.logger.error('Pipeline is not fitted. Cannot retrieve weights.')
return {}
names = self.pipeline.named_steps['vec'].get_feature_names()
weights = self._get_coef_()
sorted_weights = {f: w for (f, w) in sorted(zip(names, weights), key=(lambda kv: abs(kv[1])), reverse=True)}
return sorted_weights<|docstring|>Extract the weights for features from pipeline.
:return: sorted Dict of feature name: weight<|endoftext|> |