column                   dtype           min      max
body_hash                stringlengths   64       64
body                     stringlengths   23       109k
docstring                stringlengths   1        57k
path                     stringlengths   4        198
name                     stringlengths   1        115
repository_name          stringlengths   7        111
repository_stars         float64         0        191k
lang                     stringclasses   1 value (python)
body_without_docstring   stringlengths   14       108k
unified                  stringlengths   45       133k
0ee5b6155418c826e06c8e6f57998720d81afb77e686d50abc57e9b7adac33e9
@requests_mock.mock() def test_dump_to_ckan_package_create_streaming_resource_fail(self, mock_request): 'Create package with streaming resource, which failed to create\n resource.' base_url = 'https://demo.ckan.org/api/3/action/' package_create_url = '{}package_create'.format(base_url) resource_create_url = '{}resource_create'.format(base_url) mock_request.post(package_create_url, json={'success': True, 'result': {'id': 'ckan-package-id'}}) mock_request.post(resource_create_url, json={'success': False, 'error': {'__type': 'Validation Error', 'name': ['Some validation error.']}}) datapackage = {'name': 'my-datapackage', 'project': 'my-project', 'resources': [{'dpp:streamedFrom': 'https://example.com/file.csv', 'dpp:streaming': True, 'name': 'resource_streamed.csv', 'path': 'data/file.csv', 'schema': {'fields': [{'name': 'first', 'type': 'string'}, {'name': 'last', 'type': 'string'}]}}, {'dpp:streamedFrom': 'https://example.com/file_02.csv', 'name': 'resource_not_streamed.csv', 'path': '.'}]} params = {'ckan-host': 'https://demo.ckan.org', 'ckan-api-key': 'my-api-key', 'overwrite_existing': True, 'force-format': True} processor_dir = os.path.dirname(datapackage_pipelines_ckan.processors.__file__) processor_path = os.path.join(processor_dir, 'dump/to_ckan.py') json_file = {'first': 'Fred', 'last': 'Smith'} json_file = json.dumps(json_file) (spew_args, _) = mock_dump_test(processor_path, (params, datapackage, iter([ResourceIterator(io.StringIO(json_file), datapackage['resources'][0], {'schema': {'fields': []}})]))) spew_res_iter = spew_args[1] with self.assertRaises(Exception): for r in spew_res_iter: list(r)
Create a package with a streaming resource, where creating the resource fails.
tests/test_dump_to_ckan.py
test_dump_to_ckan_package_create_streaming_resource_fail
OriHoch/datapackage-pipelines-ckan
0
python
@requests_mock.mock() def test_dump_to_ckan_package_create_streaming_resource_fail(self, mock_request): 'Create package with streaming resource, which failed to create\n resource.' base_url = 'https://demo.ckan.org/api/3/action/' package_create_url = '{}package_create'.format(base_url) resource_create_url = '{}resource_create'.format(base_url) mock_request.post(package_create_url, json={'success': True, 'result': {'id': 'ckan-package-id'}}) mock_request.post(resource_create_url, json={'success': False, 'error': {'__type': 'Validation Error', 'name': ['Some validation error.']}}) datapackage = {'name': 'my-datapackage', 'project': 'my-project', 'resources': [{'dpp:streamedFrom': 'https://example.com/file.csv', 'dpp:streaming': True, 'name': 'resource_streamed.csv', 'path': 'data/file.csv', 'schema': {'fields': [{'name': 'first', 'type': 'string'}, {'name': 'last', 'type': 'string'}]}}, {'dpp:streamedFrom': 'https://example.com/file_02.csv', 'name': 'resource_not_streamed.csv', 'path': '.'}]} params = {'ckan-host': 'https://demo.ckan.org', 'ckan-api-key': 'my-api-key', 'overwrite_existing': True, 'force-format': True} processor_dir = os.path.dirname(datapackage_pipelines_ckan.processors.__file__) processor_path = os.path.join(processor_dir, 'dump/to_ckan.py') json_file = {'first': 'Fred', 'last': 'Smith'} json_file = json.dumps(json_file) (spew_args, _) = mock_dump_test(processor_path, (params, datapackage, iter([ResourceIterator(io.StringIO(json_file), datapackage['resources'][0], {'schema': {'fields': []}})]))) spew_res_iter = spew_args[1] with self.assertRaises(Exception): for r in spew_res_iter: list(r)
@requests_mock.mock() def test_dump_to_ckan_package_create_streaming_resource_fail(self, mock_request): 'Create package with streaming resource, which failed to create\n resource.' base_url = 'https://demo.ckan.org/api/3/action/' package_create_url = '{}package_create'.format(base_url) resource_create_url = '{}resource_create'.format(base_url) mock_request.post(package_create_url, json={'success': True, 'result': {'id': 'ckan-package-id'}}) mock_request.post(resource_create_url, json={'success': False, 'error': {'__type': 'Validation Error', 'name': ['Some validation error.']}}) datapackage = {'name': 'my-datapackage', 'project': 'my-project', 'resources': [{'dpp:streamedFrom': 'https://example.com/file.csv', 'dpp:streaming': True, 'name': 'resource_streamed.csv', 'path': 'data/file.csv', 'schema': {'fields': [{'name': 'first', 'type': 'string'}, {'name': 'last', 'type': 'string'}]}}, {'dpp:streamedFrom': 'https://example.com/file_02.csv', 'name': 'resource_not_streamed.csv', 'path': '.'}]} params = {'ckan-host': 'https://demo.ckan.org', 'ckan-api-key': 'my-api-key', 'overwrite_existing': True, 'force-format': True} processor_dir = os.path.dirname(datapackage_pipelines_ckan.processors.__file__) processor_path = os.path.join(processor_dir, 'dump/to_ckan.py') json_file = {'first': 'Fred', 'last': 'Smith'} json_file = json.dumps(json_file) (spew_args, _) = mock_dump_test(processor_path, (params, datapackage, iter([ResourceIterator(io.StringIO(json_file), datapackage['resources'][0], {'schema': {'fields': []}})]))) spew_res_iter = spew_args[1] with self.assertRaises(Exception): for r in spew_res_iter: list(r)<|docstring|>Create package with streaming resource, which failed to create resource.<|endoftext|>
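The test above never talks to a real CKAN instance: requests_mock intercepts the processor's HTTP calls and returns the canned JSON registered for each action URL. A minimal standalone sketch of that interception pattern (the request payload here is illustrative, not taken from the processor):

import requests
import requests_mock

# Illustrative payload only -- mirrors the canned error response registered above.
with requests_mock.Mocker() as m:
    m.post('https://demo.ckan.org/api/3/action/resource_create',
           json={'success': False,
                 'error': {'__type': 'Validation Error',
                           'name': ['Some validation error.']}})
    resp = requests.post('https://demo.ckan.org/api/3/action/resource_create',
                         json={'name': 'resource_streamed.csv'})
    assert resp.json()['success'] is False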
12caa0c4be2a07649c7c4d7e2a43b68995f137c8f30292e70aa8e6d70a76250c
@requests_mock.mock() def test_dump_to_ckan_package_create_streaming_resource_datastore(self, mock_request): 'Create package with streaming resource, and pushing to datastore.' package_id = 'ckan-package-id' base_url = 'https://demo.ckan.org/api/3/action/' package_create_url = '{}package_create'.format(base_url) resource_create_url = '{}resource_create'.format(base_url) package_show_url = '{}package_show?id={}'.format(base_url, package_id) datastore_search_url = '{}datastore_search?resource_id=_table_metadata'.format(base_url) datastore_create_url = '{}datastore_create'.format(base_url) datastore_upsert_url = '{}datastore_upsert'.format(base_url) mock_request.post(package_create_url, json={'success': True, 'result': {'id': package_id}}) mock_request.post(resource_create_url, json={'success': True, 'result': {'id': 'ckan-resource-id'}}) mock_request.get(package_show_url, json={'success': True, 'result': {'id': '7766839b-face-4336-8e1a-3c51c5e7634d', 'resources': [{'name': 'co2-mm-mlo_csv_not_streamed', 'format': 'CSV', 'url': 'https://pkgstore.datahub.io/core/co2-ppm:co2-mm-mlo_csv/data/co2-mm-mlo_csv.csv', 'datastore_active': False, 'cache_last_updated': None, 'package_id': '7766839b-face-4336-8e1a-3c51c5e7634d', 'id': '329e4271-8cc3-48c9-a219-c8eab52acc65'}, {'name': 'co2-mm-mlo_csv_streamed', 'encoding': 'utf-8', 'url': 'https://demo.ckan.org/dataset/7766839b-face-4336-8e1a-3c51c5e7634d/resource/723380d7-688a-465f-b0bd-ff6d1ec25680/download/co2-mm-mlo_csv_streamed.csv', 'datastore_active': False, 'format': 'CSV', 'package_id': '7766839b-face-4336-8e1a-3c51c5e7634d', 'id': '723380d7-688a-465f-b0bd-ff6d1ec25680'}], 'num_resources': 2, 'name': 'test-dataset-010203', 'title': 'Test Dataset'}}) mock_request.get(datastore_search_url, json={'success': True, 'result': {'resource_id': '_table_metadata', 'records': []}}) mock_request.post(datastore_create_url, json={'success': True, 'result': {'resource_id': '7564690e-86ec-44de-a3f5-2cff9cbb521f'}}) mock_request.post(datastore_upsert_url, json={'success': True}) datapackage = {'name': 'my-datapackage', 'project': 'my-project', 'resources': [{'dpp:streamedFrom': 'https://example.com/file.csv', 'dpp:streaming': True, 'name': 'resource_streamed.csv', 'path': 'data/file.csv', 'schema': {'fields': [{'name': 'first', 'type': 'string'}, {'name': 'last', 'type': 'string'}]}}, {'dpp:streamedFrom': 'https://example.com/file_02.csv', 'name': 'resource_not_streamed.csv', 'path': '.'}]} params = {'ckan-host': 'https://demo.ckan.org', 'ckan-api-key': 'my-api-key', 'overwrite_existing': True, 'force-format': True, 'push_resources_to_datastore': True} processor_dir = os.path.dirname(datapackage_pipelines_ckan.processors.__file__) processor_path = os.path.join(processor_dir, 'dump/to_ckan.py') json_file = {'first': 'Fred', 'last': 'Smith'} json_file = json.dumps(json_file) (spew_args, _) = mock_dump_test(processor_path, (params, datapackage, iter([ResourceIterator(io.StringIO(json_file), datapackage['resources'][0], {'schema': {'fields': []}})]))) spew_res_iter = spew_args[1] for r in spew_res_iter: list(r) requests = mock_request.request_history assert (len(requests) == 7) assert (requests[0].url == package_create_url) assert (requests[1].url == resource_create_url) assert (requests[2].url == resource_create_url) assert (requests[3].url == package_show_url) assert requests[4].url.startswith(datastore_search_url) assert (requests[5].url == datastore_create_url) assert (requests[6].url == datastore_upsert_url)
Create a package with a streaming resource and push it to the datastore.
tests/test_dump_to_ckan.py
test_dump_to_ckan_package_create_streaming_resource_datastore
OriHoch/datapackage-pipelines-ckan
0
python
@requests_mock.mock() def test_dump_to_ckan_package_create_streaming_resource_datastore(self, mock_request): package_id = 'ckan-package-id' base_url = 'https://demo.ckan.org/api/3/action/' package_create_url = '{}package_create'.format(base_url) resource_create_url = '{}resource_create'.format(base_url) package_show_url = '{}package_show?id={}'.format(base_url, package_id) datastore_search_url = '{}datastore_search?resource_id=_table_metadata'.format(base_url) datastore_create_url = '{}datastore_create'.format(base_url) datastore_upsert_url = '{}datastore_upsert'.format(base_url) mock_request.post(package_create_url, json={'success': True, 'result': {'id': package_id}}) mock_request.post(resource_create_url, json={'success': True, 'result': {'id': 'ckan-resource-id'}}) mock_request.get(package_show_url, json={'success': True, 'result': {'id': '7766839b-face-4336-8e1a-3c51c5e7634d', 'resources': [{'name': 'co2-mm-mlo_csv_not_streamed', 'format': 'CSV', 'url': 'https://pkgstore.datahub.io/core/co2-ppm:co2-mm-mlo_csv/data/co2-mm-mlo_csv.csv', 'datastore_active': False, 'cache_last_updated': None, 'package_id': '7766839b-face-4336-8e1a-3c51c5e7634d', 'id': '329e4271-8cc3-48c9-a219-c8eab52acc65'}, {'name': 'co2-mm-mlo_csv_streamed', 'encoding': 'utf-8', 'url': 'https://demo.ckan.org/dataset/7766839b-face-4336-8e1a-3c51c5e7634d/resource/723380d7-688a-465f-b0bd-ff6d1ec25680/download/co2-mm-mlo_csv_streamed.csv', 'datastore_active': False, 'format': 'CSV', 'package_id': '7766839b-face-4336-8e1a-3c51c5e7634d', 'id': '723380d7-688a-465f-b0bd-ff6d1ec25680'}], 'num_resources': 2, 'name': 'test-dataset-010203', 'title': 'Test Dataset'}}) mock_request.get(datastore_search_url, json={'success': True, 'result': {'resource_id': '_table_metadata', 'records': []}}) mock_request.post(datastore_create_url, json={'success': True, 'result': {'resource_id': '7564690e-86ec-44de-a3f5-2cff9cbb521f'}}) mock_request.post(datastore_upsert_url, json={'success': True}) datapackage = {'name': 'my-datapackage', 'project': 'my-project', 'resources': [{'dpp:streamedFrom': 'https://example.com/file.csv', 'dpp:streaming': True, 'name': 'resource_streamed.csv', 'path': 'data/file.csv', 'schema': {'fields': [{'name': 'first', 'type': 'string'}, {'name': 'last', 'type': 'string'}]}}, {'dpp:streamedFrom': 'https://example.com/file_02.csv', 'name': 'resource_not_streamed.csv', 'path': '.'}]} params = {'ckan-host': 'https://demo.ckan.org', 'ckan-api-key': 'my-api-key', 'overwrite_existing': True, 'force-format': True, 'push_resources_to_datastore': True} processor_dir = os.path.dirname(datapackage_pipelines_ckan.processors.__file__) processor_path = os.path.join(processor_dir, 'dump/to_ckan.py') json_file = {'first': 'Fred', 'last': 'Smith'} json_file = json.dumps(json_file) (spew_args, _) = mock_dump_test(processor_path, (params, datapackage, iter([ResourceIterator(io.StringIO(json_file), datapackage['resources'][0], {'schema': {'fields': []}})]))) spew_res_iter = spew_args[1] for r in spew_res_iter: list(r) requests = mock_request.request_history assert (len(requests) == 7) assert (requests[0].url == package_create_url) assert (requests[1].url == resource_create_url) assert (requests[2].url == resource_create_url) assert (requests[3].url == package_show_url) assert requests[4].url.startswith(datastore_search_url) assert (requests[5].url == datastore_create_url) assert (requests[6].url == datastore_upsert_url)
@requests_mock.mock() def test_dump_to_ckan_package_create_streaming_resource_datastore(self, mock_request): package_id = 'ckan-package-id' base_url = 'https://demo.ckan.org/api/3/action/' package_create_url = '{}package_create'.format(base_url) resource_create_url = '{}resource_create'.format(base_url) package_show_url = '{}package_show?id={}'.format(base_url, package_id) datastore_search_url = '{}datastore_search?resource_id=_table_metadata'.format(base_url) datastore_create_url = '{}datastore_create'.format(base_url) datastore_upsert_url = '{}datastore_upsert'.format(base_url) mock_request.post(package_create_url, json={'success': True, 'result': {'id': package_id}}) mock_request.post(resource_create_url, json={'success': True, 'result': {'id': 'ckan-resource-id'}}) mock_request.get(package_show_url, json={'success': True, 'result': {'id': '7766839b-face-4336-8e1a-3c51c5e7634d', 'resources': [{'name': 'co2-mm-mlo_csv_not_streamed', 'format': 'CSV', 'url': 'https://pkgstore.datahub.io/core/co2-ppm:co2-mm-mlo_csv/data/co2-mm-mlo_csv.csv', 'datastore_active': False, 'cache_last_updated': None, 'package_id': '7766839b-face-4336-8e1a-3c51c5e7634d', 'id': '329e4271-8cc3-48c9-a219-c8eab52acc65'}, {'name': 'co2-mm-mlo_csv_streamed', 'encoding': 'utf-8', 'url': 'https://demo.ckan.org/dataset/7766839b-face-4336-8e1a-3c51c5e7634d/resource/723380d7-688a-465f-b0bd-ff6d1ec25680/download/co2-mm-mlo_csv_streamed.csv', 'datastore_active': False, 'format': 'CSV', 'package_id': '7766839b-face-4336-8e1a-3c51c5e7634d', 'id': '723380d7-688a-465f-b0bd-ff6d1ec25680'}], 'num_resources': 2, 'name': 'test-dataset-010203', 'title': 'Test Dataset'}}) mock_request.get(datastore_search_url, json={'success': True, 'result': {'resource_id': '_table_metadata', 'records': []}}) mock_request.post(datastore_create_url, json={'success': True, 'result': {'resource_id': '7564690e-86ec-44de-a3f5-2cff9cbb521f'}}) mock_request.post(datastore_upsert_url, json={'success': True}) datapackage = {'name': 'my-datapackage', 'project': 'my-project', 'resources': [{'dpp:streamedFrom': 'https://example.com/file.csv', 'dpp:streaming': True, 'name': 'resource_streamed.csv', 'path': 'data/file.csv', 'schema': {'fields': [{'name': 'first', 'type': 'string'}, {'name': 'last', 'type': 'string'}]}}, {'dpp:streamedFrom': 'https://example.com/file_02.csv', 'name': 'resource_not_streamed.csv', 'path': '.'}]} params = {'ckan-host': 'https://demo.ckan.org', 'ckan-api-key': 'my-api-key', 'overwrite_existing': True, 'force-format': True, 'push_resources_to_datastore': True} processor_dir = os.path.dirname(datapackage_pipelines_ckan.processors.__file__) processor_path = os.path.join(processor_dir, 'dump/to_ckan.py') json_file = {'first': 'Fred', 'last': 'Smith'} json_file = json.dumps(json_file) (spew_args, _) = mock_dump_test(processor_path, (params, datapackage, iter([ResourceIterator(io.StringIO(json_file), datapackage['resources'][0], {'schema': {'fields': []}})]))) spew_res_iter = spew_args[1] for r in spew_res_iter: list(r) requests = mock_request.request_history assert (len(requests) == 7) assert (requests[0].url == package_create_url) assert (requests[1].url == resource_create_url) assert (requests[2].url == resource_create_url) assert (requests[3].url == package_show_url) assert requests[4].url.startswith(datastore_search_url) assert (requests[5].url == datastore_create_url) assert (requests[6].url == datastore_upsert_url)<|docstring|>Create package with streaming resource, and pushing to datastore.<|endoftext|>
411617de1442840d3eeeff65e64d39852e19fa47a2fec0013d97f7cbe2b073a7
@requests_mock.mock() def test_dump_to_ckan_package_create_streaming_resource_datastore_method_invalid(self, mock_request): 'Create package with streaming resource, and pushing to datastore,\n with an invalid method.' datapackage = {'name': 'my-datapackage', 'project': 'my-project', 'resources': [{'dpp:streamedFrom': 'https://example.com/file.csv', 'dpp:streaming': True, 'name': 'resource_streamed.csv', 'path': 'data/file.csv', 'schema': {'fields': [{'name': 'first', 'type': 'string'}, {'name': 'last', 'type': 'string'}]}}, {'dpp:streamedFrom': 'https://example.com/file_02.csv', 'name': 'resource_not_streamed.csv', 'path': '.'}]} params = {'ckan-host': 'https://demo.ckan.org', 'ckan-api-key': 'my-api-key', 'overwrite_existing': True, 'force-format': True, 'push_resources_to_datastore': True, 'push_resources_to_datastore_method': 'invalid'} processor_dir = os.path.dirname(datapackage_pipelines_ckan.processors.__file__) processor_path = os.path.join(processor_dir, 'dump/to_ckan.py') json_file = {'first': 'Fred', 'last': 'Smith'} json_file = json.dumps(json_file) with self.assertRaises(RuntimeError): (spew_args, _) = mock_dump_test(processor_path, (params, datapackage, iter([ResourceIterator(io.StringIO(json_file), datapackage['resources'][0], {'schema': {'fields': []}})])))
Create a package with a streaming resource and push it to the datastore, using an invalid push method.
tests/test_dump_to_ckan.py
test_dump_to_ckan_package_create_streaming_resource_datastore_method_invalid
OriHoch/datapackage-pipelines-ckan
0
python
@requests_mock.mock() def test_dump_to_ckan_package_create_streaming_resource_datastore_method_invalid(self, mock_request): 'Create package with streaming resource, and pushing to datastore,\n with an invalid method.' datapackage = {'name': 'my-datapackage', 'project': 'my-project', 'resources': [{'dpp:streamedFrom': 'https://example.com/file.csv', 'dpp:streaming': True, 'name': 'resource_streamed.csv', 'path': 'data/file.csv', 'schema': {'fields': [{'name': 'first', 'type': 'string'}, {'name': 'last', 'type': 'string'}]}}, {'dpp:streamedFrom': 'https://example.com/file_02.csv', 'name': 'resource_not_streamed.csv', 'path': '.'}]} params = {'ckan-host': 'https://demo.ckan.org', 'ckan-api-key': 'my-api-key', 'overwrite_existing': True, 'force-format': True, 'push_resources_to_datastore': True, 'push_resources_to_datastore_method': 'invalid'} processor_dir = os.path.dirname(datapackage_pipelines_ckan.processors.__file__) processor_path = os.path.join(processor_dir, 'dump/to_ckan.py') json_file = {'first': 'Fred', 'last': 'Smith'} json_file = json.dumps(json_file) with self.assertRaises(RuntimeError): (spew_args, _) = mock_dump_test(processor_path, (params, datapackage, iter([ResourceIterator(io.StringIO(json_file), datapackage['resources'][0], {'schema': {'fields': []}})])))
@requests_mock.mock() def test_dump_to_ckan_package_create_streaming_resource_datastore_method_invalid(self, mock_request): 'Create package with streaming resource, and pushing to datastore,\n with an invalid method.' datapackage = {'name': 'my-datapackage', 'project': 'my-project', 'resources': [{'dpp:streamedFrom': 'https://example.com/file.csv', 'dpp:streaming': True, 'name': 'resource_streamed.csv', 'path': 'data/file.csv', 'schema': {'fields': [{'name': 'first', 'type': 'string'}, {'name': 'last', 'type': 'string'}]}}, {'dpp:streamedFrom': 'https://example.com/file_02.csv', 'name': 'resource_not_streamed.csv', 'path': '.'}]} params = {'ckan-host': 'https://demo.ckan.org', 'ckan-api-key': 'my-api-key', 'overwrite_existing': True, 'force-format': True, 'push_resources_to_datastore': True, 'push_resources_to_datastore_method': 'invalid'} processor_dir = os.path.dirname(datapackage_pipelines_ckan.processors.__file__) processor_path = os.path.join(processor_dir, 'dump/to_ckan.py') json_file = {'first': 'Fred', 'last': 'Smith'} json_file = json.dumps(json_file) with self.assertRaises(RuntimeError): (spew_args, _) = mock_dump_test(processor_path, (params, datapackage, iter([ResourceIterator(io.StringIO(json_file), datapackage['resources'][0], {'schema': {'fields': []}})])))<|docstring|>Create package with streaming resource, and pushing to datastore, with an invalid method.<|endoftext|>
b15de31dc9318c0ab0937da6d46ec0ad77d604df52e3a10394ef679603910114
def __init__(self, num_features, training_window, training_interval): '\n num_features: the length of the feature vector\n training_window: the number of previous data points to train on\n training_interval: the number of data points between training periods\n ' self.num_features = num_features self.training_interval = training_interval self.training_window = training_window self.samples = deque(maxlen=training_window) self.targets = deque(maxlen=training_window) self.model = BayesianRidge() self.severity = blr.Severity() self.alpha = 1.0 self.parameters = 0 self.train_count = 0 self.have_trained = False self.pred_range = [0.0, np.inf]
num_features: the length of the feature vector training_window: the number of previous data points to train on training_interval: the number of data points between training periods
modules/algo.py
__init__
apadin1/Merit-Smart-Grid-Analytics
10
python
def __init__(self, num_features, training_window, training_interval): '\n num_features: the length of the feature vector\n training_window: the number of previous data points to train on\n training_interval: the number of data points between training periods\n ' self.num_features = num_features self.training_interval = training_interval self.training_window = training_window self.samples = deque(maxlen=training_window) self.targets = deque(maxlen=training_window) self.model = BayesianRidge() self.severity = blr.Severity() self.alpha = 1.0 self.parameters = 0 self.train_count = 0 self.have_trained = False self.pred_range = [0.0, np.inf]
def __init__(self, num_features, training_window, training_interval): '\n num_features: the length of the feature vector\n training_window: the number of previous data points to train on\n training_interval: the number of data points between training periods\n ' self.num_features = num_features self.training_interval = training_interval self.training_window = training_window self.samples = deque(maxlen=training_window) self.targets = deque(maxlen=training_window) self.model = BayesianRidge() self.severity = blr.Severity() self.alpha = 1.0 self.parameters = 0 self.train_count = 0 self.have_trained = False self.pred_range = [0.0, np.inf]<|docstring|>num_features: the length of the feature vector training_window: the number of previous data points to train on training_interval: the number of data points between training periods<|endoftext|>
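training_window caps the samples and targets deques, so once the window is full each new observation silently evicts the oldest one. A self-contained sketch of that behaviour, independent of the class above:

from collections import deque

training_window = 3
samples = deque(maxlen=training_window)
for i in range(5):
    samples.append(i)   # appending past maxlen drops the oldest entry
print(list(samples))    # [2, 3, 4] -- only the last training_window items remain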
f1ba96c08a9301c3251d4abd63184380b1536e04548485ed80ff6a7ba3c97b0b
def run(self, sample): '\n Add a single sample to the data pool.\n The sample should be a feature vector: {x_1, x_2, x_3, ..., x_n, y}\n Where x_1->x_n are features and y is the target value\n ' try: assert (len(sample) == (self.num_features + 1)) except AssertionError: raise RuntimeError('sample length {} does not match number of features {}'.format(len(sample), (self.num_features + 1))) sample = np.array(sample).flatten() target = sample[(- 1)] prediction = None anomaly = None p_value = None self.targets.append(target) sample[(- 1)] = 1 if (len(self.samples) > 0): sample = ewma(sample, self.samples[(- 1)], self.alpha) self.samples.append(sample) self.train_count += 1 if self.have_trained: prediction = float(self.model.predict(sample.reshape(1, (- 1)))) prediction = np.clip(prediction, self.pred_range[0], self.pred_range[1]) (anomaly, p_value) = [float(i) for i in self.severity.check((target - prediction), sample)] if ((self.train_count >= self.training_interval) and (len(self.samples) >= self.training_window)): self.train() self.have_trained = True self.train_count = 0 return (target, prediction, anomaly, p_value)
Add a single sample to the data pool. The sample should be a feature vector: {x_1, x_2, x_3, ..., x_n, y} Where x_1->x_n are features and y is the target value
modules/algo.py
run
apadin1/Merit-Smart-Grid-Analytics
10
python
def run(self, sample): '\n Add a single sample to the data pool.\n The sample should be a feature vector: {x_1, x_2, x_3, ..., x_n, y}\n Where x_1->x_n are features and y is the target value\n ' try: assert (len(sample) == (self.num_features + 1)) except AssertionError: raise RuntimeError('sample length {} does not match number of features {}'.format(len(sample), (self.num_features + 1))) sample = np.array(sample).flatten() target = sample[(- 1)] prediction = None anomaly = None p_value = None self.targets.append(target) sample[(- 1)] = 1 if (len(self.samples) > 0): sample = ewma(sample, self.samples[(- 1)], self.alpha) self.samples.append(sample) self.train_count += 1 if self.have_trained: prediction = float(self.model.predict(sample.reshape(1, (- 1)))) prediction = np.clip(prediction, self.pred_range[0], self.pred_range[1]) (anomaly, p_value) = [float(i) for i in self.severity.check((target - prediction), sample)] if ((self.train_count >= self.training_interval) and (len(self.samples) >= self.training_window)): self.train() self.have_trained = True self.train_count = 0 return (target, prediction, anomaly, p_value)
def run(self, sample): '\n Add a single sample to the data pool.\n The sample should be a feature vector: {x_1, x_2, x_3, ..., x_n, y}\n Where x_1->x_n are features and y is the target value\n ' try: assert (len(sample) == (self.num_features + 1)) except AssertionError: raise RuntimeError('sample length {} does not match number of features {}'.format(len(sample), (self.num_features + 1))) sample = np.array(sample).flatten() target = sample[(- 1)] prediction = None anomaly = None p_value = None self.targets.append(target) sample[(- 1)] = 1 if (len(self.samples) > 0): sample = ewma(sample, self.samples[(- 1)], self.alpha) self.samples.append(sample) self.train_count += 1 if self.have_trained: prediction = float(self.model.predict(sample.reshape(1, (- 1)))) prediction = np.clip(prediction, self.pred_range[0], self.pred_range[1]) (anomaly, p_value) = [float(i) for i in self.severity.check((target - prediction), sample)] if ((self.train_count >= self.training_interval) and (len(self.samples) >= self.training_window)): self.train() self.have_trained = True self.train_count = 0 return (target, prediction, anomaly, p_value)<|docstring|>Add a single sample to the data pool. The sample should be a feature vector: {x_1, x_2, x_3, ..., x_n, y} Where x_1->x_n are features and y is the target value<|endoftext|>
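run() expects each sample to be the feature vector with the target appended, i.e. length num_features + 1; the target is peeled off the end and that slot is reused as a constant bias feature. A small standalone illustration of the slicing the method performs (values are made up):

import numpy as np

num_features = 3
sample = [2.0, 5.0, 1.5, 42.0]           # x_1, x_2, x_3 followed by the target y
assert len(sample) == num_features + 1   # the same length check run() enforces

sample = np.array(sample).flatten()
target = sample[-1]                      # 42.0 -- the value the model tries to predict
sample[-1] = 1                           # target slot becomes a bias term of 1
print(target, sample)                    # prints the target and the modified vector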
3b2685ea48265d2b9dea3e77b55f4c0c9a7c37a6ef2a775ac7020d5ab35df883
def train(self): 'Train the prediction and anomaly detection models' X = np.matrix(self.samples) y = np.array(self.targets).flatten() (w_opt, alpha, beta, S_N) = blr.sklearn_train(X, y) self.model.fit(X, y) self.severity.update_params(beta, S_N)
Train the prediction and anomaly detection models
modules/algo.py
train
apadin1/Merit-Smart-Grid-Analytics
10
python
def train(self): X = np.matrix(self.samples) y = np.array(self.targets).flatten() (w_opt, alpha, beta, S_N) = blr.sklearn_train(X, y) self.model.fit(X, y) self.severity.update_params(beta, S_N)
def train(self): X = np.matrix(self.samples) y = np.array(self.targets).flatten() (w_opt, alpha, beta, S_N) = blr.sklearn_train(X, y) self.model.fit(X, y) self.severity.update_params(beta, S_N)<|docstring|>Train the prediction and anomaly detection models<|endoftext|>
8812e77a7246ac1e7d850c33f04c67b7fc19824d5d94af3ddd53b501acfb40d3
def set_severity(self, w, L): 'Change the severity parameters' self.severity.set_wL(w, L)
Change the severity parameters
modules/algo.py
set_severity
apadin1/Merit-Smart-Grid-Analytics
10
python
def set_severity(self, w, L): self.severity.set_wL(w, L)
def set_severity(self, w, L): self.severity.set_wL(w, L)<|docstring|>Change the severity parameters<|endoftext|>
becf1fb31f43e6327ae590b8af3952a97b53baca8d1a2f0100b8648b6fde1ba0
def set_EWMA(self, alpha): 'Change the EWMA (exponential weighted moving average) weight' self.alpha = alpha
Change the EWMA (exponential weighted moving average) weight
modules/algo.py
set_EWMA
apadin1/Merit-Smart-Grid-Analytics
10
python
def set_EWMA(self, alpha): self.alpha = alpha
def set_EWMA(self, alpha): self.alpha = alpha<|docstring|>Change the EWMA (exponential weighted moving average) weight<|endoftext|>
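The ewma() helper that run() calls with this alpha is not included in this dump; a conventional exponentially weighted moving average consistent with how it is called would look like the sketch below (an assumption about the helper's behaviour, not its actual source):

import numpy as np

def ewma(current, previous, alpha):
    # alpha = 1.0 keeps only the new sample; smaller alpha smooths more heavily.
    # Hypothetical reconstruction based on the call ewma(sample, self.samples[-1], self.alpha).
    return alpha * np.asarray(current) + (1.0 - alpha) * np.asarray(previous)

print(ewma([4.0, 1.0], [2.0, 1.0], 0.5))   # [3. 1.]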
93371873268052aa436a894b3a1e49894373fb1442bedc0b87ead9a14ae6d6be
def main(training_data_file_path, test_data_file_path, grid_results_file_path, image_plot_file_path): '\n Entry point for script. Takes in training_data_file_path, test_data_file_path\n and image_plot_file_path from commandline, and runs a pre-optimized linear regression\n model. \n\n Arguments:\n ----------\n training_data_file_path \n - file path where training data is located. Assumes data is a .csv file in same\n format as data/data_train.csv (output of download_data.py script)\n test_data_file_path \n - file path where training data is located. Assumes data is a .csv file in same\n format as data/data_train.csv (output of download_data.py script)\n grid_results_file_path\n - file path where grid search results are saved\n image_plot_file_path\n - file path where image of results plot will be saved\n\n Returns\n -------\n None, but saves a plot to the specified file path\n ' plot_results(training_data_file_path, test_data_file_path, grid_results_file_path, image_plot_file_path)
Entry point for the script. Takes training_data_file_path, test_data_file_path, grid_results_file_path and image_plot_file_path from the command line, and runs a pre-optimized linear regression model. Arguments: ---------- training_data_file_path - file path where the training data is located. Assumes the data is a .csv file in the same format as data/data_train.csv (output of the download_data.py script) test_data_file_path - file path where the test data is located. Assumes the data is a .csv file in the same format as data/data_train.csv (output of the download_data.py script) grid_results_file_path - file path where the grid search results are saved image_plot_file_path - file path where the image of the results plot will be saved Returns ------- None, but saves a plot to the specified file path
src/linear_model.py
main
RobBlumberg/DSCI_522_Group_302
1
python
def main(training_data_file_path, test_data_file_path, grid_results_file_path, image_plot_file_path): '\n Entry point for script. Takes in training_data_file_path, test_data_file_path\n and image_plot_file_path from commandline, and runs a pre-optimized linear regression\n model. \n\n Arguments:\n ----------\n training_data_file_path \n - file path where training data is located. Assumes data is a .csv file in same\n format as data/data_train.csv (output of download_data.py script)\n test_data_file_path \n - file path where training data is located. Assumes data is a .csv file in same\n format as data/data_train.csv (output of download_data.py script)\n grid_results_file_path\n - file path where grid search results are saved\n image_plot_file_path\n - file path where image of results plot will be saved\n\n Returns\n -------\n None, but saves a plot to the specified file path\n ' plot_results(training_data_file_path, test_data_file_path, grid_results_file_path, image_plot_file_path)
def main(training_data_file_path, test_data_file_path, grid_results_file_path, image_plot_file_path): '\n Entry point for script. Takes in training_data_file_path, test_data_file_path\n and image_plot_file_path from commandline, and runs a pre-optimized linear regression\n model. \n\n Arguments:\n ----------\n training_data_file_path \n - file path where training data is located. Assumes data is a .csv file in same\n format as data/data_train.csv (output of download_data.py script)\n test_data_file_path \n - file path where training data is located. Assumes data is a .csv file in same\n format as data/data_train.csv (output of download_data.py script)\n grid_results_file_path\n - file path where grid search results are saved\n image_plot_file_path\n - file path where image of results plot will be saved\n\n Returns\n -------\n None, but saves a plot to the specified file path\n ' plot_results(training_data_file_path, test_data_file_path, grid_results_file_path, image_plot_file_path)<|docstring|>Entry point for script. Takes in training_data_file_path, test_data_file_path and image_plot_file_path from commandline, and runs a pre-optimized linear regression model. Arguments: ---------- training_data_file_path - file path where training data is located. Assumes data is a .csv file in same format as data/data_train.csv (output of download_data.py script) test_data_file_path - file path where training data is located. Assumes data is a .csv file in same format as data/data_train.csv (output of download_data.py script) grid_results_file_path - file path where grid search results are saved image_plot_file_path - file path where image of results plot will be saved Returns ------- None, but saves a plot to the specified file path<|endoftext|>
de3dd6c01579a6c3f1234d03ad66123509a4c9921d361de2686952aead6e2346
def time_parser(input_time): '\n Function which converts a time string of form mm.ss.SS to seconds\n \n Arguments:\n ----------\n input_time \n (str) - input time as string of form "d.dd.dd" where d is a digit\n\n Returns:\n --------\n float representing input time in seconds\n ' assert re.match('\\d\\.\\d{2}\\.\\d{2}', input_time), 'Only strings of format d.dd.dd can be parsed' parsed_time = input_time.split('.') mins = int(parsed_time[0]) secs = int(parsed_time[1]) ss = int(parsed_time[2]) time_in_sec = (((mins * 60.0) + secs) + (ss / 100)) return time_in_sec
Function which converts a time string of the form m.ss.SS (one minute digit, two second digits, two hundredths digits) to seconds Arguments: ---------- input_time (str) - input time as a string of the form "d.dd.dd" where d is a digit Returns: -------- float representing the input time in seconds
src/linear_model.py
time_parser
RobBlumberg/DSCI_522_Group_302
1
python
def time_parser(input_time): '\n Function which converts a time string of form mm.ss.SS to seconds\n \n Arguments:\n ----------\n input_time \n (str) - input time as string of form "d.dd.dd" where d is a digit\n\n Returns:\n --------\n float representing input time in seconds\n ' assert re.match('\\d\\.\\d{2}\\.\\d{2}', input_time), 'Only strings of format d.dd.dd can be parsed' parsed_time = input_time.split('.') mins = int(parsed_time[0]) secs = int(parsed_time[1]) ss = int(parsed_time[2]) time_in_sec = (((mins * 60.0) + secs) + (ss / 100)) return time_in_sec
def time_parser(input_time): '\n Function which converts a time string of form mm.ss.SS to seconds\n \n Arguments:\n ----------\n input_time \n (str) - input time as string of form "d.dd.dd" where d is a digit\n\n Returns:\n --------\n float representing input time in seconds\n ' assert re.match('\\d\\.\\d{2}\\.\\d{2}', input_time), 'Only strings of format d.dd.dd can be parsed' parsed_time = input_time.split('.') mins = int(parsed_time[0]) secs = int(parsed_time[1]) ss = int(parsed_time[2]) time_in_sec = (((mins * 60.0) + secs) + (ss / 100)) return time_in_sec<|docstring|>Function which converts a time string of form mm.ss.SS to seconds Arguments: ---------- input_time (str) - input time as string of form "d.dd.dd" where d is a digit Returns: -------- float representing input time in seconds<|endoftext|>
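A quick worked example of the conversion above, with an illustrative input: '2.03.45' splits into minutes, seconds and hundredths, and each minute contributes 60 seconds.

# time_parser('2.03.45') -> mins=2, secs=3, hundredths=45
mins, secs, ss = 2, 3, 45
print(mins * 60.0 + secs + ss / 100)   # 123.45 -- the value time_parser would return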
866c40d27dc9d37d25b8c448cff1fc5af999098417641a213e3c97d5e340af1a
def load_and_parse_data(training_data_file_path, test_data_file_path): '\n Data loading and cleaning function. Converts finishtime column to seconds\n and drops rows where finishtime is 0. \n\n Arguments:\n ----------\n training_data_file_path \n - file path where training data is located. Assumes data is a .csv file in same\n format as data/data_train.csv (output of download_data.py script)\n test_data_file_path \n - file path where training data is located. Assumes data is a .csv file in same\n format as data/data_train.csv (output of download_data.py script)\n\n Returns\n -------\n X_train, X_test, y_train, y_test \n (np.array) - Split training/test features and targets\n ' training_data = pd.read_csv(training_data_file_path) test_data = pd.read_csv(test_data_file_path) assert (('finishtime' in training_data.columns) and ('finishtime' in test_data.columns)), "Missing column 'finishtime'" y_train = training_data['finishtime'] y_train.fillna('0.00.00', inplace=True) y_train[(~ y_train.str.contains('\\d\\.\\d{2}\\.\\d{2}'))] = '0.00.00' y_train = np.array(list(map((lambda x: time_parser(x)), y_train))) training_data['finishtime'] = y_train training_data = training_data[(training_data['finishtime'] != 0.0)] X_train = training_data.drop('finishtime', axis=1) y_train = training_data['finishtime'] y_test = test_data['finishtime'] y_test.fillna('0.00.00', inplace=True) y_test[(~ y_test.str.contains('\\d\\.\\d{2}\\.\\d{2}'))] = '0.00.00' y_test = np.array(list(map((lambda x: time_parser(x)), y_test))) test_data['finishtime'] = y_test test_data = test_data[(test_data['finishtime'] != 0.0)] X_test = test_data.drop('finishtime', axis=1) y_test = test_data['finishtime'] return (X_train, X_test, y_train, y_test)
Data loading and cleaning function. Converts the finishtime column to seconds and drops rows where finishtime is 0. Arguments: ---------- training_data_file_path - file path where the training data is located. Assumes data is a .csv file in same format as data/data_train.csv (output of download_data.py script) test_data_file_path - file path where the test data is located. Assumes data is a .csv file in same format as data/data_train.csv (output of download_data.py script) Returns ------- X_train, X_test, y_train, y_test (np.array) - Split training/test features and targets
src/linear_model.py
load_and_parse_data
RobBlumberg/DSCI_522_Group_302
1
python
def load_and_parse_data(training_data_file_path, test_data_file_path): '\n Data loading and cleaning function. Converts finishtime column to seconds\n and drops rows where finishtime is 0. \n\n Arguments:\n ----------\n training_data_file_path \n - file path where training data is located. Assumes data is a .csv file in same\n format as data/data_train.csv (output of download_data.py script)\n test_data_file_path \n - file path where training data is located. Assumes data is a .csv file in same\n format as data/data_train.csv (output of download_data.py script)\n\n Returns\n -------\n X_train, X_test, y_train, y_test \n (np.array) - Split training/test features and targets\n ' training_data = pd.read_csv(training_data_file_path) test_data = pd.read_csv(test_data_file_path) assert (('finishtime' in training_data.columns) and ('finishtime' in test_data.columns)), "Missing column 'finishtime'" y_train = training_data['finishtime'] y_train.fillna('0.00.00', inplace=True) y_train[(~ y_train.str.contains('\\d\\.\\d{2}\\.\\d{2}'))] = '0.00.00' y_train = np.array(list(map((lambda x: time_parser(x)), y_train))) training_data['finishtime'] = y_train training_data = training_data[(training_data['finishtime'] != 0.0)] X_train = training_data.drop('finishtime', axis=1) y_train = training_data['finishtime'] y_test = test_data['finishtime'] y_test.fillna('0.00.00', inplace=True) y_test[(~ y_test.str.contains('\\d\\.\\d{2}\\.\\d{2}'))] = '0.00.00' y_test = np.array(list(map((lambda x: time_parser(x)), y_test))) test_data['finishtime'] = y_test test_data = test_data[(test_data['finishtime'] != 0.0)] X_test = test_data.drop('finishtime', axis=1) y_test = test_data['finishtime'] return (X_train, X_test, y_train, y_test)
def load_and_parse_data(training_data_file_path, test_data_file_path): '\n Data loading and cleaning function. Converts finishtime column to seconds\n and drops rows where finishtime is 0. \n\n Arguments:\n ----------\n training_data_file_path \n - file path where training data is located. Assumes data is a .csv file in same\n format as data/data_train.csv (output of download_data.py script)\n test_data_file_path \n - file path where training data is located. Assumes data is a .csv file in same\n format as data/data_train.csv (output of download_data.py script)\n\n Returns\n -------\n X_train, X_test, y_train, y_test \n (np.array) - Split training/test features and targets\n ' training_data = pd.read_csv(training_data_file_path) test_data = pd.read_csv(test_data_file_path) assert (('finishtime' in training_data.columns) and ('finishtime' in test_data.columns)), "Missing column 'finishtime'" y_train = training_data['finishtime'] y_train.fillna('0.00.00', inplace=True) y_train[(~ y_train.str.contains('\\d\\.\\d{2}\\.\\d{2}'))] = '0.00.00' y_train = np.array(list(map((lambda x: time_parser(x)), y_train))) training_data['finishtime'] = y_train training_data = training_data[(training_data['finishtime'] != 0.0)] X_train = training_data.drop('finishtime', axis=1) y_train = training_data['finishtime'] y_test = test_data['finishtime'] y_test.fillna('0.00.00', inplace=True) y_test[(~ y_test.str.contains('\\d\\.\\d{2}\\.\\d{2}'))] = '0.00.00' y_test = np.array(list(map((lambda x: time_parser(x)), y_test))) test_data['finishtime'] = y_test test_data = test_data[(test_data['finishtime'] != 0.0)] X_test = test_data.drop('finishtime', axis=1) y_test = test_data['finishtime'] return (X_train, X_test, y_train, y_test)<|docstring|>Data loading and cleaning function. Converts finishtime column to seconds and drops rows where finishtime is 0. Arguments: ---------- training_data_file_path - file path where training data is located. Assumes data is a .csv file in same format as data/data_train.csv (output of download_data.py script) test_data_file_path - file path where training data is located. Assumes data is a .csv file in same format as data/data_train.csv (output of download_data.py script) Returns ------- X_train, X_test, y_train, y_test (np.array) - Split training/test features and targets<|endoftext|>
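The cleaning step above maps any finishtime that is missing or does not match the d.dd.dd pattern to '0.00.00', which converts to 0.0 seconds and is then dropped. A small stand-in Series showing that filter (the values are illustrative):

import pandas as pd

finishtime = pd.Series(['1.49.07', '---', None])
finishtime = finishtime.fillna('0.00.00')
finishtime[~finishtime.str.contains('\\d\\.\\d{2}\\.\\d{2}')] = '0.00.00'
print(finishtime.tolist())   # ['1.49.07', '0.00.00', '0.00.00']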
350e8bfb6d7c6d9af2299823f339d1f723cc7a8870ec7567adffece06a4ec2f0
def data_preprocessing(training_data_file_path, test_data_file_path): '\n Data preprocessing for linear regression. Applies imputer (mean), \n and polynomial order 5 tranformation to numeric features. Applies \n imputer (fill with "not_specified" constant value) and one-hot encoding\n to categorical features. Uses output of load_and_parse_data() function.\n\n Arguments:\n ----------\n training_data_file_path \n - file path where training data is located. Assumes data is a .csv file in same\n format as data/data_train.csv (output of download_data.py script)\n test_data_file_path \n - file path where training data is located. Assumes data is a .csv file in same\n format as data/data_train.csv (output of download_data.py script)\n\n Returns\n -------\n X_train_preprocessed, X_test_preprocessed, y_train, y_test \n (np.array) - Preprocessed training/test features and targets\n ' (X_train, X_test, y_train, y_test) = load_and_parse_data(training_data_file_path, test_data_file_path) assert all([(x in X_train.columns) for x in ['country', 'dataset']]), "Must have colums 'country', 'dataset'" assert all([(x in X_train.columns) for x in ['declarwt', 'age', 'winodds', 'stake', 'distance']]), "Must have colums 'declarwt', 'age', 'winodds', 'stake', 'distance'" numeric_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='mean')), ('poly', PolynomialFeatures(degree=5))]) categorical_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='constant', fill_value='not_specified')), ('one_hot_encoder', OneHotEncoder())]) categorical_features = ['country', 'dataset'] numeric_features = ['declarwt', 'age', 'winodds', 'stake', 'distance'] preprocessing = ColumnTransformer(transformers=[('num', numeric_transformer, numeric_features), ('cat', categorical_transformer, categorical_features)]) full_pipeline = Pipeline([('data_preprocessing', preprocessing)]) X_train_preprocessed = full_pipeline.fit_transform(X_train) X_test_preprocessed = full_pipeline.transform(X_test) return (X_train_preprocessed, X_test_preprocessed, y_train, y_test)
Data preprocessing for linear regression. Applies a mean imputer and a degree-5 polynomial transformation to numeric features, and applies an imputer (filling with the "not_specified" constant value) and one-hot encoding to categorical features. Uses the output of the load_and_parse_data() function. Arguments: ---------- training_data_file_path - file path where the training data is located. Assumes data is a .csv file in same format as data/data_train.csv (output of download_data.py script) test_data_file_path - file path where the test data is located. Assumes data is a .csv file in same format as data/data_train.csv (output of download_data.py script) Returns ------- X_train_preprocessed, X_test_preprocessed, y_train, y_test (np.array) - Preprocessed training/test features and targets
src/linear_model.py
data_preprocessing
RobBlumberg/DSCI_522_Group_302
1
python
def data_preprocessing(training_data_file_path, test_data_file_path): '\n Data preprocessing for linear regression. Applies imputer (mean), \n and polynomial order 5 tranformation to numeric features. Applies \n imputer (fill with "not_specified" constant value) and one-hot encoding\n to categorical features. Uses output of load_and_parse_data() function.\n\n Arguments:\n ----------\n training_data_file_path \n - file path where training data is located. Assumes data is a .csv file in same\n format as data/data_train.csv (output of download_data.py script)\n test_data_file_path \n - file path where training data is located. Assumes data is a .csv file in same\n format as data/data_train.csv (output of download_data.py script)\n\n Returns\n -------\n X_train_preprocessed, X_test_preprocessed, y_train, y_test \n (np.array) - Preprocessed training/test features and targets\n ' (X_train, X_test, y_train, y_test) = load_and_parse_data(training_data_file_path, test_data_file_path) assert all([(x in X_train.columns) for x in ['country', 'dataset']]), "Must have colums 'country', 'dataset'" assert all([(x in X_train.columns) for x in ['declarwt', 'age', 'winodds', 'stake', 'distance']]), "Must have colums 'declarwt', 'age', 'winodds', 'stake', 'distance'" numeric_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='mean')), ('poly', PolynomialFeatures(degree=5))]) categorical_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='constant', fill_value='not_specified')), ('one_hot_encoder', OneHotEncoder())]) categorical_features = ['country', 'dataset'] numeric_features = ['declarwt', 'age', 'winodds', 'stake', 'distance'] preprocessing = ColumnTransformer(transformers=[('num', numeric_transformer, numeric_features), ('cat', categorical_transformer, categorical_features)]) full_pipeline = Pipeline([('data_preprocessing', preprocessing)]) X_train_preprocessed = full_pipeline.fit_transform(X_train) X_test_preprocessed = full_pipeline.transform(X_test) return (X_train_preprocessed, X_test_preprocessed, y_train, y_test)
def data_preprocessing(training_data_file_path, test_data_file_path): '\n Data preprocessing for linear regression. Applies imputer (mean), \n and polynomial order 5 tranformation to numeric features. Applies \n imputer (fill with "not_specified" constant value) and one-hot encoding\n to categorical features. Uses output of load_and_parse_data() function.\n\n Arguments:\n ----------\n training_data_file_path \n - file path where training data is located. Assumes data is a .csv file in same\n format as data/data_train.csv (output of download_data.py script)\n test_data_file_path \n - file path where training data is located. Assumes data is a .csv file in same\n format as data/data_train.csv (output of download_data.py script)\n\n Returns\n -------\n X_train_preprocessed, X_test_preprocessed, y_train, y_test \n (np.array) - Preprocessed training/test features and targets\n ' (X_train, X_test, y_train, y_test) = load_and_parse_data(training_data_file_path, test_data_file_path) assert all([(x in X_train.columns) for x in ['country', 'dataset']]), "Must have colums 'country', 'dataset'" assert all([(x in X_train.columns) for x in ['declarwt', 'age', 'winodds', 'stake', 'distance']]), "Must have colums 'declarwt', 'age', 'winodds', 'stake', 'distance'" numeric_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='mean')), ('poly', PolynomialFeatures(degree=5))]) categorical_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='constant', fill_value='not_specified')), ('one_hot_encoder', OneHotEncoder())]) categorical_features = ['country', 'dataset'] numeric_features = ['declarwt', 'age', 'winodds', 'stake', 'distance'] preprocessing = ColumnTransformer(transformers=[('num', numeric_transformer, numeric_features), ('cat', categorical_transformer, categorical_features)]) full_pipeline = Pipeline([('data_preprocessing', preprocessing)]) X_train_preprocessed = full_pipeline.fit_transform(X_train) X_test_preprocessed = full_pipeline.transform(X_test) return (X_train_preprocessed, X_test_preprocessed, y_train, y_test)<|docstring|>Data preprocessing for linear regression. Applies imputer (mean), and polynomial order 5 tranformation to numeric features. Applies imputer (fill with "not_specified" constant value) and one-hot encoding to categorical features. Uses output of load_and_parse_data() function. Arguments: ---------- training_data_file_path - file path where training data is located. Assumes data is a .csv file in same format as data/data_train.csv (output of download_data.py script) test_data_file_path - file path where training data is located. Assumes data is a .csv file in same format as data/data_train.csv (output of download_data.py script) Returns ------- X_train_preprocessed, X_test_preprocessed, y_train, y_test (np.array) - Preprocessed training/test features and targets<|endoftext|>
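PolynomialFeatures(degree=5) expands the five numeric columns into every monomial of total degree up to 5 (plus a bias column), which is where most of the model's features come from. A quick way to see the resulting width, using a random stand-in array of the same shape:

import numpy as np
from sklearn.preprocessing import PolynomialFeatures

X_num = np.random.rand(4, 5)   # stand-in for declarwt, age, winodds, stake, distance
expanded = PolynomialFeatures(degree=5).fit_transform(X_num)
print(expanded.shape)          # (4, 252): C(10, 5) monomials of total degree <= 5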
7fd59b2b4b07e653bffe874e597c889982b1fc1a83c5dca3a47f050dbca6a999
def linear_model_results(training_data_file_path, test_data_file_path, grid_results_file_path): '\n Fits pre-optimized linear regression model on training data,\n and makes predictions on test data. Uses output of \n data_preprocessing() function.\n\n Arguments:\n ----------\n training_data_file_path \n - file path where training data is located. Assumes data is a .csv file in same\n format as data/data_train.csv (output of download_data.py script)\n test_data_file_path \n - file path where training data is located. Assumes data is a .csv file in same\n format as data/data_train.csv (output of download_data.py script)\n\n Returns\n -------\n test_results \n (pd.DataFrame) - Data frame containing test set predictions and actual values\n ' grid_results = pd.read_csv(grid_results_file_path) n_features_to_select = grid_results['n_features_to_select'][0] (X_train_preprocessed, X_test_preprocessed, y_train, y_test) = data_preprocessing(training_data_file_path, test_data_file_path) rfe = RFE(LinearRegression(), n_features_to_select=n_features_to_select) rfe.fit(X_train_preprocessed, y_train) lr = LinearRegression() lr.fit(rfe.transform(X_train_preprocessed), y_train) test_results = pd.DataFrame({'Actual finish time': y_test, 'Predicted finish time': lr.predict(rfe.transform(X_test_preprocessed))}) return test_results
Fits the pre-optimized linear regression model on the training data and makes predictions on the test data. Uses the output of the data_preprocessing() function. Arguments: ---------- training_data_file_path - file path where the training data is located. Assumes data is a .csv file in same format as data/data_train.csv (output of download_data.py script) test_data_file_path - file path where the test data is located. Assumes data is a .csv file in same format as data/data_train.csv (output of download_data.py script) grid_results_file_path - file path where the grid search results are saved Returns ------- test_results (pd.DataFrame) - Data frame containing test set predictions and actual values
src/linear_model.py
linear_model_results
RobBlumberg/DSCI_522_Group_302
1
python
def linear_model_results(training_data_file_path, test_data_file_path, grid_results_file_path): '\n Fits pre-optimized linear regression model on training data,\n and makes predictions on test data. Uses output of \n data_preprocessing() function.\n\n Arguments:\n ----------\n training_data_file_path \n - file path where training data is located. Assumes data is a .csv file in same\n format as data/data_train.csv (output of download_data.py script)\n test_data_file_path \n - file path where training data is located. Assumes data is a .csv file in same\n format as data/data_train.csv (output of download_data.py script)\n\n Returns\n -------\n test_results \n (pd.DataFrame) - Data frame containing test set predictions and actual values\n ' grid_results = pd.read_csv(grid_results_file_path) n_features_to_select = grid_results['n_features_to_select'][0] (X_train_preprocessed, X_test_preprocessed, y_train, y_test) = data_preprocessing(training_data_file_path, test_data_file_path) rfe = RFE(LinearRegression(), n_features_to_select=n_features_to_select) rfe.fit(X_train_preprocessed, y_train) lr = LinearRegression() lr.fit(rfe.transform(X_train_preprocessed), y_train) test_results = pd.DataFrame({'Actual finish time': y_test, 'Predicted finish time': lr.predict(rfe.transform(X_test_preprocessed))}) return test_results
def linear_model_results(training_data_file_path, test_data_file_path, grid_results_file_path): '\n Fits pre-optimized linear regression model on training data,\n and makes predictions on test data. Uses output of \n data_preprocessing() function.\n\n Arguments:\n ----------\n training_data_file_path \n - file path where training data is located. Assumes data is a .csv file in same\n format as data/data_train.csv (output of download_data.py script)\n test_data_file_path \n - file path where training data is located. Assumes data is a .csv file in same\n format as data/data_train.csv (output of download_data.py script)\n\n Returns\n -------\n test_results \n (pd.DataFrame) - Data frame containing test set predictions and actual values\n ' grid_results = pd.read_csv(grid_results_file_path) n_features_to_select = grid_results['n_features_to_select'][0] (X_train_preprocessed, X_test_preprocessed, y_train, y_test) = data_preprocessing(training_data_file_path, test_data_file_path) rfe = RFE(LinearRegression(), n_features_to_select=n_features_to_select) rfe.fit(X_train_preprocessed, y_train) lr = LinearRegression() lr.fit(rfe.transform(X_train_preprocessed), y_train) test_results = pd.DataFrame({'Actual finish time': y_test, 'Predicted finish time': lr.predict(rfe.transform(X_test_preprocessed))}) return test_results<|docstring|>Fits pre-optimized linear regression model on training data, and makes predictions on test data. Uses output of data_preprocessing() function. Arguments: ---------- training_data_file_path - file path where training data is located. Assumes data is a .csv file in same format as data/data_train.csv (output of download_data.py script) test_data_file_path - file path where training data is located. Assumes data is a .csv file in same format as data/data_train.csv (output of download_data.py script) Returns ------- test_results (pd.DataFrame) - Data frame containing test set predictions and actual values<|endoftext|>
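RFE with a LinearRegression estimator repeatedly refits the model and prunes the weakest coefficients until n_features_to_select columns remain. A compact, self-contained illustration on toy data (the data and feature count here are made up):

import numpy as np
from sklearn.feature_selection import RFE
from sklearn.linear_model import LinearRegression

rng = np.random.default_rng(0)
X = rng.random((50, 6))
y = 3 * X[:, 0] - 2 * X[:, 1] + 0.01 * rng.random(50)   # only two informative columns
rfe = RFE(LinearRegression(), n_features_to_select=2).fit(X, y)
print(rfe.support_)   # boolean mask marking the two retained columns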
d13ae664411753906bba0ae3483541e1d2a84c048b80207600fb134cc434b6b8
def plot_results(training_data_file_path, test_data_file_path, grid_results_file_path, image_plot_file_path): '\n Plots results from linear regression on test set. Uses output of \n linear_model_results() function. Saves plot to specified file path.\n\n Arguments:\n ----------\n training_data_file_path \n - file path where training data is located. Assumes data is a .csv file in same\n format as data/data_train.csv (output of download_data.py script)\n test_data_file_path \n - file path where training data is located. Assumes data is a .csv file in same\n format as data/data_train.csv (output of download_data.py script)\n image_plot_file_path\n - file path where image of results plot will be saved\n\n Returns\n -------\n None\n ' test_results = linear_model_results(training_data_file_path, test_data_file_path, grid_results_file_path) (fig, ax) = plt.subplots(1, 1, figsize=(8, 8)) ax.scatter(test_results['Predicted finish time'], test_results['Actual finish time'], alpha=0.5) ax.plot([min(test_results['Actual finish time']), max(test_results['Actual finish time'])], [min(test_results['Actual finish time']), max(test_results['Actual finish time'])], linestyle='--', color='red') ax.set_xlabel('Predicted finish time (s)', size=14) ax.set_ylabel('Actual finish time (s)', size=14) ax.set_title('Actual vs predicted finish times \non test set from optimized linear model', size=15) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) plt.savefig(image_plot_file_path)
Plots results from linear regression on test set. Uses output of linear_model_results() function. Saves plot to specified file path. Arguments: ---------- training_data_file_path - file path where training data is located. Assumes data is a .csv file in same format as data/data_train.csv (output of download_data.py script) test_data_file_path - file path where training data is located. Assumes data is a .csv file in same format as data/data_train.csv (output of download_data.py script) image_plot_file_path - file path where image of results plot will be saved Returns ------- None
src/linear_model.py
plot_results
RobBlumberg/DSCI_522_Group_302
1
python
def plot_results(training_data_file_path, test_data_file_path, grid_results_file_path, image_plot_file_path): '\n Plots results from linear regression on test set. Uses output of \n linear_model_results() function. Saves plot to specified file path.\n\n Arguments:\n ----------\n training_data_file_path \n - file path where training data is located. Assumes data is a .csv file in same\n format as data/data_train.csv (output of download_data.py script)\n test_data_file_path \n - file path where training data is located. Assumes data is a .csv file in same\n format as data/data_train.csv (output of download_data.py script)\n image_plot_file_path\n - file path where image of results plot will be saved\n\n Returns\n -------\n None\n ' test_results = linear_model_results(training_data_file_path, test_data_file_path, grid_results_file_path) (fig, ax) = plt.subplots(1, 1, figsize=(8, 8)) ax.scatter(test_results['Predicted finish time'], test_results['Actual finish time'], alpha=0.5) ax.plot([min(test_results['Actual finish time']), max(test_results['Actual finish time'])], [min(test_results['Actual finish time']), max(test_results['Actual finish time'])], linestyle='--', color='red') ax.set_xlabel('Predicted finish time (s)', size=14) ax.set_ylabel('Actual finish time (s)', size=14) ax.set_title('Actual vs predicted finish times \non test set from optimized linear model', size=15) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) plt.savefig(image_plot_file_path)
def plot_results(training_data_file_path, test_data_file_path, grid_results_file_path, image_plot_file_path): '\n Plots results from linear regression on test set. Uses output of \n linear_model_results() function. Saves plot to specified file path.\n\n Arguments:\n ----------\n training_data_file_path \n - file path where training data is located. Assumes data is a .csv file in same\n format as data/data_train.csv (output of download_data.py script)\n test_data_file_path \n - file path where training data is located. Assumes data is a .csv file in same\n format as data/data_train.csv (output of download_data.py script)\n image_plot_file_path\n - file path where image of results plot will be saved\n\n Returns\n -------\n None\n ' test_results = linear_model_results(training_data_file_path, test_data_file_path, grid_results_file_path) (fig, ax) = plt.subplots(1, 1, figsize=(8, 8)) ax.scatter(test_results['Predicted finish time'], test_results['Actual finish time'], alpha=0.5) ax.plot([min(test_results['Actual finish time']), max(test_results['Actual finish time'])], [min(test_results['Actual finish time']), max(test_results['Actual finish time'])], linestyle='--', color='red') ax.set_xlabel('Predicted finish time (s)', size=14) ax.set_ylabel('Actual finish time (s)', size=14) ax.set_title('Actual vs predicted finish times \non test set from optimized linear model', size=15) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) plt.savefig(image_plot_file_path)<|docstring|>Plots results from linear regression on test set. Uses output of linear_model_results() function. Saves plot to specified file path. Arguments: ---------- training_data_file_path - file path where training data is located. Assumes data is a .csv file in same format as data/data_train.csv (output of download_data.py script) test_data_file_path - file path where training data is located. Assumes data is a .csv file in same format as data/data_train.csv (output of download_data.py script) image_plot_file_path - file path where image of results plot will be saved Returns ------- None<|endoftext|>
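The plotting record draws predicted against actual values with a dashed red identity line as the reference for perfect predictions. A self-contained matplotlib sketch of the same figure, using a small hand-made results frame rather than the pipeline output:

import matplotlib.pyplot as plt
import pandas as pd

results = pd.DataFrame({'Actual finish time': [9000.0, 9500.0, 10200.0],
                        'Predicted finish time': [9100.0, 9400.0, 10050.0]})

fig, ax = plt.subplots(figsize=(8, 8))
ax.scatter(results['Predicted finish time'], results['Actual finish time'], alpha=0.5)
lo, hi = results['Actual finish time'].min(), results['Actual finish time'].max()
ax.plot([lo, hi], [lo, hi], linestyle='--', color='red')  # points on this line are perfect predictions
ax.set_xlabel('Predicted finish time (s)')
ax.set_ylabel('Actual finish time (s)')
fig.savefig('results.png')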
1836b3db7b85cdeaadbbe9acc1b70764bd6d4e777f20ed40bb5fd67570cb1e69
def setup() -> None: 'Set up loggers.' logging.TRACE = TRACE_LEVEL logging.addLevelName(TRACE_LEVEL, 'TRACE') Logger.trace = _monkeypatch_trace log_level = (TRACE_LEVEL if constants.DEBUG_MODE else logging.INFO) format_string = '%(asctime)s | %(name)s | %(levelname)s | %(message)s' log_format = logging.Formatter(format_string) log_file = Path('logs', 'bot.log') log_file.parent.mkdir(exist_ok=True) file_handler = handlers.RotatingFileHandler(log_file, maxBytes=5242880, backupCount=7, encoding='utf8') file_handler.setFormatter(log_format) root_log = logging.getLogger() root_log.setLevel(log_level) root_log.addHandler(file_handler) if ('COLOREDLOGS_LEVEL_STYLES' not in os.environ): coloredlogs.DEFAULT_LEVEL_STYLES = {**coloredlogs.DEFAULT_LEVEL_STYLES, 'trace': {'color': 246}, 'critical': {'background': 'red'}, 'debug': coloredlogs.DEFAULT_LEVEL_STYLES['info']} if ('COLOREDLOGS_LOG_FORMAT' not in os.environ): coloredlogs.DEFAULT_LOG_FORMAT = format_string if ('COLOREDLOGS_LOG_LEVEL' not in os.environ): coloredlogs.DEFAULT_LOG_LEVEL = log_level coloredlogs.install(logger=root_log, stream=sys.stdout) logging.getLogger('discord').setLevel(logging.WARNING)
Set up loggers.
griffinbot/logging.py
setup
MrAwesomeRocks/griffinbot
2
python
def setup() -> None: logging.TRACE = TRACE_LEVEL logging.addLevelName(TRACE_LEVEL, 'TRACE') Logger.trace = _monkeypatch_trace log_level = (TRACE_LEVEL if constants.DEBUG_MODE else logging.INFO) format_string = '%(asctime)s | %(name)s | %(levelname)s | %(message)s' log_format = logging.Formatter(format_string) log_file = Path('logs', 'bot.log') log_file.parent.mkdir(exist_ok=True) file_handler = handlers.RotatingFileHandler(log_file, maxBytes=5242880, backupCount=7, encoding='utf8') file_handler.setFormatter(log_format) root_log = logging.getLogger() root_log.setLevel(log_level) root_log.addHandler(file_handler) if ('COLOREDLOGS_LEVEL_STYLES' not in os.environ): coloredlogs.DEFAULT_LEVEL_STYLES = {**coloredlogs.DEFAULT_LEVEL_STYLES, 'trace': {'color': 246}, 'critical': {'background': 'red'}, 'debug': coloredlogs.DEFAULT_LEVEL_STYLES['info']} if ('COLOREDLOGS_LOG_FORMAT' not in os.environ): coloredlogs.DEFAULT_LOG_FORMAT = format_string if ('COLOREDLOGS_LOG_LEVEL' not in os.environ): coloredlogs.DEFAULT_LOG_LEVEL = log_level coloredlogs.install(logger=root_log, stream=sys.stdout) logging.getLogger('discord').setLevel(logging.WARNING)
def setup() -> None: logging.TRACE = TRACE_LEVEL logging.addLevelName(TRACE_LEVEL, 'TRACE') Logger.trace = _monkeypatch_trace log_level = (TRACE_LEVEL if constants.DEBUG_MODE else logging.INFO) format_string = '%(asctime)s | %(name)s | %(levelname)s | %(message)s' log_format = logging.Formatter(format_string) log_file = Path('logs', 'bot.log') log_file.parent.mkdir(exist_ok=True) file_handler = handlers.RotatingFileHandler(log_file, maxBytes=5242880, backupCount=7, encoding='utf8') file_handler.setFormatter(log_format) root_log = logging.getLogger() root_log.setLevel(log_level) root_log.addHandler(file_handler) if ('COLOREDLOGS_LEVEL_STYLES' not in os.environ): coloredlogs.DEFAULT_LEVEL_STYLES = {**coloredlogs.DEFAULT_LEVEL_STYLES, 'trace': {'color': 246}, 'critical': {'background': 'red'}, 'debug': coloredlogs.DEFAULT_LEVEL_STYLES['info']} if ('COLOREDLOGS_LOG_FORMAT' not in os.environ): coloredlogs.DEFAULT_LOG_FORMAT = format_string if ('COLOREDLOGS_LOG_LEVEL' not in os.environ): coloredlogs.DEFAULT_LOG_LEVEL = log_level coloredlogs.install(logger=root_log, stream=sys.stdout) logging.getLogger('discord').setLevel(logging.WARNING)<|docstring|>Set up loggers.<|endoftext|>
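setup() registers a custom TRACE level, routes everything through a size-rotated log file, and then layers coloredlogs on top. A stdlib-only sketch of the first two steps; the TRACE value of 5 is an assumption, since the repository defines TRACE_LEVEL elsewhere, and coloredlogs is left out:

import logging
from logging import handlers

TRACE = 5  # assumed value; the repository's TRACE_LEVEL constant is defined elsewhere
logging.addLevelName(TRACE, 'TRACE')

def trace(self, msg, *args, **kwargs):
    # Same idea as the _monkeypatch_trace record that follows: only log when the level is enabled.
    if self.isEnabledFor(TRACE):
        self._log(TRACE, msg, args, **kwargs)

logging.Logger.trace = trace

file_handler = handlers.RotatingFileHandler('bot.log', maxBytes=5242880,
                                            backupCount=7, encoding='utf8')
file_handler.setFormatter(logging.Formatter('%(asctime)s | %(name)s | %(levelname)s | %(message)s'))

root = logging.getLogger()
root.setLevel(TRACE)
root.addHandler(file_handler)

logging.getLogger(__name__).trace('trace message written to bot.log')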
50d8bb4a88ade8b215abb73fe2e1ef22d085dfa6251c059b02e092829a49a54c
def _monkeypatch_trace(self: logging.Logger, msg: str, *args, **kwargs) -> None: '\n Log \'msg % args\' with severity \'TRACE\'.\n\n To pass exception information, use the keyword argument exc_info with\n a true value, e.g.\n logger.trace("Houston, we have an %s", "interesting problem", exc_info=1)\n ' if self.isEnabledFor(TRACE_LEVEL): self._log(TRACE_LEVEL, msg, args, **kwargs)
Log 'msg % args' with severity 'TRACE'. To pass exception information, use the keyword argument exc_info with a true value, e.g. logger.trace("Houston, we have an %s", "interesting problem", exc_info=1)
griffinbot/logging.py
_monkeypatch_trace
MrAwesomeRocks/griffinbot
2
python
def _monkeypatch_trace(self: logging.Logger, msg: str, *args, **kwargs) -> None: '\n Log \'msg % args\' with severity \'TRACE\'.\n\n To pass exception information, use the keyword argument exc_info with\n a true value, e.g.\n logger.trace("Houston, we have an %s", "interesting problem", exc_info=1)\n ' if self.isEnabledFor(TRACE_LEVEL): self._log(TRACE_LEVEL, msg, args, **kwargs)
def _monkeypatch_trace(self: logging.Logger, msg: str, *args, **kwargs) -> None: '\n Log \'msg % args\' with severity \'TRACE\'.\n\n To pass exception information, use the keyword argument exc_info with\n a true value, e.g.\n logger.trace("Houston, we have an %s", "interesting problem", exc_info=1)\n ' if self.isEnabledFor(TRACE_LEVEL): self._log(TRACE_LEVEL, msg, args, **kwargs)<|docstring|>Log 'msg % args' with severity 'TRACE'. To pass exception information, use the keyword argument exc_info with a true value, e.g. logger.trace("Houston, we have an %s", "interesting problem", exc_info=1)<|endoftext|>
5ca93845bb0f5878a4477faaa81d753303838d4e425b4d14300f76226936d115
def box_to_point8(boxes): '\n Args:\n boxes: nx4\n\n Returns:\n (nx4)x2\n ' b = boxes[(:, [0, 1, 2, 3, 0, 3, 2, 1])] b = b.reshape(((- 1), 2)) return b
Args: boxes: nx4 Returns: (nx4)x2
official/vision/detection/GSOC 21/Mask RCNN/Experiments/Experiment 01/Custom Code Base/common.py
box_to_point8
Anustup900/models
0
python
def box_to_point8(boxes): '\n Args:\n boxes: nx4\n\n Returns:\n (nx4)x2\n ' b = boxes[(:, [0, 1, 2, 3, 0, 3, 2, 1])] b = b.reshape(((- 1), 2)) return b
def box_to_point8(boxes): '\n Args:\n boxes: nx4\n\n Returns:\n (nx4)x2\n ' b = boxes[(:, [0, 1, 2, 3, 0, 3, 2, 1])] b = b.reshape(((- 1), 2)) return b<|docstring|>Args: boxes: nx4 Returns: (nx4)x2<|endoftext|>
145b3e556a0d30f6a9f11b19f5a5a241091c9b9bb6f2ab33a1819112669a27f1
def point8_to_box(points): '\n Args:\n points: (nx4)x2\n Returns:\n nx4 boxes (x1y1x2y2)\n ' p = points.reshape(((- 1), 4, 2)) minxy = p.min(axis=1) maxxy = p.max(axis=1) return np.concatenate((minxy, maxxy), axis=1)
Args: points: (nx4)x2 Returns: nx4 boxes (x1y1x2y2)
official/vision/detection/GSOC 21/Mask RCNN/Experiments/Experiment 01/Custom Code Base/common.py
point8_to_box
Anustup900/models
0
python
def point8_to_box(points): '\n Args:\n points: (nx4)x2\n Returns:\n nx4 boxes (x1y1x2y2)\n ' p = points.reshape(((- 1), 4, 2)) minxy = p.min(axis=1) maxxy = p.max(axis=1) return np.concatenate((minxy, maxxy), axis=1)
def point8_to_box(points): '\n Args:\n points: (nx4)x2\n Returns:\n nx4 boxes (x1y1x2y2)\n ' p = points.reshape(((- 1), 4, 2)) minxy = p.min(axis=1) maxxy = p.max(axis=1) return np.concatenate((minxy, maxxy), axis=1)<|docstring|>Args: points: (nx4)x2 Returns: nx4 boxes (x1y1x2y2)<|endoftext|>
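box_to_point8 expands each x1y1x2y2 box into its four corner points, and point8_to_box recovers the tight box by taking the per-group minimum and maximum. A short NumPy round-trip check of the two helpers:

import numpy as np

boxes = np.array([[10., 20., 50., 80.],
                  [0., 5., 15., 25.]])

# box_to_point8: the index trick yields (x1,y1), (x2,y2), (x1,y2), (x2,y1) per box.
points = boxes[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(-1, 2)
assert points.shape == (8, 2)

# point8_to_box: group every four corners and take the min/max envelope.
grouped = points.reshape(-1, 4, 2)
recovered = np.concatenate((grouped.min(axis=1), grouped.max(axis=1)), axis=1)
assert np.allclose(recovered, boxes)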
630b9dd4851d42e554f68f7ee013c01d756c4a3a1de9c170693e4298be952640
def segmentation_to_mask(polys, height, width): '\n Convert polygons to binary masks.\n\n Args:\n polys: a list of nx2 float array. Each array contains many (x, y) coordinates.\n\n Returns:\n a binary matrix of (height, width)\n ' polys = [p.flatten().tolist() for p in polys] assert (len(polys) > 0), 'Polygons are empty!' import pycocotools.mask as cocomask rles = cocomask.frPyObjects(polys, height, width) rle = cocomask.merge(rles) return cocomask.decode(rle)
Convert polygons to binary masks. Args: polys: a list of nx2 float array. Each array contains many (x, y) coordinates. Returns: a binary matrix of (height, width)
official/vision/detection/GSOC 21/Mask RCNN/Experiments/Experiment 01/Custom Code Base/common.py
segmentation_to_mask
Anustup900/models
0
python
def segmentation_to_mask(polys, height, width): '\n Convert polygons to binary masks.\n\n Args:\n polys: a list of nx2 float array. Each array contains many (x, y) coordinates.\n\n Returns:\n a binary matrix of (height, width)\n ' polys = [p.flatten().tolist() for p in polys] assert (len(polys) > 0), 'Polygons are empty!' import pycocotools.mask as cocomask rles = cocomask.frPyObjects(polys, height, width) rle = cocomask.merge(rles) return cocomask.decode(rle)
def segmentation_to_mask(polys, height, width): '\n Convert polygons to binary masks.\n\n Args:\n polys: a list of nx2 float array. Each array contains many (x, y) coordinates.\n\n Returns:\n a binary matrix of (height, width)\n ' polys = [p.flatten().tolist() for p in polys] assert (len(polys) > 0), 'Polygons are empty!' import pycocotools.mask as cocomask rles = cocomask.frPyObjects(polys, height, width) rle = cocomask.merge(rles) return cocomask.decode(rle)<|docstring|>Convert polygons to binary masks. Args: polys: a list of nx2 float array. Each array contains many (x, y) coordinates. Returns: a binary matrix of (height, width)<|endoftext|>
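segmentation_to_mask flattens each polygon, converts the list to run-length encodings with pycocotools, merges them, and decodes the result into a (height, width) binary array. A small usage sketch, assuming pycocotools is installed, that rasterises one square polygon:

import numpy as np
import pycocotools.mask as cocomask

height, width = 10, 10
square = np.array([[2, 2], [6, 2], [6, 6], [2, 6]], dtype=np.float32)  # one nx2 polygon

polys = [square.flatten().tolist()]           # pycocotools expects flat [x0, y0, x1, y1, ...] lists
rles = cocomask.frPyObjects(polys, height, width)
mask = cocomask.decode(cocomask.merge(rles))  # uint8 array of shape (height, width)
print(mask.shape, int(mask.sum()))            # (10, 10) and roughly the area of the square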
d9d36c0afb29100c1a40af457fc05eec463fd383254775fe9181f2bc08272413
def clip_boxes(boxes, shape): '\n Args:\n boxes: (...)x4, float\n shape: h, w\n ' orig_shape = boxes.shape boxes = boxes.reshape([(- 1), 4]) (h, w) = shape boxes[(:, [0, 1])] = np.maximum(boxes[(:, [0, 1])], 0) boxes[(:, 2)] = np.minimum(boxes[(:, 2)], w) boxes[(:, 3)] = np.minimum(boxes[(:, 3)], h) return boxes.reshape(orig_shape)
Args: boxes: (...)x4, float shape: h, w
official/vision/detection/GSOC 21/Mask RCNN/Experiments/Experiment 01/Custom Code Base/common.py
clip_boxes
Anustup900/models
0
python
def clip_boxes(boxes, shape): '\n Args:\n boxes: (...)x4, float\n shape: h, w\n ' orig_shape = boxes.shape boxes = boxes.reshape([(- 1), 4]) (h, w) = shape boxes[(:, [0, 1])] = np.maximum(boxes[(:, [0, 1])], 0) boxes[(:, 2)] = np.minimum(boxes[(:, 2)], w) boxes[(:, 3)] = np.minimum(boxes[(:, 3)], h) return boxes.reshape(orig_shape)
def clip_boxes(boxes, shape): '\n Args:\n boxes: (...)x4, float\n shape: h, w\n ' orig_shape = boxes.shape boxes = boxes.reshape([(- 1), 4]) (h, w) = shape boxes[(:, [0, 1])] = np.maximum(boxes[(:, [0, 1])], 0) boxes[(:, 2)] = np.minimum(boxes[(:, 2)], w) boxes[(:, 3)] = np.minimum(boxes[(:, 3)], h) return boxes.reshape(orig_shape)<|docstring|>Args: boxes: (...)x4, float shape: h, w<|endoftext|>
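clip_boxes simply clamps the coordinates into the image: x1 and y1 up to zero, x2 down to the width, and y2 down to the height. The same operation on two out-of-bounds boxes:

import numpy as np

boxes = np.array([[-5., 10., 250., 90.],
                  [20., -3., 60., 120.]])
h, w = 100, 200

clipped = boxes.copy()
clipped[:, [0, 1]] = np.maximum(clipped[:, [0, 1]], 0)  # x1, y1 >= 0
clipped[:, 2] = np.minimum(clipped[:, 2], w)            # x2 <= width
clipped[:, 3] = np.minimum(clipped[:, 3], h)            # y2 <= height
print(clipped)  # first box becomes [0, 10, 200, 90], second [20, 0, 60, 100]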
1f0324be435ce4f142789645a5f974b9f26a050363d1b4843a4283c0ff2795f8
def filter_boxes_inside_shape(boxes, shape): '\n Args:\n boxes: (nx4), float\n shape: (h, w)\n\n Returns:\n indices: (k, )\n selection: (kx4)\n ' assert (boxes.ndim == 2), boxes.shape assert (len(shape) == 2), shape (h, w) = shape indices = np.where(((((boxes[(:, 0)] >= 0) & (boxes[(:, 1)] >= 0)) & (boxes[(:, 2)] <= w)) & (boxes[(:, 3)] <= h)))[0] return (indices, boxes[(indices, :)])
Args: boxes: (nx4), float shape: (h, w) Returns: indices: (k, ) selection: (kx4)
official/vision/detection/GSOC 21/Mask RCNN/Experiments/Experiment 01/Custom Code Base/common.py
filter_boxes_inside_shape
Anustup900/models
0
python
def filter_boxes_inside_shape(boxes, shape): '\n Args:\n boxes: (nx4), float\n shape: (h, w)\n\n Returns:\n indices: (k, )\n selection: (kx4)\n ' assert (boxes.ndim == 2), boxes.shape assert (len(shape) == 2), shape (h, w) = shape indices = np.where(((((boxes[(:, 0)] >= 0) & (boxes[(:, 1)] >= 0)) & (boxes[(:, 2)] <= w)) & (boxes[(:, 3)] <= h)))[0] return (indices, boxes[(indices, :)])
def filter_boxes_inside_shape(boxes, shape): '\n Args:\n boxes: (nx4), float\n shape: (h, w)\n\n Returns:\n indices: (k, )\n selection: (kx4)\n ' assert (boxes.ndim == 2), boxes.shape assert (len(shape) == 2), shape (h, w) = shape indices = np.where(((((boxes[(:, 0)] >= 0) & (boxes[(:, 1)] >= 0)) & (boxes[(:, 2)] <= w)) & (boxes[(:, 3)] <= h)))[0] return (indices, boxes[(indices, :)])<|docstring|>Args: boxes: (nx4), float shape: (h, w) Returns: indices: (k, ) selection: (kx4)<|endoftext|>
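filter_boxes_inside_shape keeps only the boxes that lie entirely within the image and returns both the surviving indices and the boxes themselves. A small NumPy example:

import numpy as np

boxes = np.array([[10., 10., 50., 50.],    # fully inside
                  [-1., 10., 50., 50.],    # x1 < 0, dropped
                  [10., 10., 120., 50.]])  # x2 > w, dropped
h, w = 100, 100

indices = np.where((boxes[:, 0] >= 0) & (boxes[:, 1] >= 0) &
                   (boxes[:, 2] <= w) & (boxes[:, 3] <= h))[0]
print(indices, boxes[indices])  # [0] and only the first box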
126d71a0b7dcc437f2bb54725d84e2c48f7205021c26ec9778fd5f6fa97599d9
def __init__(self, short_edge_length, max_size, interp=cv2.INTER_LINEAR): '\n Args:\n short_edge_length ([int, int]): a [min, max] interval from which to sample the\n shortest edge length.\n max_size (int): maximum allowed longest edge length.\n ' super(CustomResize, self).__init__() if isinstance(short_edge_length, int): short_edge_length = (short_edge_length, short_edge_length) self._init(locals())
Args: short_edge_length ([int, int]): a [min, max] interval from which to sample the shortest edge length. max_size (int): maximum allowed longest edge length.
official/vision/detection/GSOC 21/Mask RCNN/Experiments/Experiment 01/Custom Code Base/common.py
__init__
Anustup900/models
0
python
def __init__(self, short_edge_length, max_size, interp=cv2.INTER_LINEAR): '\n Args:\n short_edge_length ([int, int]): a [min, max] interval from which to sample the\n shortest edge length.\n max_size (int): maximum allowed longest edge length.\n ' super(CustomResize, self).__init__() if isinstance(short_edge_length, int): short_edge_length = (short_edge_length, short_edge_length) self._init(locals())
def __init__(self, short_edge_length, max_size, interp=cv2.INTER_LINEAR): '\n Args:\n short_edge_length ([int, int]): a [min, max] interval from which to sample the\n shortest edge length.\n max_size (int): maximum allowed longest edge length.\n ' super(CustomResize, self).__init__() if isinstance(short_edge_length, int): short_edge_length = (short_edge_length, short_edge_length) self._init(locals())<|docstring|>Args: short_edge_length ([int, int]): a [min, max] interval from which to sample the shortest edge length. max_size (int): maximum allowed longest edge length.<|endoftext|>
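Only the constructor appears in this record, so the resize rule itself is not shown; the usual behaviour of this kind of augmentation is to scale the image so its shorter side matches a length sampled from [min, max], then shrink further if the longer side would exceed max_size. A sketch of that computation, offered as an illustration of the two parameters rather than as the class's actual implementation:

def short_edge_resize_shape(h, w, short_edge_length=800, max_size=1333):
    # Scale so the shorter side hits the target length ...
    scale = short_edge_length / min(h, w)
    # ... but never let the longer side exceed max_size.
    if max(h, w) * scale > max_size:
        scale = max_size / max(h, w)
    return int(round(h * scale)), int(round(w * scale))

print(short_edge_resize_shape(480, 640))   # (800, 1067): the short side reaches 800
print(short_edge_resize_shape(500, 2000))  # (333, 1333): the max_size cap wins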
37aa7d3b7de8ab9cdad95fe60a5d38c0b413948061bdfde71748a7783c277ade
def __init__(self, filename=None): 'Create a Document instance.' self.filename = filename self.filepath = None self.filedir = None self.filebase = None self.fileext = None if self.filename: self.filepath = os.path.realpath(self.filename) (self.filedir, self.filename) = os.path.split(self.filepath) (self.filebase, self.fileext) = os.path.splitext(self.filename)
Create a Document instance.
Lib/site-packages/wx-2.8-msw-unicode/wx/py/document.py
__init__
William22FM/RobotTest
27
python
def __init__(self, filename=None): self.filename = filename self.filepath = None self.filedir = None self.filebase = None self.fileext = None if self.filename: self.filepath = os.path.realpath(self.filename) (self.filedir, self.filename) = os.path.split(self.filepath) (self.filebase, self.fileext) = os.path.splitext(self.filename)
def __init__(self, filename=None): self.filename = filename self.filepath = None self.filedir = None self.filebase = None self.fileext = None if self.filename: self.filepath = os.path.realpath(self.filename) (self.filedir, self.filename) = os.path.split(self.filepath) (self.filebase, self.fileext) = os.path.splitext(self.filename)<|docstring|>Create a Document instance.<|endoftext|>
1c766bba7dc0d9cd00337dead08a8d3bc13721724acb313d30cc19e65a4657ee
def read(self): 'Return contents of file.' if (self.filepath and os.path.exists(self.filepath)): f = file(self.filepath, 'rb') try: return f.read() finally: f.close() else: return ''
Return contents of file.
Lib/site-packages/wx-2.8-msw-unicode/wx/py/document.py
read
William22FM/RobotTest
27
python
def read(self): if (self.filepath and os.path.exists(self.filepath)): f = file(self.filepath, 'rb') try: return f.read() finally: f.close() else: return ''
def read(self): if (self.filepath and os.path.exists(self.filepath)): f = file(self.filepath, 'rb') try: return f.read() finally: f.close() else: return ''<|docstring|>Return contents of file.<|endoftext|>

aa7503244b15d4a1395a9d9fa5946d44f5e63a21ca9a6fa61f4ff62baf745c20
def write(self, text): 'Write text to file.' try: f = file(self.filepath, 'wb') f.write(text) finally: if f: f.close()
Write text to file.
Lib/site-packages/wx-2.8-msw-unicode/wx/py/document.py
write
William22FM/RobotTest
27
python
def write(self, text): try: f = file(self.filepath, 'wb') f.write(text) finally: if f: f.close()
def write(self, text): try: f = file(self.filepath, 'wb') f.write(text) finally: if f: f.close()<|docstring|>Write text to file.<|endoftext|>
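The three Document records above amount to path bookkeeping plus raw byte I/O; read and write use Python 2's file() builtin, so they predate Python 3. The same behaviour with os.path and open(), as a rough modern equivalent rather than the wxPython class itself:

import os

filename = 'example_document.py'                 # hypothetical file in the working directory
filepath = os.path.realpath(filename)
filedir, filename = os.path.split(filepath)
filebase, fileext = os.path.splitext(filename)
print(filedir, filename, filebase, fileext)

with open(filepath, 'wb') as f:                  # what Document.write() does
    f.write(b"print('hello')\n")
with open(filepath, 'rb') as f:                  # what Document.read() does
    print(f.read())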
93d10f6357bf6a9f343c8a652daa16621f3e7cea10843e6b69450519feb97096
@quo.group() def scan(): 'Scan an IP/ netblock using Shodan.' pass
Scan an IP/ netblock using Shodan.
src/expositor/cli/scan.py
scan
secretuminc/expositor
1
python
@quo.group() def scan(): pass
@quo.group() def scan(): pass<|docstring|>Scan an IP/ netblock using Shodan.<|endoftext|>
e063526a878eaf774cc423d56ec0d9e4e698cabbffae00b22ffd293cc9625107
@scan.command(name='list') def scan_list(): 'Show recently launched scans' key = get_api_key() api = shodan.Shodan(key) try: scans = api.scans() except shodan.APIError as e: raise quo.QuoException(e.value) if (len(scans) > 0): quo.echo(u'# {} Scans Total - Showing 10 most recent scans:'.format(scans['total'])) quo.echo(u'# {:20} {:<15} {:<10} {:<15s}'.format('Scan ID', 'Status', 'Size', 'Timestamp')) for scan in scans['matches'][:10]: quo.echo(u'{:31} {:<24} {:<10} {:<15s}'.format(quo.style(scan['id'], fg='yellow'), quo.style(scan['status'], fg='cyan'), scan['size'], scan['created'])) else: quo.echo("You haven't yet launched any scans.")
Show recently launched scans
src/expositor/cli/scan.py
scan_list
secretuminc/expositor
1
python
@scan.command(name='list') def scan_list(): key = get_api_key() api = shodan.Shodan(key) try: scans = api.scans() except shodan.APIError as e: raise quo.QuoException(e.value) if (len(scans) > 0): quo.echo(u'# {} Scans Total - Showing 10 most recent scans:'.format(scans['total'])) quo.echo(u'# {:20} {:<15} {:<10} {:<15s}'.format('Scan ID', 'Status', 'Size', 'Timestamp')) for scan in scans['matches'][:10]: quo.echo(u'{:31} {:<24} {:<10} {:<15s}'.format(quo.style(scan['id'], fg='yellow'), quo.style(scan['status'], fg='cyan'), scan['size'], scan['created'])) else: quo.echo("You haven't yet launched any scans.")
@scan.command(name='list') def scan_list(): key = get_api_key() api = shodan.Shodan(key) try: scans = api.scans() except shodan.APIError as e: raise quo.QuoException(e.value) if (len(scans) > 0): quo.echo(u'# {} Scans Total - Showing 10 most recent scans:'.format(scans['total'])) quo.echo(u'# {:20} {:<15} {:<10} {:<15s}'.format('Scan ID', 'Status', 'Size', 'Timestamp')) for scan in scans['matches'][:10]: quo.echo(u'{:31} {:<24} {:<10} {:<15s}'.format(quo.style(scan['id'], fg='yellow'), quo.style(scan['status'], fg='cyan'), scan['size'], scan['created'])) else: quo.echo("You haven't yet launched any scans.")<|docstring|>Show recently launched scans<|endoftext|>
3b9ef00015e6ccabb6c1a541fba09ab4b486bfb5cf4901d55bff4459452f98b3
@scan.command(name='internet') @quo.option('--quiet', help='Disable the printing of information to the screen.', default=False, is_flag=True) @quo.argument('port', type=int) @quo.argument('protocol', type=str) def scan_internet(quiet, port, protocol): 'Scan the Internet for a specific port and protocol using the Shodan infrastructure.' key = get_api_key() api = shodan.Shodan(key) try: quo.echo('Submitting Internet scan to Shodan...', nl=False) scan = api.scan_internet(port, protocol) quo.echo('Done') official_ports = api.ports() if (port in official_ports): quo.echo('The requested port is already indexed by Shodan. A new scan for the port has been launched, please subscribe to the real-time stream for results.') else: filename = '{0}-{1}.json.gz'.format(port, protocol) counter = 0 with helpers.open_file(filename, 'w') as fout: quo.echo('Saving results to file: {0}'.format(filename)) done = False quo.echo('Waiting for data, please stand by...') while (not done): try: for banner in api.stream.ports([port], timeout=90): counter += 1 helpers.write_banner(fout, banner) if (not quiet): quo.echo('{0:<40} {1:<20} {2}'.format(quo.style(helpers.get_ip(banner), fg=COLORIZE_FIELDS['ip_str']), quo.style(str(banner['port']), fg=COLORIZE_FIELDS['port']), ';'.join(banner['hostnames']))) except shodan.APIError: if done: break scan = api.scan_status(scan['id']) if (scan['status'] == 'DONE'): done = True except socket.timeout: if done: break scan = api.scan_status(scan['id']) if (scan['status'] == 'DONE'): done = True except Exception as e: raise quo.QuoException(repr(e)) quo.echo('Scan finished: {0} devices found'.format(counter)) except shodan.APIError as e: raise quo.QuoException(e.value)
Scan the Internet for a specific port and protocol using the Shodan infrastructure.
src/expositor/cli/scan.py
scan_internet
secretuminc/expositor
1
python
@scan.command(name='internet') @quo.option('--quiet', help='Disable the printing of information to the screen.', default=False, is_flag=True) @quo.argument('port', type=int) @quo.argument('protocol', type=str) def scan_internet(quiet, port, protocol): key = get_api_key() api = shodan.Shodan(key) try: quo.echo('Submitting Internet scan to Shodan...', nl=False) scan = api.scan_internet(port, protocol) quo.echo('Done') official_ports = api.ports() if (port in official_ports): quo.echo('The requested port is already indexed by Shodan. A new scan for the port has been launched, please subscribe to the real-time stream for results.') else: filename = '{0}-{1}.json.gz'.format(port, protocol) counter = 0 with helpers.open_file(filename, 'w') as fout: quo.echo('Saving results to file: {0}'.format(filename)) done = False quo.echo('Waiting for data, please stand by...') while (not done): try: for banner in api.stream.ports([port], timeout=90): counter += 1 helpers.write_banner(fout, banner) if (not quiet): quo.echo('{0:<40} {1:<20} {2}'.format(quo.style(helpers.get_ip(banner), fg=COLORIZE_FIELDS['ip_str']), quo.style(str(banner['port']), fg=COLORIZE_FIELDS['port']), ';'.join(banner['hostnames']))) except shodan.APIError: if done: break scan = api.scan_status(scan['id']) if (scan['status'] == 'DONE'): done = True except socket.timeout: if done: break scan = api.scan_status(scan['id']) if (scan['status'] == 'DONE'): done = True except Exception as e: raise quo.QuoException(repr(e)) quo.echo('Scan finished: {0} devices found'.format(counter)) except shodan.APIError as e: raise quo.QuoException(e.value)
@scan.command(name='internet') @quo.option('--quiet', help='Disable the printing of information to the screen.', default=False, is_flag=True) @quo.argument('port', type=int) @quo.argument('protocol', type=str) def scan_internet(quiet, port, protocol): key = get_api_key() api = shodan.Shodan(key) try: quo.echo('Submitting Internet scan to Shodan...', nl=False) scan = api.scan_internet(port, protocol) quo.echo('Done') official_ports = api.ports() if (port in official_ports): quo.echo('The requested port is already indexed by Shodan. A new scan for the port has been launched, please subscribe to the real-time stream for results.') else: filename = '{0}-{1}.json.gz'.format(port, protocol) counter = 0 with helpers.open_file(filename, 'w') as fout: quo.echo('Saving results to file: {0}'.format(filename)) done = False quo.echo('Waiting for data, please stand by...') while (not done): try: for banner in api.stream.ports([port], timeout=90): counter += 1 helpers.write_banner(fout, banner) if (not quiet): quo.echo('{0:<40} {1:<20} {2}'.format(quo.style(helpers.get_ip(banner), fg=COLORIZE_FIELDS['ip_str']), quo.style(str(banner['port']), fg=COLORIZE_FIELDS['port']), ';'.join(banner['hostnames']))) except shodan.APIError: if done: break scan = api.scan_status(scan['id']) if (scan['status'] == 'DONE'): done = True except socket.timeout: if done: break scan = api.scan_status(scan['id']) if (scan['status'] == 'DONE'): done = True except Exception as e: raise quo.QuoException(repr(e)) quo.echo('Scan finished: {0} devices found'.format(counter)) except shodan.APIError as e: raise quo.QuoException(e.value)<|docstring|>Scan the Internet for a specific port and protocol using the Shodan infrastructure.<|endoftext|>
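When the port is not already crawled, scan_internet streams banners into a gzip-compressed file named <port>-<protocol>.json.gz through helpers.open_file() and helpers.write_banner(). Those helpers are not part of this record, so the following stdlib sketch of gzip-compressed, newline-delimited JSON output is an assumption about what they wrap:

import gzip
import json

banners = [{'ip_str': '198.51.100.7', 'port': 443, 'hostnames': []}]  # hand-written sample banner

with gzip.open('443-https.json.gz', 'wt', encoding='utf-8') as fout:
    for banner in banners:
        fout.write(json.dumps(banner) + '\n')   # one JSON document per line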
0c389d7e4cb6df782dfb3988dace0fff92abd9ad41a4ebfc7376a6c9c7498054
@scan.command(name='protocols') def scan_protocols(): 'List the protocols that you can scan with using Shodan.' key = get_api_key() api = shodan.Shodan(key) try: protocols = api.protocols() for (name, description) in iter(protocols.items()): quo.echo((quo.style('{0:<30}'.format(name), fg='cyan') + description)) except shodan.APIError as e: raise quo.QuoException(e.value)
List the protocols that you can scan with using Shodan.
src/expositor/cli/scan.py
scan_protocols
secretuminc/expositor
1
python
@scan.command(name='protocols') def scan_protocols(): key = get_api_key() api = shodan.Shodan(key) try: protocols = api.protocols() for (name, description) in iter(protocols.items()): quo.echo((quo.style('{0:<30}'.format(name), fg='cyan') + description)) except shodan.APIError as e: raise quo.QuoException(e.value)
@scan.command(name='protocols') def scan_protocols(): key = get_api_key() api = shodan.Shodan(key) try: protocols = api.protocols() for (name, description) in iter(protocols.items()): quo.echo((quo.style('{0:<30}'.format(name), fg='cyan') + description)) except shodan.APIError as e: raise quo.QuoException(e.value)<|docstring|>List the protocols that you can scan with using Shodan.<|endoftext|>
af72eaafcda9a8fba06f7131865920087593d578b0cb6feddaa5641a24b397a4
@scan.command(name='submit') @quo.option('--wait', help='How long to wait for results to come back. If this is set to "0" or below return immediately.', default=20, type=int) @quo.option('--filename', help='Save the results in the given file.', default='', type=str) @quo.option('--force', default=False, is_flag=True) @quo.option('--verbose', default=False, is_flag=True) @quo.argument('netblocks', metavar='<ip address>', nargs=(- 1)) def scan_submit(wait, filename, force, verbose, netblocks): 'Scan an IP/ netblock using Shodan.' key = get_api_key() api = shodan.Shodan(key) alert = None try: scan = api.scan(netblocks, force=force) now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M') quo.echo('') quo.echo('Starting Shodan scan at {} - {} scan credits left'.format(now, scan['credits_left'])) if verbose: quo.echo('# Scan ID: {}'.format(scan['id'])) if (wait <= 0): quo.echo('Exiting now, not waiting for results. Use the API or website to retrieve the results of the scan.') else: alert = api.create_alert('Scan: {}'.format(', '.join(netblocks)), netblocks) filename = filename.strip() fout = None if (filename != ''): if (not filename.endswith('.json.gz')): filename += '.json.gz' fout = helpers.open_file(filename, 'w') finished_event = threading.Event() progress_bar_thread = threading.Thread(target=async_spinner, args=(finished_event,)) progress_bar_thread.start() hosts = collections.defaultdict(dict) done = False scan_start = time.time() cache = {} while (not done): try: for banner in api.stream.alert(aid=alert['id'], timeout=wait): ip = banner.get('ip', banner.get('ipv6', None)) if (not ip): continue cache_key = '{}:{}'.format(ip, banner['port']) if (cache_key not in cache): hosts[helpers.get_ip(banner)][banner['port']] = banner cache[cache_key] = True if ((time.time() - scan_start) >= 60): scan = api.scan_status(scan['id']) if verbose: quo.echo('# Scan status: {}'.format(scan['status'])) if (scan['status'] == 'DONE'): done = True break except shodan.APIError: if ((time.time() - scan_start) < wait): time.sleep(0.5) continue if done: break scan = api.scan_status(scan['id']) if (scan['status'] == 'DONE'): done = True if verbose: quo.echo('# Scan status: {}'.format(scan['status'])) except socket.timeout: if ((time.time() - scan_start) < wait): continue done = True except Exception as e: finished_event.set() progress_bar_thread.join() raise quo.QuoException(repr(e)) finished_event.set() progress_bar_thread.join() def print_field(name, value): quo.echo(' {:25s}{}'.format(name, value)) def print_banner(banner): quo.echo(' {:20s}'.format(((quo.style(str(banner['port']), fg='green') + '/') + banner['transport'])), nl=False) if ('product' in banner): quo.echo(banner['product'], nl=False) if ('version' in banner): quo.echo(' ({})'.format(banner['version']), nl=False) quo.echo('') if ('ssl' in banner): if ('versions' in banner['ssl']): versions = [version for version in sorted(banner['ssl']['versions']) if (not version.startswith('-'))] if (len(versions) > 0): quo.echo(' |-- SSL Versions: {}'.format(', '.join(versions))) if (('dhparams' in banner['ssl']) and banner['ssl']['dhparams']): quo.echo(' |-- Diffie-Hellman Parameters:') quo.echo(' {:15s}{}\n {:15s}{}'.format('Bits:', banner['ssl']['dhparams']['bits'], 'Generator:', banner['ssl']['dhparams']['generator'])) if ('fingerprint' in banner['ssl']['dhparams']): quo.echo(' {:15s}{}'.format('Fingerprint:', banner['ssl']['dhparams']['fingerprint'])) if hosts: quo.echo('\x08 ') for ip in sorted(hosts): host = next(iter(hosts[ip].items()))[1] quo.echo(quo.style(ip, fg='cyan'), nl=False) if (('hostnames' in host) and host['hostnames']): quo.echo(' ({})'.format(', '.join(host['hostnames'])), nl=False) quo.echo('') if (('location' in host) and ('country_name' in host['location']) and host['location']['country_name']): print_field('Country', host['location']['country_name']) if (('city' in host['location']) and host['location']['city']): print_field('City', host['location']['city']) if (('org' in host) and host['org']): print_field('Organization', host['org']) if (('os' in host) and host['os']): print_field('Operating System', host['os']) quo.echo('') if (('vulns' in host) and (len(host['vulns']) > 0)): vulns = [] for vuln in host['vulns']: if vuln.startswith('!'): continue if (vuln.upper() == 'CVE-2014-0160'): vulns.append(quo.style('Heartbleed', fg='red')) else: vulns.append(quo.style(vuln, fg='red')) if (len(vulns) > 0): quo.echo(' {:25s}'.format('Vulnerabilities:'), nl=False) for vuln in vulns: quo.echo((vuln + '\t'), nl=False) quo.echo('') quo.echo(' Open Ports:') for port in sorted(hosts[ip]): print_banner(hosts[ip][port]) if fout: helpers.write_banner(fout, hosts[ip][port]) quo.echo('') else: quo.echo('\x08No open ports found or the host has been recently crawled and cant get scanned again so soon.') except shodan.APIError as e: raise quo.QuoException(e.value) finally: if alert: api.delete_alert(alert['id'])
Scan an IP/ netblock using Shodan.
src/expositor/cli/scan.py
scan_submit
secretuminc/expositor
1
python
@scan.command(name='submit') @quo.option('--wait', help='How long to wait for results to come back. If this is set to "0" or below return immediately.', default=20, type=int) @quo.option('--filename', help='Save the results in the given file.', default='', type=str) @quo.option('--force', default=False, is_flag=True) @quo.option('--verbose', default=False, is_flag=True) @quo.argument('netblocks', metavar='<ip address>', nargs=(- 1)) def scan_submit(wait, filename, force, verbose, netblocks): key = get_api_key() api = shodan.Shodan(key) alert = None try: scan = api.scan(netblocks, force=force) now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M') quo.echo('') quo.echo('Starting Shodan scan at {} - {} scan credits left'.format(now, scan['credits_left'])) if verbose: quo.echo('# Scan ID: {}'.format(scan['id'])) if (wait <= 0): quo.echo('Exiting now, not waiting for results. Use the API or website to retrieve the results of the scan.') else: alert = api.create_alert('Scan: {}'.format(', '.join(netblocks)), netblocks) filename = filename.strip() fout = None if (filename != ''): if (not filename.endswith('.json.gz')): filename += '.json.gz' fout = helpers.open_file(filename, 'w') finished_event = threading.Event() progress_bar_thread = threading.Thread(target=async_spinner, args=(finished_event,)) progress_bar_thread.start() hosts = collections.defaultdict(dict) done = False scan_start = time.time() cache = {} while (not done): try: for banner in api.stream.alert(aid=alert['id'], timeout=wait): ip = banner.get('ip', banner.get('ipv6', None)) if (not ip): continue cache_key = '{}:{}'.format(ip, banner['port']) if (cache_key not in cache): hosts[helpers.get_ip(banner)][banner['port']] = banner cache[cache_key] = True if ((time.time() - scan_start) >= 60): scan = api.scan_status(scan['id']) if verbose: quo.echo('# Scan status: {}'.format(scan['status'])) if (scan['status'] == 'DONE'): done = True break except shodan.APIError: if ((time.time() - scan_start) < wait): time.sleep(0.5) continue if done: break scan = api.scan_status(scan['id']) if (scan['status'] == 'DONE'): done = True if verbose: quo.echo('# Scan status: {}'.format(scan['status'])) except socket.timeout: if ((time.time() - scan_start) < wait): continue done = True except Exception as e: finished_event.set() progress_bar_thread.join() raise quo.QuoException(repr(e)) finished_event.set() progress_bar_thread.join() def print_field(name, value): quo.echo(' {:25s}{}'.format(name, value)) def print_banner(banner): quo.echo(' {:20s}'.format(((quo.style(str(banner['port']), fg='green') + '/') + banner['transport'])), nl=False) if ('product' in banner): quo.echo(banner['product'], nl=False) if ('version' in banner): quo.echo(' ({})'.format(banner['version']), nl=False) quo.echo('') if ('ssl' in banner): if ('versions' in banner['ssl']): versions = [version for version in sorted(banner['ssl']['versions']) if (not version.startswith('-'))] if (len(versions) > 0): quo.echo(' |-- SSL Versions: {}'.format(', '.join(versions))) if (('dhparams' in banner['ssl']) and banner['ssl']['dhparams']): quo.echo(' |-- Diffie-Hellman Parameters:') quo.echo(' {:15s}{}\n {:15s}{}'.format('Bits:', banner['ssl']['dhparams']['bits'], 'Generator:', banner['ssl']['dhparams']['generator'])) if ('fingerprint' in banner['ssl']['dhparams']): quo.echo(' {:15s}{}'.format('Fingerprint:', banner['ssl']['dhparams']['fingerprint'])) if hosts: quo.echo('\x08 ') for ip in sorted(hosts): host = next(iter(hosts[ip].items()))[1] quo.echo(quo.style(ip, fg='cyan'), nl=False) if (('hostnames' in host) and host['hostnames']): quo.echo(' ({})'.format(', '.join(host['hostnames'])), nl=False) quo.echo('') if (('location' in host) and ('country_name' in host['location']) and host['location']['country_name']): print_field('Country', host['location']['country_name']) if (('city' in host['location']) and host['location']['city']): print_field('City', host['location']['city']) if (('org' in host) and host['org']): print_field('Organization', host['org']) if (('os' in host) and host['os']): print_field('Operating System', host['os']) quo.echo('') if (('vulns' in host) and (len(host['vulns']) > 0)): vulns = [] for vuln in host['vulns']: if vuln.startswith('!'): continue if (vuln.upper() == 'CVE-2014-0160'): vulns.append(quo.style('Heartbleed', fg='red')) else: vulns.append(quo.style(vuln, fg='red')) if (len(vulns) > 0): quo.echo(' {:25s}'.format('Vulnerabilities:'), nl=False) for vuln in vulns: quo.echo((vuln + '\t'), nl=False) quo.echo('') quo.echo(' Open Ports:') for port in sorted(hosts[ip]): print_banner(hosts[ip][port]) if fout: helpers.write_banner(fout, hosts[ip][port]) quo.echo('') else: quo.echo('\x08No open ports found or the host has been recently crawled and cant get scanned again so soon.') except shodan.APIError as e: raise quo.QuoException(e.value) finally: if alert: api.delete_alert(alert['id'])
@scan.command(name='submit') @quo.option('--wait', help='How long to wait for results to come back. If this is set to "0" or below return immediately.', default=20, type=int) @quo.option('--filename', help='Save the results in the given file.', default='', type=str) @quo.option('--force', default=False, is_flag=True) @quo.option('--verbose', default=False, is_flag=True) @quo.argument('netblocks', metavar='<ip address>', nargs=(- 1)) def scan_submit(wait, filename, force, verbose, netblocks): key = get_api_key() api = shodan.Shodan(key) alert = None try: scan = api.scan(netblocks, force=force) now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M') quo.echo('') quo.echo('Starting Shodan scan at {} - {} scan credits left'.format(now, scan['credits_left'])) if verbose: quo.echo('# Scan ID: {}'.format(scan['id'])) if (wait <= 0): quo.echo('Exiting now, not waiting for results. Use the API or website to retrieve the results of the scan.') else: alert = api.create_alert('Scan: {}'.format(', '.join(netblocks)), netblocks) filename = filename.strip() fout = None if (filename != ''): if (not filename.endswith('.json.gz')): filename += '.json.gz' fout = helpers.open_file(filename, 'w') finished_event = threading.Event() progress_bar_thread = threading.Thread(target=async_spinner, args=(finished_event,)) progress_bar_thread.start() hosts = collections.defaultdict(dict) done = False scan_start = time.time() cache = {} while (not done): try: for banner in api.stream.alert(aid=alert['id'], timeout=wait): ip = banner.get('ip', banner.get('ipv6', None)) if (not ip): continue cache_key = '{}:{}'.format(ip, banner['port']) if (cache_key not in cache): hosts[helpers.get_ip(banner)][banner['port']] = banner cache[cache_key] = True if ((time.time() - scan_start) >= 60): scan = api.scan_status(scan['id']) if verbose: quo.echo('# Scan status: {}'.format(scan['status'])) if (scan['status'] == 'DONE'): done = True break except shodan.APIError: if ((time.time() - scan_start) < wait): time.sleep(0.5) continue if done: break scan = api.scan_status(scan['id']) if (scan['status'] == 'DONE'): done = True if verbose: quo.echo('# Scan status: {}'.format(scan['status'])) except socket.timeout: if ((time.time() - scan_start) < wait): continue done = True except Exception as e: finished_event.set() progress_bar_thread.join() raise quo.QuoException(repr(e)) finished_event.set() progress_bar_thread.join() def print_field(name, value): quo.echo(' {:25s}{}'.format(name, value)) def print_banner(banner): quo.echo(' {:20s}'.format(((quo.style(str(banner['port']), fg='green') + '/') + banner['transport'])), nl=False) if ('product' in banner): quo.echo(banner['product'], nl=False) if ('version' in banner): quo.echo(' ({})'.format(banner['version']), nl=False) quo.echo('') if ('ssl' in banner): if ('versions' in banner['ssl']): versions = [version for version in sorted(banner['ssl']['versions']) if (not version.startswith('-'))] if (len(versions) > 0): quo.echo(' |-- SSL Versions: {}'.format(', '.join(versions))) if (('dhparams' in banner['ssl']) and banner['ssl']['dhparams']): quo.echo(' |-- Diffie-Hellman Parameters:') quo.echo(' {:15s}{}\n {:15s}{}'.format('Bits:', banner['ssl']['dhparams']['bits'], 'Generator:', banner['ssl']['dhparams']['generator'])) if ('fingerprint' in banner['ssl']['dhparams']): quo.echo(' {:15s}{}'.format('Fingerprint:', banner['ssl']['dhparams']['fingerprint'])) if hosts: quo.echo('\x08 ') for ip in sorted(hosts): host = next(iter(hosts[ip].items()))[1] quo.echo(quo.style(ip, fg='cyan'), nl=False) if (('hostnames' in host) and host['hostnames']): quo.echo(' ({})'.format(', '.join(host['hostnames'])), nl=False) quo.echo('') if (('location' in host) and ('country_name' in host['location']) and host['location']['country_name']): print_field('Country', host['location']['country_name']) if (('city' in host['location']) and host['location']['city']): print_field('City', host['location']['city']) if (('org' in host) and host['org']): print_field('Organization', host['org']) if (('os' in host) and host['os']): print_field('Operating System', host['os']) quo.echo('') if (('vulns' in host) and (len(host['vulns']) > 0)): vulns = [] for vuln in host['vulns']: if vuln.startswith('!'): continue if (vuln.upper() == 'CVE-2014-0160'): vulns.append(quo.style('Heartbleed', fg='red')) else: vulns.append(quo.style(vuln, fg='red')) if (len(vulns) > 0): quo.echo(' {:25s}'.format('Vulnerabilities:'), nl=False) for vuln in vulns: quo.echo((vuln + '\t'), nl=False) quo.echo('') quo.echo(' Open Ports:') for port in sorted(hosts[ip]): print_banner(hosts[ip][port]) if fout: helpers.write_banner(fout, hosts[ip][port]) quo.echo('') else: quo.echo('\x08No open ports found or the host has been recently crawled and cant get scanned again so soon.') except shodan.APIError as e: raise quo.QuoException(e.value) finally: if alert: api.delete_alert(alert['id'])<|docstring|>Scan an IP/ netblock using Shodan.<|endoftext|>
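The result handling in scan_submit groups banners per host and skips duplicates by keying on ip:port before printing. The same bookkeeping on a couple of hand-written banner dicts, with a set standing in for the record's dict-based cache:

import collections

banners = [{'ip_str': '198.51.100.7', 'port': 22},
           {'ip_str': '198.51.100.7', 'port': 443},
           {'ip_str': '198.51.100.7', 'port': 443}]   # duplicate, skipped

hosts = collections.defaultdict(dict)
seen = set()
for banner in banners:
    cache_key = '{}:{}'.format(banner['ip_str'], banner['port'])
    if cache_key not in seen:
        hosts[banner['ip_str']][banner['port']] = banner
        seen.add(cache_key)

print({ip: sorted(ports) for ip, ports in hosts.items()})  # {'198.51.100.7': [22, 443]}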
2ae2f2e49ee4ce90a477cdfabbf51be71a73c358256379e4a991511c14a55d02
@scan.command(name='status') @quo.argument('scan_id', type=str) def scan_status(scan_id): 'Check the status of an on-demand scan.' key = get_api_key() api = shodan.Shodan(key) try: scan = api.scan_status(scan_id) quo.echo(scan['status']) except shodan.APIError as e: raise quo.QuoException(e.value)
Check the status of an on-demand scan.
src/expositor/cli/scan.py
scan_status
secretuminc/expositor
1
python
@scan.command(name='status') @quo.argument('scan_id', type=str) def scan_status(scan_id): key = get_api_key() api = shodan.Shodan(key) try: scan = api.scan_status(scan_id) quo.echo(scan['status']) except shodan.APIError as e: raise quo.QuoException(e.value)
@scan.command(name='status') @quo.argument('scan_id', type=str) def scan_status(scan_id): key = get_api_key() api = shodan.Shodan(key) try: scan = api.scan_status(scan_id) quo.echo(scan['status']) except shodan.APIError as e: raise quo.QuoException(e.value)<|docstring|>Check the status of an on-demand scan.<|endoftext|>
edbeafc48750dad5450a25cba564d344f2e0bb7f19a27d59ab40b01f4202b6f1
def spheric_to_cartesian(phi, theta, rho): ' Spheric to cartesian coordinates ' if hasattr(phi, '__iter__'): n = len(phi) elif hasattr(theta, '__iter__'): n = len(theta) elif hasattr(rho, '__iter__'): n = len(rho) P = np.empty((n, 3), dtype=np.float32) sin_theta = np.sin(theta) P[(:, 0)] = ((sin_theta * np.sin(phi)) * rho) P[(:, 1)] = ((sin_theta * np.cos(phi)) * rho) P[(:, 2)] = (np.cos(theta) * rho) return P
Spheric to cartesian coordinates
examples/earth.py
spheric_to_cartesian
CodeDoes/glumpy
1,074
python
def spheric_to_cartesian(phi, theta, rho): ' ' if hasattr(phi, '__iter__'): n = len(phi) elif hasattr(theta, '__iter__'): n = len(theta) elif hasattr(rho, '__iter__'): n = len(rho) P = np.empty((n, 3), dtype=np.float32) sin_theta = np.sin(theta) P[(:, 0)] = ((sin_theta * np.sin(phi)) * rho) P[(:, 1)] = ((sin_theta * np.cos(phi)) * rho) P[(:, 2)] = (np.cos(theta) * rho) return P
def spheric_to_cartesian(phi, theta, rho): ' ' if hasattr(phi, '__iter__'): n = len(phi) elif hasattr(theta, '__iter__'): n = len(theta) elif hasattr(rho, '__iter__'): n = len(rho) P = np.empty((n, 3), dtype=np.float32) sin_theta = np.sin(theta) P[(:, 0)] = ((sin_theta * np.sin(phi)) * rho) P[(:, 1)] = ((sin_theta * np.cos(phi)) * rho) P[(:, 2)] = (np.cos(theta) * rho) return P<|docstring|>Spheric to cartesian coordinates<|endoftext|>
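spheric_to_cartesian treats theta as the polar angle measured from the +z axis and phi as the azimuth, with x = rho*sin(theta)*sin(phi), y = rho*sin(theta)*cos(phi), z = rho*cos(theta). A quick NumPy check that points built with that convention sit on the sphere of radius rho:

import numpy as np

phi = np.linspace(0.0, 2.0 * np.pi, 16)      # azimuth
theta = np.full_like(phi, np.pi / 3.0)       # polar angle from +z
rho = 2.0

sin_theta = np.sin(theta)
P = np.stack([sin_theta * np.sin(phi) * rho,
              sin_theta * np.cos(phi) * rho,
              np.cos(theta) * rho], axis=1).astype(np.float32)

assert np.allclose(np.linalg.norm(P, axis=1), rho)      # all points lie on the radius-rho sphere
assert np.allclose(P[:, 2], rho * np.cos(np.pi / 3.0))  # constant z for a constant polar angle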
21991ae8a1567b5d1257b269d1f966300b217ba4c4d6b2e64edeafb14d386148
def get_data_element_impls(reload_modules=False): '\n Discover and return discovered ``DataElement`` classes. Keys in the\n returned map are the names of the discovered classes, and the paired values\n are the actual class type objects.\n\n We search for implementation classes in:\n - modules next to this file this function is defined in (ones that begin\n with an alphanumeric character),\n - python modules listed in the environment variable\n ``DATA_ELEMENT_PATH``\n - This variable should contain a sequence of python module\n specifications, separated by the platform specific PATH separator\n character (``;`` for Windows, ``:`` for unix)\n\n Within a module we first look for a helper variable by the name\n ``DATA_ELEMENT_CLASS``, which can either be a single class object or\n an iterable of class objects, to be specifically exported. If the variable\n is set to None, we skip that module and do not import anything. If the\n variable is not present, we look at attributes defined in that module for\n classes that descend from the given base class type. If none of the above\n are found, or if an exception occurs, the module is skipped.\n\n :param reload_modules: Explicitly reload discovered modules from source.\n :type reload_modules: bool\n\n :return: Map of discovered class object of type ``DataElement``\n whose keys are the string names of the classes.\n :rtype: dict[str, type]\n\n ' this_dir = os.path.abspath(os.path.dirname(__file__)) env_var = 'DATA_ELEMENT_PATH' helper_var = 'DATA_ELEMENT_CLASS' return plugin.get_plugins(__name__, this_dir, env_var, helper_var, DataElement, reload_modules=reload_modules)
Discover and return discovered ``DataElement`` classes. Keys in the returned map are the names of the discovered classes, and the paired values are the actual class type objects. We search for implementation classes in: - modules next to this file this function is defined in (ones that begin with an alphanumeric character), - python modules listed in the environment variable ``DATA_ELEMENT_PATH`` - This variable should contain a sequence of python module specifications, separated by the platform specific PATH separator character (``;`` for Windows, ``:`` for unix) Within a module we first look for a helper variable by the name ``DATA_ELEMENT_CLASS``, which can either be a single class object or an iterable of class objects, to be specifically exported. If the variable is set to None, we skip that module and do not import anything. If the variable is not present, we look at attributes defined in that module for classes that descend from the given base class type. If none of the above are found, or if an exception occurs, the module is skipped. :param reload_modules: Explicitly reload discovered modules from source. :type reload_modules: bool :return: Map of discovered class object of type ``DataElement`` whose keys are the string names of the classes. :rtype: dict[str, type]
python/smqtk/representation/data_element/__init__.py
get_data_element_impls
jbeezley/SMQTK
1
python
def get_data_element_impls(reload_modules=False): '\n Discover and return discovered ``DataElement`` classes. Keys in the\n returned map are the names of the discovered classes, and the paired values\n are the actual class type objects.\n\n We search for implementation classes in:\n - modules next to this file this function is defined in (ones that begin\n with an alphanumeric character),\n - python modules listed in the environment variable\n ``DATA_ELEMENT_PATH``\n - This variable should contain a sequence of python module\n specifications, separated by the platform specific PATH separator\n character (``;`` for Windows, ``:`` for unix)\n\n Within a module we first look for a helper variable by the name\n ``DATA_ELEMENT_CLASS``, which can either be a single class object or\n an iterable of class objects, to be specifically exported. If the variable\n is set to None, we skip that module and do not import anything. If the\n variable is not present, we look at attributes defined in that module for\n classes that descend from the given base class type. If none of the above\n are found, or if an exception occurs, the module is skipped.\n\n :param reload_modules: Explicitly reload discovered modules from source.\n :type reload_modules: bool\n\n :return: Map of discovered class object of type ``DataElement``\n whose keys are the string names of the classes.\n :rtype: dict[str, type]\n\n ' this_dir = os.path.abspath(os.path.dirname(__file__)) env_var = 'DATA_ELEMENT_PATH' helper_var = 'DATA_ELEMENT_CLASS' return plugin.get_plugins(__name__, this_dir, env_var, helper_var, DataElement, reload_modules=reload_modules)
def get_data_element_impls(reload_modules=False): '\n Discover and return discovered ``DataElement`` classes. Keys in the\n returned map are the names of the discovered classes, and the paired values\n are the actual class type objects.\n\n We search for implementation classes in:\n - modules next to this file this function is defined in (ones that begin\n with an alphanumeric character),\n - python modules listed in the environment variable\n ``DATA_ELEMENT_PATH``\n - This variable should contain a sequence of python module\n specifications, separated by the platform specific PATH separator\n character (``;`` for Windows, ``:`` for unix)\n\n Within a module we first look for a helper variable by the name\n ``DATA_ELEMENT_CLASS``, which can either be a single class object or\n an iterable of class objects, to be specifically exported. If the variable\n is set to None, we skip that module and do not import anything. If the\n variable is not present, we look at attributes defined in that module for\n classes that descend from the given base class type. If none of the above\n are found, or if an exception occurs, the module is skipped.\n\n :param reload_modules: Explicitly reload discovered modules from source.\n :type reload_modules: bool\n\n :return: Map of discovered class object of type ``DataElement``\n whose keys are the string names of the classes.\n :rtype: dict[str, type]\n\n ' this_dir = os.path.abspath(os.path.dirname(__file__)) env_var = 'DATA_ELEMENT_PATH' helper_var = 'DATA_ELEMENT_CLASS' return plugin.get_plugins(__name__, this_dir, env_var, helper_var, DataElement, reload_modules=reload_modules)<|docstring|>Discover and return discovered ``DataElement`` classes. Keys in the returned map are the names of the discovered classes, and the paired values are the actual class type objects. We search for implementation classes in: - modules next to this file this function is defined in (ones that begin with an alphanumeric character), - python modules listed in the environment variable ``DATA_ELEMENT_PATH`` - This variable should contain a sequence of python module specifications, separated by the platform specific PATH separator character (``;`` for Windows, ``:`` for unix) Within a module we first look for a helper variable by the name ``DATA_ELEMENT_CLASS``, which can either be a single class object or an iterable of class objects, to be specifically exported. If the variable is set to None, we skip that module and do not import anything. If the variable is not present, we look at attributes defined in that module for classes that descend from the given base class type. If none of the above are found, or if an exception occurs, the module is skipped. :param reload_modules: Explicitly reload discovered modules from source. :type reload_modules: bool :return: Map of discovered class object of type ``DataElement`` whose keys are the string names of the classes. :rtype: dict[str, type]<|endoftext|>
e11f072aa5f7bd92fb0ef3d528aab65cb11b397bb794efe3ac897fd74e5fc9ff
def from_uri(uri, impl_generator=get_data_element_impls): '\n Create a data element instance from available plugin implementations.\n\n The first implementation that can resolve the URI is what is returned. If no\n implementations can resolve the URL, an ``InvalidUriError`` is raised.\n\n :param uri: URI to try to resolve into a DataElement instance.\n :type uri: str\n\n :param impl_generator: Function that returns a dictionary mapping\n implementation type names to the class type. By default this refers to\n the standard ``get_data_element_impls`` function, however this can be\n changed to refer to a custom set of classes if desired.\n :type impl_generator: () -> dict[str, type]\n\n :raises smqtk.exceptions.InvalidUriError: No data element implementations\n could resolve the given URI.\n\n :return: New data element instance providing access to the data pointed to\n by the input URI.\n :rtype: DataElement\n\n ' log = logging.getLogger(__name__) log.debug("Trying to parse URI: '%s'", uri) de_type_iter = six.itervalues(impl_generator()) inst = None for de_type in de_type_iter: try: inst = de_type.from_uri(uri) except NoUriResolutionError: pass except InvalidUriError as ex: log.debug("Implementation '%s' failed to parse URI: %s", de_type.__name__, ex.reason) if (inst is not None): break if (inst is None): raise InvalidUriError(uri, 'No available implementation to handle URI.') return inst
Create a data element instance from available plugin implementations. The first implementation that can resolve the URI is what is returned. If no implementations can resolve the URL, an ``InvalidUriError`` is raised. :param uri: URI to try to resolve into a DataElement instance. :type uri: str :param impl_generator: Function that returns a dictionary mapping implementation type names to the class type. By default this refers to the standard ``get_data_element_impls`` function, however this can be changed to refer to a custom set of classes if desired. :type impl_generator: () -> dict[str, type] :raises smqtk.exceptions.InvalidUriError: No data element implementations could resolve the given URI. :return: New data element instance providing access to the data pointed to by the input URI. :rtype: DataElement
python/smqtk/representation/data_element/__init__.py
from_uri
jbeezley/SMQTK
1
python
def from_uri(uri, impl_generator=get_data_element_impls): '\n Create a data element instance from available plugin implementations.\n\n The first implementation that can resolve the URI is what is returned. If no\n implementations can resolve the URL, an ``InvalidUriError`` is raised.\n\n :param uri: URI to try to resolve into a DataElement instance.\n :type uri: str\n\n :param impl_generator: Function that returns a dictionary mapping\n implementation type names to the class type. By default this refers to\n the standard ``get_data_element_impls`` function, however this can be\n changed to refer to a custom set of classes if desired.\n :type impl_generator: () -> dict[str, type]\n\n :raises smqtk.exceptions.InvalidUriError: No data element implementations\n could resolve the given URI.\n\n :return: New data element instance providing access to the data pointed to\n by the input URI.\n :rtype: DataElement\n\n ' log = logging.getLogger(__name__) log.debug("Trying to parse URI: '%s'", uri) de_type_iter = six.itervalues(impl_generator()) inst = None for de_type in de_type_iter: try: inst = de_type.from_uri(uri) except NoUriResolutionError: pass except InvalidUriError as ex: log.debug("Implementation '%s' failed to parse URI: %s", de_type.__name__, ex.reason) if (inst is not None): break if (inst is None): raise InvalidUriError(uri, 'No available implementation to handle URI.') return inst
def from_uri(uri, impl_generator=get_data_element_impls): '\n Create a data element instance from available plugin implementations.\n\n The first implementation that can resolve the URI is what is returned. If no\n implementations can resolve the URL, an ``InvalidUriError`` is raised.\n\n :param uri: URI to try to resolve into a DataElement instance.\n :type uri: str\n\n :param impl_generator: Function that returns a dictionary mapping\n implementation type names to the class type. By default this refers to\n the standard ``get_data_element_impls`` function, however this can be\n changed to refer to a custom set of classes if desired.\n :type impl_generator: () -> dict[str, type]\n\n :raises smqtk.exceptions.InvalidUriError: No data element implementations\n could resolve the given URI.\n\n :return: New data element instance providing access to the data pointed to\n by the input URI.\n :rtype: DataElement\n\n ' log = logging.getLogger(__name__) log.debug("Trying to parse URI: '%s'", uri) de_type_iter = six.itervalues(impl_generator()) inst = None for de_type in de_type_iter: try: inst = de_type.from_uri(uri) except NoUriResolutionError: pass except InvalidUriError as ex: log.debug("Implementation '%s' failed to parse URI: %s", de_type.__name__, ex.reason) if (inst is not None): break if (inst is None): raise InvalidUriError(uri, 'No available implementation to handle URI.') return inst<|docstring|>Create a data element instance from available plugin implementations. The first implementation that can resolve the URI is what is returned. If no implementations can resolve the URL, an ``InvalidUriError`` is raised. :param uri: URI to try to resolve into a DataElement instance. :type uri: str :param impl_generator: Function that returns a dictionary mapping implementation type names to the class type. By default this refers to the standard ``get_data_element_impls`` function, however this can be changed to refer to a custom set of classes if desired. :type impl_generator: () -> dict[str, type] :raises smqtk.exceptions.InvalidUriError: No data element implementations could resolve the given URI. :return: New data element instance providing access to the data pointed to by the input URI. :rtype: DataElement<|endoftext|>
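A hedged sketch of resolving a URI through the module-level from_uri above; the URI value is illustrative, and which schemes actually resolve depends on the DataElement implementations discovered at runtime.

from smqtk.representation.data_element import from_uri
from smqtk.exceptions import InvalidUriError

try:
    elem = from_uri('file:///tmp/example.jpg')  # example URI, not guaranteed to resolve
    print(elem.content_type(), elem.sha1())
except InvalidUriError as ex:
    # Raised when no discovered implementation can handle the URI.
    print('Could not resolve URI:', ex.reason)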
e9c93a8e8229862da58e19dce9f0287b4814a06a2cef39b7cf63edf1a4c2a345
@classmethod def from_uri(cls, uri): '\n Construct a new instance based on the given URI.\n\n This function may not be implemented for all DataElement types.\n\n :param uri: URI string to resolve into an element instance\n :type uri: str\n\n :raises NoUriResolutionError: This element type does not implement URI\n resolution.\n :raises smqtk.exceptions.InvalidUriError: This element type could not\n resolve the provided URI string.\n\n :return: New element instance of our type.\n :rtype: DataElement\n\n ' raise NoUriResolutionError()
Construct a new instance based on the given URI. This function may not be implemented for all DataElement types. :param uri: URI string to resolve into an element instance :type uri: str :raises NoUriResolutionError: This element type does not implement URI resolution. :raises smqtk.exceptions.InvalidUriError: This element type could not resolve the provided URI string. :return: New element instance of our type. :rtype: DataElement
python/smqtk/representation/data_element/__init__.py
from_uri
jbeezley/SMQTK
1
python
@classmethod def from_uri(cls, uri): '\n Construct a new instance based on the given URI.\n\n This function may not be implemented for all DataElement types.\n\n :param uri: URI string to resolve into an element instance\n :type uri: str\n\n :raises NoUriResolutionError: This element type does not implement URI\n resolution.\n :raises smqtk.exceptions.InvalidUriError: This element type could not\n resolve the provided URI string.\n\n :return: New element instance of our type.\n :rtype: DataElement\n\n ' raise NoUriResolutionError()
@classmethod def from_uri(cls, uri): '\n Construct a new instance based on the given URI.\n\n This function may not be implemented for all DataElement types.\n\n :param uri: URI string to resolve into an element instance\n :type uri: str\n\n :raises NoUriResolutionError: This element type does not implement URI\n resolution.\n :raises smqtk.exceptions.InvalidUriError: This element type could not\n resolve the provided URI string.\n\n :return: New element instance of our type.\n :rtype: DataElement\n\n ' raise NoUriResolutionError()<|docstring|>Construct a new instance based on the given URI. This function may not be implemented for all DataElement types. :param uri: URI string to resolve into an element instance :type uri: str :raises NoUriResolutionError: This element type does not implement URI resolution. :raises smqtk.exceptions.InvalidUriError: This element type could not resolve the provided URI string. :return: New element instance of our type. :rtype: DataElement<|endoftext|>
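An illustrative-only override of the classmethod contract above. The ExampleUriElement class, its 'example://' scheme, and its constructor are invented for the sketch; the remaining abstract methods of DataElement are omitted, so the class is not instantiable as written.

from smqtk.exceptions import InvalidUriError
from smqtk.representation.data_element import DataElement


class ExampleUriElement(DataElement):
    URI_PREFIX = 'example://'

    @classmethod
    def from_uri(cls, uri):
        if not uri.startswith(cls.URI_PREFIX):
            # Per the contract above: could not resolve the provided URI string.
            raise InvalidUriError(uri, "Expected an 'example://' URI.")
        payload = uri[len(cls.URI_PREFIX):]
        return cls(payload)  # hypothetical constructor taking the URI payload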
cacee0461d35c9438b13166c00c6dd51df3febdcbc2d6bcecb7645950499666b
def _write_new_temp(self, d): '\n Actually write our bytes to a new temp file\n Always creates new file.\n\n :param d: directory to write temp file in or None to use system default.\n :returns: path to file written\n\n ' if d: file_utils.safe_create_dir(d) ext = MIMETYPES.guess_extension((self.content_type() or '')) if (ext in {'.jpe', '.jfif'}): ext = '.jpg' (fd, fp) = tempfile.mkstemp(suffix=(ext or ''), dir=d) os.close(fd) with open(fp, 'wb') as f: f.write(self.get_bytes()) return fp
Actually write our bytes to a new temp file Always creates new file. :param d: directory to write temp file in or None to use system default. :returns: path to file written
python/smqtk/representation/data_element/__init__.py
_write_new_temp
jbeezley/SMQTK
1
python
def _write_new_temp(self, d): '\n Actually write our bytes to a new temp file\n Always creates new file.\n\n :param d: directory to write temp file in or None to use system default.\n :returns: path to file written\n\n ' if d: file_utils.safe_create_dir(d) ext = MIMETYPES.guess_extension((self.content_type() or '')) if (ext in {'.jpe', '.jfif'}): ext = '.jpg' (fd, fp) = tempfile.mkstemp(suffix=(ext or ''), dir=d) os.close(fd) with open(fp, 'wb') as f: f.write(self.get_bytes()) return fp
def _write_new_temp(self, d): '\n Actually write our bytes to a new temp file\n Always creates new file.\n\n :param d: directory to write temp file in or None to use system default.\n :returns: path to file written\n\n ' if d: file_utils.safe_create_dir(d) ext = MIMETYPES.guess_extension((self.content_type() or '')) if (ext in {'.jpe', '.jfif'}): ext = '.jpg' (fd, fp) = tempfile.mkstemp(suffix=(ext or ''), dir=d) os.close(fd) with open(fp, 'wb') as f: f.write(self.get_bytes()) return fp<|docstring|>Actually write our bytes to a new temp file Always creates new file. :param d: directory to write temp file in or None to use system default. :returns: path to file written<|endoftext|>
38a030a70d121d5d146838d673b636f5996d914fb49227b047f8b49fe2043127
def _clear_no_exist(self): "\n Clear paths in temp stack that don't exist on the system.\n " no_exist_paths = deque() for fp in self._temp_filepath_stack: if (not osp.isfile(fp)): no_exist_paths.append(fp) for fp in no_exist_paths: self._temp_filepath_stack.remove(fp)
Clear paths in temp stack that don't exist on the system.
python/smqtk/representation/data_element/__init__.py
_clear_no_exist
jbeezley/SMQTK
1
python
def _clear_no_exist(self): "\n \n " no_exist_paths = deque() for fp in self._temp_filepath_stack: if (not osp.isfile(fp)): no_exist_paths.append(fp) for fp in no_exist_paths: self._temp_filepath_stack.remove(fp)
def _clear_no_exist(self): "\n \n " no_exist_paths = deque() for fp in self._temp_filepath_stack: if (not osp.isfile(fp)): no_exist_paths.append(fp) for fp in no_exist_paths: self._temp_filepath_stack.remove(fp)<|docstring|>Clear paths in temp stack that don't exist on the system.<|endoftext|>
7aa6aefa0f2e0074287f0823e9d78631427d587db56440517a936a3a0b2c73ed
def md5(self): "\n Get the MD5 checksum of this element's binary content.\n\n :return: MD5 hex checksum of the data content.\n :rtype: str\n " return hashlib.md5(self.get_bytes()).hexdigest()
Get the MD5 checksum of this element's binary content. :return: MD5 hex checksum of the data content. :rtype: str
python/smqtk/representation/data_element/__init__.py
md5
jbeezley/SMQTK
1
python
def md5(self): "\n Get the MD5 checksum of this element's binary content.\n\n :return: MD5 hex checksum of the data content.\n :rtype: str\n " return hashlib.md5(self.get_bytes()).hexdigest()
def md5(self): "\n Get the MD5 checksum of this element's binary content.\n\n :return: MD5 hex checksum of the data content.\n :rtype: str\n " return hashlib.md5(self.get_bytes()).hexdigest()<|docstring|>Get the MD5 checksum of this element's binary content. :return: MD5 hex checksum of the data content. :rtype: str<|endoftext|>
2d6aeafeb571856eb701e73bac665227c86f6d48cdc89c2a98eced9307d073de
def sha1(self): "\n Get the SHA1 checksum of this element's binary content.\n\n :return: SHA1 hex checksum of the data content.\n :rtype: str\n " return hashlib.sha1(self.get_bytes()).hexdigest()
Get the SHA1 checksum of this element's binary content. :return: SHA1 hex checksum of the data content. :rtype: str
python/smqtk/representation/data_element/__init__.py
sha1
jbeezley/SMQTK
1
python
def sha1(self): "\n Get the SHA1 checksum of this element's binary content.\n\n :return: SHA1 hex checksum of the data content.\n :rtype: str\n " return hashlib.sha1(self.get_bytes()).hexdigest()
def sha1(self): "\n Get the SHA1 checksum of this element's binary content.\n\n :return: SHA1 hex checksum of the data content.\n :rtype: str\n " return hashlib.sha1(self.get_bytes()).hexdigest()<|docstring|>Get the SHA1 checksum of this element's binary content. :return: SHA1 hex checksum of the data content. :rtype: str<|endoftext|>
c134778b9a3c42b1a3327a346d04545fc6f5ff257a47cfe58ebbb2fd4502b416
def sha512(self): "\n Get the SHA512 checksum of this element's binary content.\n\n :return: SHA512 hex checksum of the data content.\n :rtype: str\n " return hashlib.sha512(self.get_bytes()).hexdigest()
Get the SHA512 checksum of this element's binary content. :return: SHA512 hex checksum of the data content. :rtype: str
python/smqtk/representation/data_element/__init__.py
sha512
jbeezley/SMQTK
1
python
def sha512(self): "\n Get the SHA512 checksum of this element's binary content.\n\n :return: SHA512 hex checksum of the data content.\n :rtype: str\n " return hashlib.sha512(self.get_bytes()).hexdigest()
def sha512(self): "\n Get the SHA512 checksum of this element's binary content.\n\n :return: SHA512 hex checksum of the data content.\n :rtype: str\n " return hashlib.sha512(self.get_bytes()).hexdigest()<|docstring|>Get the SHA512 checksum of this element's binary content. :return: SHA512 hex checksum of the data content. :rtype: str<|endoftext|>
79a9c3acd440794cdff6c01da2c858ee71cb0645f4a5719b0a1a7da792054710
def write_temp(self, temp_dir=None): "\n Write this data's bytes to a temporary file on disk, returning the path\n to the written file, whose extension is guessed based on this data's\n content type.\n\n It is not guaranteed that the returned file path does not point to the\n original data, i.e. writing to the returned filepath may modify the\n original data.\n\n NOTE:\n The file path returned should not be explicitly removed by the user.\n Instead, the ``clean_temp()`` method should be called on this\n object.\n\n :param temp_dir: Optional directory to write temporary file in,\n otherwise we use the platform default temporary files directory.\n If this is an empty string, we count it the same as having provided\n None.\n :type temp_dir: None or str\n\n :return: Path to the temporary file\n :rtype: str\n\n " self._clear_no_exist() if temp_dir: abs_temp_dir = osp.abspath(osp.expanduser(temp_dir)) for tf in self._temp_filepath_stack: if (osp.dirname(tf) == abs_temp_dir): return tf self._temp_filepath_stack.append(self._write_new_temp(temp_dir)) elif (not self._temp_filepath_stack): self._temp_filepath_stack.append(self._write_new_temp(None)) return self._temp_filepath_stack[(- 1)]
Write this data's bytes to a temporary file on disk, returning the path to the written file, whose extension is guessed based on this data's content type. It is not guaranteed that the returned file path does not point to the original data, i.e. writing to the returned filepath may modify the original data. NOTE: The file path returned should not be explicitly removed by the user. Instead, the ``clean_temp()`` method should be called on this object. :param temp_dir: Optional directory to write temporary file in, otherwise we use the platform default temporary files directory. If this is an empty string, we count it the same as having provided None. :type temp_dir: None or str :return: Path to the temporary file :rtype: str
python/smqtk/representation/data_element/__init__.py
write_temp
jbeezley/SMQTK
1
python
def write_temp(self, temp_dir=None): "\n Write this data's bytes to a temporary file on disk, returning the path\n to the written file, whose extension is guessed based on this data's\n content type.\n\n It is not guaranteed that the returned file path does not point to the\n original data, i.e. writing to the returned filepath may modify the\n original data.\n\n NOTE:\n The file path returned should not be explicitly removed by the user.\n Instead, the ``clean_temp()`` method should be called on this\n object.\n\n :param temp_dir: Optional directory to write temporary file in,\n otherwise we use the platform default temporary files directory.\n If this is an empty string, we count it the same as having provided\n None.\n :type temp_dir: None or str\n\n :return: Path to the temporary file\n :rtype: str\n\n " self._clear_no_exist() if temp_dir: abs_temp_dir = osp.abspath(osp.expanduser(temp_dir)) for tf in self._temp_filepath_stack: if (osp.dirname(tf) == abs_temp_dir): return tf self._temp_filepath_stack.append(self._write_new_temp(temp_dir)) elif (not self._temp_filepath_stack): self._temp_filepath_stack.append(self._write_new_temp(None)) return self._temp_filepath_stack[(- 1)]
def write_temp(self, temp_dir=None): "\n Write this data's bytes to a temporary file on disk, returning the path\n to the written file, whose extension is guessed based on this data's\n content type.\n\n It is not guaranteed that the returned file path does not point to the\n original data, i.e. writing to the returned filepath may modify the\n original data.\n\n NOTE:\n The file path returned should not be explicitly removed by the user.\n Instead, the ``clean_temp()`` method should be called on this\n object.\n\n :param temp_dir: Optional directory to write temporary file in,\n otherwise we use the platform default temporary files directory.\n If this is an empty string, we count it the same as having provided\n None.\n :type temp_dir: None or str\n\n :return: Path to the temporary file\n :rtype: str\n\n " self._clear_no_exist() if temp_dir: abs_temp_dir = osp.abspath(osp.expanduser(temp_dir)) for tf in self._temp_filepath_stack: if (osp.dirname(tf) == abs_temp_dir): return tf self._temp_filepath_stack.append(self._write_new_temp(temp_dir)) elif (not self._temp_filepath_stack): self._temp_filepath_stack.append(self._write_new_temp(None)) return self._temp_filepath_stack[(- 1)]<|docstring|>Write this data's bytes to a temporary file on disk, returning the path to the written file, whose extension is guessed based on this data's content type. It is not guaranteed that the returned file path does not point to the original data, i.e. writing to the returned filepath may modify the original data. NOTE: The file path returned should not be explicitly removed by the user. Instead, the ``clean_temp()`` method should be called on this object. :param temp_dir: Optional directory to write temporary file in, otherwise we use the platform default temporary files directory. If this is an empty string, we count it the same as having provided None. :type temp_dir: None or str :return: Path to the temporary file :rtype: str<|endoftext|>
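A hedged sketch of the intended write_temp()/clean_temp() usage described above, wrapped in a helper so it is self-contained; elem is any concrete DataElement instance and '/tmp/smqtk_work' is an arbitrary example directory.

def read_head_via_temp(elem, n=16):
    # Materialize the element's bytes to a temp file, read a few bytes from the
    # on-disk copy, then let the element remove every temp file it created.
    fp = elem.write_temp(temp_dir='/tmp/smqtk_work')
    try:
        with open(fp, 'rb') as f:
            return f.read(n)
    finally:
        # Per the docstring above, do not remove fp directly; use clean_temp().
        elem.clean_temp()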
190e0efc39fb31660e541b3d388131abb92b6fdc0bb8a96e1b6d8cab414810d8
def clean_temp(self): '\n Clean any temporary files created by this element. This does nothing if\n no temporary files have been generated for this element yet.\n ' if len(self._temp_filepath_stack): for fp in self._temp_filepath_stack: if os.path.isfile(fp): os.remove(fp) self._temp_filepath_stack = []
Clean any temporary files created by this element. This does nothing if no temporary files have been generated for this element yet.
python/smqtk/representation/data_element/__init__.py
clean_temp
jbeezley/SMQTK
1
python
def clean_temp(self): '\n Clean any temporary files created by this element. This does nothing if\n no temporary files have been generated for this element yet.\n ' if len(self._temp_filepath_stack): for fp in self._temp_filepath_stack: if os.path.isfile(fp): os.remove(fp) self._temp_filepath_stack = []
def clean_temp(self): '\n Clean any temporary files created by this element. This does nothing if\n no temporary files have been generated for this element yet.\n ' if len(self._temp_filepath_stack): for fp in self._temp_filepath_stack: if os.path.isfile(fp): os.remove(fp) self._temp_filepath_stack = []<|docstring|>Clean any temporary files created by this element. This does nothing if no temporary files have been generated for this element yet.<|endoftext|>
aa6d757b53d5b6d530f1ab54052e4d65aacb165ce7b6a7403b6e701444462faa
def uuid(self): "\n UUID for this data element.\n\n This many take different forms from integers to strings to a uuid.UUID\n instance. This must return a hashable data type.\n\n By default, this ends up being the hex stringification of the SHA1 hash\n of this data's bytes. Specific implementations may provide other UUIDs,\n however.\n\n :return: UUID value for this data element. This return value should be\n hashable.\n :rtype: collections.Hashable\n\n " return self.sha1()
UUID for this data element. This may take different forms from integers to strings to a uuid.UUID instance. This must return a hashable data type. By default, this ends up being the hex stringification of the SHA1 hash of this data's bytes. Specific implementations may provide other UUIDs, however. :return: UUID value for this data element. This return value should be hashable. :rtype: collections.Hashable
python/smqtk/representation/data_element/__init__.py
uuid
jbeezley/SMQTK
1
python
def uuid(self): "\n UUID for this data element.\n\n This many take different forms from integers to strings to a uuid.UUID\n instance. This must return a hashable data type.\n\n By default, this ends up being the hex stringification of the SHA1 hash\n of this data's bytes. Specific implementations may provide other UUIDs,\n however.\n\n :return: UUID value for this data element. This return value should be\n hashable.\n :rtype: collections.Hashable\n\n " return self.sha1()
def uuid(self): "\n UUID for this data element.\n\n This many take different forms from integers to strings to a uuid.UUID\n instance. This must return a hashable data type.\n\n By default, this ends up being the hex stringification of the SHA1 hash\n of this data's bytes. Specific implementations may provide other UUIDs,\n however.\n\n :return: UUID value for this data element. This return value should be\n hashable.\n :rtype: collections.Hashable\n\n " return self.sha1()<|docstring|>UUID for this data element. This many take different forms from integers to strings to a uuid.UUID instance. This must return a hashable data type. By default, this ends up being the hex stringification of the SHA1 hash of this data's bytes. Specific implementations may provide other UUIDs, however. :return: UUID value for this data element. This return value should be hashable. :rtype: collections.Hashable<|endoftext|>
c39f3a54199a58a99258644d47ad1fb12381418e79814aafc6f7273912f93977
def to_buffered_reader(self): "\n Wrap this element's bytes in a ``io.BufferedReader`` instance for use as\n file-like object for reading.\n\n As we use the ``get_bytes`` function, this element's bytes must safely\n fit in memory for this method to be usable.\n\n :return: New BufferedReader instance\n :rtype: io.BufferedReader\n\n " return io.BufferedReader(io.BytesIO(self.get_bytes()))
Wrap this element's bytes in a ``io.BufferedReader`` instance for use as file-like object for reading. As we use the ``get_bytes`` function, this element's bytes must safely fit in memory for this method to be usable. :return: New BufferedReader instance :rtype: io.BufferedReader
python/smqtk/representation/data_element/__init__.py
to_buffered_reader
jbeezley/SMQTK
1
python
def to_buffered_reader(self): "\n Wrap this element's bytes in a ``io.BufferedReader`` instance for use as\n file-like object for reading.\n\n As we use the ``get_bytes`` function, this element's bytes must safely\n fit in memory for this method to be usable.\n\n :return: New BufferedReader instance\n :rtype: io.BufferedReader\n\n " return io.BufferedReader(io.BytesIO(self.get_bytes()))
def to_buffered_reader(self): "\n Wrap this element's bytes in a ``io.BufferedReader`` instance for use as\n file-like object for reading.\n\n As we use the ``get_bytes`` function, this element's bytes must safely\n fit in memory for this method to be usable.\n\n :return: New BufferedReader instance\n :rtype: io.BufferedReader\n\n " return io.BufferedReader(io.BytesIO(self.get_bytes()))<|docstring|>Wrap this element's bytes in a ``io.BufferedReader`` instance for use as file-like object for reading. As we use the ``get_bytes`` function, this element's bytes must safely fit in memory for this method to be usable. :return: New BufferedReader instance :rtype: io.BufferedReader<|endoftext|>
92df7dce4909d01fd1a7b5f34359d34f82115992ea9be0719290f77ec043f360
def is_read_only(self): '\n :return: If this element can only be read from.\n :rtype: bool\n ' return (not self.writable())
:return: If this element can only be read from. :rtype: bool
python/smqtk/representation/data_element/__init__.py
is_read_only
jbeezley/SMQTK
1
python
def is_read_only(self): '\n :return: If this element can only be read from.\n :rtype: bool\n ' return (not self.writable())
def is_read_only(self): '\n :return: If this element can only be read from.\n :rtype: bool\n ' return (not self.writable())<|docstring|>:return: If this element can only be read from. :rtype: bool<|endoftext|>
29eff2eb1db49def565062c584d5d9f7012df757f65b685546dbecbbd43fd69a
@abc.abstractmethod def content_type(self): '\n :return: Standard type/subtype string for this data element, or None if\n the content type is unknown.\n :rtype: str or None\n '
:return: Standard type/subtype string for this data element, or None if the content type is unknown. :rtype: str or None
python/smqtk/representation/data_element/__init__.py
content_type
jbeezley/SMQTK
1
python
@abc.abstractmethod def content_type(self): '\n :return: Standard type/subtype string for this data element, or None if\n the content type is unknown.\n :rtype: str or None\n '
@abc.abstractmethod def content_type(self): '\n :return: Standard type/subtype string for this data element, or None if\n the content type is unknown.\n :rtype: str or None\n '<|docstring|>:return: Standard type/subtype string for this data element, or None if the content type is unknown. :rtype: str or None<|endoftext|>
4e5e51cf90d71c8e9d5b6b7a3d7c5bc75ec25547a7faf8c5f5d9049cab8ae76e
@abc.abstractmethod def is_empty(self): '\n Check if this element contains no bytes.\n\n The intend of this method is to quickly check if there is any data\n behind this element, ideally without having to read all/any of the\n underlying data.\n\n :return: If this element contains 0 bytes.\n :rtype: bool\n\n '
Check if this element contains no bytes. The intent of this method is to quickly check if there is any data behind this element, ideally without having to read all/any of the underlying data. :return: If this element contains 0 bytes. :rtype: bool
python/smqtk/representation/data_element/__init__.py
is_empty
jbeezley/SMQTK
1
python
@abc.abstractmethod def is_empty(self): '\n Check if this element contains no bytes.\n\n The intend of this method is to quickly check if there is any data\n behind this element, ideally without having to read all/any of the\n underlying data.\n\n :return: If this element contains 0 bytes.\n :rtype: bool\n\n '
@abc.abstractmethod def is_empty(self): '\n Check if this element contains no bytes.\n\n The intend of this method is to quickly check if there is any data\n behind this element, ideally without having to read all/any of the\n underlying data.\n\n :return: If this element contains 0 bytes.\n :rtype: bool\n\n '<|docstring|>Check if this element contains no bytes. The intend of this method is to quickly check if there is any data behind this element, ideally without having to read all/any of the underlying data. :return: If this element contains 0 bytes. :rtype: bool<|endoftext|>
1ef59d911695f76ec92d9cffdf152b5e20d604dc7063f42e5d6051ede22189fa
@abc.abstractmethod def get_bytes(self): '\n :return: Get the bytes for this data element.\n :rtype: bytes\n '
:return: Get the bytes for this data element. :rtype: bytes
python/smqtk/representation/data_element/__init__.py
get_bytes
jbeezley/SMQTK
1
python
@abc.abstractmethod def get_bytes(self): '\n :return: Get the bytes for this data element.\n :rtype: bytes\n '
@abc.abstractmethod def get_bytes(self): '\n :return: Get the bytes for this data element.\n :rtype: bytes\n '<|docstring|>:return: Get the bytes for this data element. :rtype: bytes<|endoftext|>
aa89b96096f48199bb07298eaf33ad177a03b3d41debb62ccbe264fb7b66887e
@abc.abstractmethod def writable(self): '\n :return: if this instance supports setting bytes.\n :rtype: bool\n '
:return: if this instance supports setting bytes. :rtype: bool
python/smqtk/representation/data_element/__init__.py
writable
jbeezley/SMQTK
1
python
@abc.abstractmethod def writable(self): '\n :return: if this instance supports setting bytes.\n :rtype: bool\n '
@abc.abstractmethod def writable(self): '\n :return: if this instance supports setting bytes.\n :rtype: bool\n '<|docstring|>:return: if this instance supports setting bytes. :rtype: bool<|endoftext|>
467ff0f77f61707f26a31aa124b9b84fb5a915a9e4a14c75f1f09fdd13f130fe
@abc.abstractmethod def set_bytes(self, b): '\n Set bytes to this data element.\n\n Not all implementations may support setting bytes (check ``writable``\n method return).\n\n This base abstract method should be called by sub-class implementations\n first. We check for mutability based on ``writable()`` method return and\n invalidate checksum caches.\n\n :param b: bytes to set.\n :type b: str\n\n :raises ReadOnlyError: This data element can only be read from / does\n not support writing.\n\n ' if (not self.writable()): raise ReadOnlyError(('This %s element is read only.' % self))
Set bytes to this data element. Not all implementations may support setting bytes (check ``writable`` method return). This base abstract method should be called by sub-class implementations first. We check for mutability based on ``writable()`` method return and invalidate checksum caches. :param b: bytes to set. :type b: str :raises ReadOnlyError: This data element can only be read from / does not support writing.
python/smqtk/representation/data_element/__init__.py
set_bytes
jbeezley/SMQTK
1
python
@abc.abstractmethod def set_bytes(self, b): '\n Set bytes to this data element.\n\n Not all implementations may support setting bytes (check ``writable``\n method return).\n\n This base abstract method should be called by sub-class implementations\n first. We check for mutability based on ``writable()`` method return and\n invalidate checksum caches.\n\n :param b: bytes to set.\n :type b: str\n\n :raises ReadOnlyError: This data element can only be read from / does\n not support writing.\n\n ' if (not self.writable()): raise ReadOnlyError(('This %s element is read only.' % self))
@abc.abstractmethod def set_bytes(self, b): '\n Set bytes to this data element.\n\n Not all implementations may support setting bytes (check ``writable``\n method return).\n\n This base abstract method should be called by sub-class implementations\n first. We check for mutability based on ``writable()`` method return and\n invalidate checksum caches.\n\n :param b: bytes to set.\n :type b: str\n\n :raises ReadOnlyError: This data element can only be read from / does\n not support writing.\n\n ' if (not self.writable()): raise ReadOnlyError(('This %s element is read only.' % self))<|docstring|>Set bytes to this data element. Not all implementations may support setting bytes (check ``writable`` method return). This base abstract method should be called by sub-class implementations first. We check for mutability based on ``writable()`` method return and invalidate checksum caches. :param b: bytes to set. :type b: str :raises ReadOnlyError: This data element can only be read from / does not support writing.<|endoftext|>
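An illustrative sketch of the sub-class contract stated above: call the base set_bytes() first so the writable() check (ReadOnlyError) and checksum-cache invalidation happen before the concrete write. BufferElement and its attributes are invented, and it is assumed the DataElement base initializer takes no extra arguments.

from smqtk.representation.data_element import DataElement


class BufferElement(DataElement):
    def __init__(self, b=b''):
        super(BufferElement, self).__init__()  # assumed no-arg base initializer
        self._buf = b

    def content_type(self):
        return None  # content type unknown

    def is_empty(self):
        return len(self._buf) == 0

    def get_bytes(self):
        return self._buf

    def writable(self):
        return True

    def set_bytes(self, b):
        # Base call first: raises ReadOnlyError when not writable and
        # invalidates any cached checksums before we store the new bytes.
        super(BufferElement, self).set_bytes(b)
        self._buf = b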
64344056a62ba9f59ace1d50b83e6b20c11ee44689ddbfa3874284b7274eb18c
def DB_loss_function(estimator, X, y_true=None): '\n Computes Davis-Bolding Index for a fitted KMeans\n\n args:\n model: Fitted KMeans object\n x: input data for evaluation \n ' preds = estimator.predict(X) n_clusters = (int(np.max(preds)) + 1) db_values = [] for i in range(n_clusters): for j in range((i + 1), n_clusters): cluster_i = X[(preds == i)] cluster_j = X[(preds == j)] avg_cluster_i = (((2 * np.sum(euclidean_distances(cluster_i, cluster_i))) / len(cluster_i)) * (len(cluster_i) - 1)) avg_cluster_j = (((2 * np.sum(euclidean_distances(cluster_j, cluster_j))) / len(cluster_j)) * (len(cluster_j) - 1)) u_cluster_i = (np.sum(cluster_i, axis=0) / len(cluster_i)) u_cluster_j = (np.sum(cluster_j, axis=0) / len(cluster_j)) db = ((avg_cluster_i + avg_cluster_j) / np.sum(euclidean_distances(u_cluster_i.reshape((- 1), 1), u_cluster_j.reshape((- 1), 1)))) db_values.append(db) dbi = (np.sum(np.array(db_values)) / n_clusters) return dbi
Computes the Davies-Bouldin Index for a fitted KMeans args: model: Fitted KMeans object x: input data for evaluation
HW02/HW02.py
DB_loss_function
iust-projects/Data-Mining-IUST
0
python
def DB_loss_function(estimator, X, y_true=None): '\n Computes Davis-Bolding Index for a fitted KMeans\n\n args:\n model: Fitted KMeans object\n x: input data for evaluation \n ' preds = estimator.predict(X) n_clusters = (int(np.max(preds)) + 1) db_values = [] for i in range(n_clusters): for j in range((i + 1), n_clusters): cluster_i = X[(preds == i)] cluster_j = X[(preds == j)] avg_cluster_i = (((2 * np.sum(euclidean_distances(cluster_i, cluster_i))) / len(cluster_i)) * (len(cluster_i) - 1)) avg_cluster_j = (((2 * np.sum(euclidean_distances(cluster_j, cluster_j))) / len(cluster_j)) * (len(cluster_j) - 1)) u_cluster_i = (np.sum(cluster_i, axis=0) / len(cluster_i)) u_cluster_j = (np.sum(cluster_j, axis=0) / len(cluster_j)) db = ((avg_cluster_i + avg_cluster_j) / np.sum(euclidean_distances(u_cluster_i.reshape((- 1), 1), u_cluster_j.reshape((- 1), 1)))) db_values.append(db) dbi = (np.sum(np.array(db_values)) / n_clusters) return dbi
def DB_loss_function(estimator, X, y_true=None): '\n Computes Davis-Bolding Index for a fitted KMeans\n\n args:\n model: Fitted KMeans object\n x: input data for evaluation \n ' preds = estimator.predict(X) n_clusters = (int(np.max(preds)) + 1) db_values = [] for i in range(n_clusters): for j in range((i + 1), n_clusters): cluster_i = X[(preds == i)] cluster_j = X[(preds == j)] avg_cluster_i = (((2 * np.sum(euclidean_distances(cluster_i, cluster_i))) / len(cluster_i)) * (len(cluster_i) - 1)) avg_cluster_j = (((2 * np.sum(euclidean_distances(cluster_j, cluster_j))) / len(cluster_j)) * (len(cluster_j) - 1)) u_cluster_i = (np.sum(cluster_i, axis=0) / len(cluster_i)) u_cluster_j = (np.sum(cluster_j, axis=0) / len(cluster_j)) db = ((avg_cluster_i + avg_cluster_j) / np.sum(euclidean_distances(u_cluster_i.reshape((- 1), 1), u_cluster_j.reshape((- 1), 1)))) db_values.append(db) dbi = (np.sum(np.array(db_values)) / n_clusters) return dbi<|docstring|>Computes Davis-Bolding Index for a fitted KMeans args: model: Fitted KMeans object x: input data for evaluation<|endoftext|>
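A hedged usage sketch for the scorer above (signature (estimator, X, y_true=None); lower values indicate better-separated clusters). It assumes DB_loss_function and its euclidean_distances/numpy imports are in scope from HW02.py, and uses scikit-learn with synthetic data purely for illustration.

from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from sklearn.metrics import davies_bouldin_score

X, _ = make_blobs(n_samples=300, centers=4, random_state=0)
km = KMeans(n_clusters=4, random_state=0).fit(X)

print(DB_loss_function(km, X))              # index as computed by the code above
print(davies_bouldin_score(X, km.labels_))  # scikit-learn's standard definition, for comparison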
a5e1a3c2192782c301313653808ede9732dec11e9f0737d07a9648ad7a1dca3e
def __init__(self, min_samples, eps): '\n Constructs DBSCAN given parameters of neighborhood\n\n :param min_samples: Minimum samples within eps radius to be consider as a core point\n :param eps: Radius of core point\n ' self.min_samples = min_samples self.eps = eps self.labels = None self.core_points = None
Constructs DBSCAN given parameters of neighborhood :param min_samples: Minimum samples within eps radius to be considered as a core point :param eps: Radius of core point
HW02/HW02.py
__init__
iust-projects/Data-Mining-IUST
0
python
def __init__(self, min_samples, eps): '\n Constructs DBSCAN given parameters of neighborhood\n\n :param min_samples: Minimum samples within eps radius to be consider as a core point\n :param eps: Radius of core point\n ' self.min_samples = min_samples self.eps = eps self.labels = None self.core_points = None
def __init__(self, min_samples, eps): '\n Constructs DBSCAN given parameters of neighborhood\n\n :param min_samples: Minimum samples within eps radius to be consider as a core point\n :param eps: Radius of core point\n ' self.min_samples = min_samples self.eps = eps self.labels = None self.core_points = None<|docstring|>Constructs DBSCAN given parameters of neighborhood :param min_samples: Minimum samples within eps radius to be consider as a core point :param eps: Radius of core point<|endoftext|>
d5888a2012962e1bb80605ec07bf96a4e049987931e3a65f4251848ab50d5969
def fit_predict(self, x, *args, **kwargs): '\n Fits the data using DBSCAN and returns labels and core points\n Order of data matter!\n\n Algorithm:\n 1. Consider a list of points that have not been seen yet\n 2. Read an arbitrary point until there is no unseen point left\n 3. If there are at least ``min_samples`` points within a radius of ``eps``\n then all these points are from same cluster\n 4. Expand this cluster for its all core points for all neighbors\n 5. Repeat\n\n :param x: N-dimensional numpy array\n\n :return: A tuple of labels of each point and index of core points\n where label=-1 corresponds to noise data and label=N N>=1 demonstrates cluster label\n ' self.labels = np.zeros((len(x),)) self.core_points = np.zeros((len(x),)) current_cluster = 1 for pnt in range(len(x)): if (self.labels[pnt] == 0): neighbor_indices = self.__nearest_neighbors(x, x[pnt]) if (len(neighbor_indices) >= self.min_samples): self.__expand(x, pnt, current_cluster) current_cluster += 1 else: self.labels[pnt] = (- 1) return (self.labels, self.core_points)
Fits the data using DBSCAN and returns labels and core points Order of data matters! Algorithm: 1. Consider a list of points that have not been seen yet 2. Read an arbitrary point until there is no unseen point left 3. If there are at least ``min_samples`` points within a radius of ``eps`` then all these points are from the same cluster 4. Expand this cluster through all of its core points and their neighbors 5. Repeat :param x: N-dimensional numpy array :return: A tuple of labels of each point and index of core points where label=-1 corresponds to noise data and label=N (N>=1) denotes the cluster label
HW02/HW02.py
fit_predict
iust-projects/Data-Mining-IUST
0
python
def fit_predict(self, x, *args, **kwargs): '\n Fits the data using DBSCAN and returns labels and core points\n Order of data matter!\n\n Algorithm:\n 1. Consider a list of points that have not been seen yet\n 2. Read an arbitrary point until there is no unseen point left\n 3. If there are at least ``min_samples`` points within a radius of ``eps``\n then all these points are from same cluster\n 4. Expand this cluster for its all core points for all neighbors\n 5. Repeat\n\n :param x: N-dimensional numpy array\n\n :return: A tuple of labels of each point and index of core points\n where label=-1 corresponds to noise data and label=N N>=1 demonstrates cluster label\n ' self.labels = np.zeros((len(x),)) self.core_points = np.zeros((len(x),)) current_cluster = 1 for pnt in range(len(x)): if (self.labels[pnt] == 0): neighbor_indices = self.__nearest_neighbors(x, x[pnt]) if (len(neighbor_indices) >= self.min_samples): self.__expand(x, pnt, current_cluster) current_cluster += 1 else: self.labels[pnt] = (- 1) return (self.labels, self.core_points)
def fit_predict(self, x, *args, **kwargs): '\n Fits the data using DBSCAN and returns labels and core points\n Order of data matter!\n\n Algorithm:\n 1. Consider a list of points that have not been seen yet\n 2. Read an arbitrary point until there is no unseen point left\n 3. If there are at least ``min_samples`` points within a radius of ``eps``\n then all these points are from same cluster\n 4. Expand this cluster for its all core points for all neighbors\n 5. Repeat\n\n :param x: N-dimensional numpy array\n\n :return: A tuple of labels of each point and index of core points\n where label=-1 corresponds to noise data and label=N N>=1 demonstrates cluster label\n ' self.labels = np.zeros((len(x),)) self.core_points = np.zeros((len(x),)) current_cluster = 1 for pnt in range(len(x)): if (self.labels[pnt] == 0): neighbor_indices = self.__nearest_neighbors(x, x[pnt]) if (len(neighbor_indices) >= self.min_samples): self.__expand(x, pnt, current_cluster) current_cluster += 1 else: self.labels[pnt] = (- 1) return (self.labels, self.core_points)<|docstring|>Fits the data using DBSCAN and returns labels and core points Order of data matter! Algorithm: 1. Consider a list of points that have not been seen yet 2. Read an arbitrary point until there is no unseen point left 3. If there are at least ``min_samples`` points within a radius of ``eps`` then all these points are from same cluster 4. Expand this cluster for its all core points for all neighbors 5. Repeat :param x: N-dimensional numpy array :return: A tuple of labels of each point and index of core points where label=-1 corresponds to noise data and label=N N>=1 demonstrates cluster label<|endoftext|>
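A hedged usage sketch for the clustering class above, assuming it is named DBSCAN as its constructor docstring suggests and that HW02.py's imports (numpy, euclidean_distances, deepcopy) are in scope; the eps/min_samples values and the two-moons data are arbitrary.

import numpy as np
from sklearn.datasets import make_moons

X, _ = make_moons(n_samples=200, noise=0.05, random_state=0)

db = DBSCAN(min_samples=5, eps=0.2)
labels, core_flags = db.fit_predict(X)

print(np.unique(labels))              # -1 marks noise; 1..N are cluster labels
print(int(core_flags.sum()), 'points flagged as core during expansion')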
d1d1570ca0ae2de329fa09d4df4a21773bfc9a31f9c7383481bf17fcac9b3bf2
def __nearest_neighbors(self, data, point): '\n Finds points near to the point ``point`` within the range of ``eps``\n\n :param point: A point\n :param: All points\n\n :return: Indices of nearest neighbor points\n ' distances = euclidean_distances(data, point.reshape(1, (- 1))) neighbors = (distances <= self.eps) topk = np.argsort(distances, axis=0) neighbors_idx = (np.max(neighbors[topk].nonzero()[0]) + 1) return topk[:neighbors_idx].flatten()
Finds points near to the point ``point`` within the range of ``eps`` :param point: A point :param data: All points :return: Indices of nearest neighbor points
HW02/HW02.py
__nearest_neighbors
iust-projects/Data-Mining-IUST
0
python
def __nearest_neighbors(self, data, point): '\n Finds points near to the point ``point`` within the range of ``eps``\n\n :param point: A point\n :param: All points\n\n :return: Indices of nearest neighbor points\n ' distances = euclidean_distances(data, point.reshape(1, (- 1))) neighbors = (distances <= self.eps) topk = np.argsort(distances, axis=0) neighbors_idx = (np.max(neighbors[topk].nonzero()[0]) + 1) return topk[:neighbors_idx].flatten()
def __nearest_neighbors(self, data, point): '\n Finds points near to the point ``point`` within the range of ``eps``\n\n :param point: A point\n :param: All points\n\n :return: Indices of nearest neighbor points\n ' distances = euclidean_distances(data, point.reshape(1, (- 1))) neighbors = (distances <= self.eps) topk = np.argsort(distances, axis=0) neighbors_idx = (np.max(neighbors[topk].nonzero()[0]) + 1) return topk[:neighbors_idx].flatten()<|docstring|>Finds points near to the point ``point`` within the range of ``eps`` :param point: A point :param: All points :return: Indices of nearest neighbor points<|endoftext|>
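A more direct formulation of what __nearest_neighbors computes above, shown as a hedged sketch: the indices of every point within eps of the query point (the original additionally orders them by distance, which is dropped here).

import numpy as np
from sklearn.metrics.pairwise import euclidean_distances

def neighbors_within_eps(data, point, eps):
    # Distance from every row of `data` to `point`, then keep indices within eps.
    d = euclidean_distances(data, point.reshape(1, -1)).ravel()
    return np.flatnonzero(d <= eps)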
b4b4a98dbab9b3e16c156528af444146dd24a096548fd1c95d70a2bbabb663ec
def __expand(self, data, point_idx, current_cluster): '\n Expands ``current_cluster`` using given point w.r.t. ``eps`` and ``min_samples``\n Algorithm:\n 1. Get a point as the start point for ``current_cluster``\n 2. Get its neighbors and go through them one by one using queue logic\n 3. If the neighbor is noise, then add it to the current cluster, if it is unseen, get all its neighbors\n then add them to the list of neighbors of original point\n 4. Repeat step 2 and 3 until all points in the list of neighbors are processed.\n\n :param data: Whole data to be clustered\n :param point_idx: The index of a point of the current cluster as the start point for expansion\n :param current_cluster: The label of current cluster\n :return: None\n ' self.labels[point_idx] = current_cluster neighbors_indices = deepcopy(self.__nearest_neighbors(data, data[point_idx])) while (len(neighbors_indices) > 0): neighbor_point = neighbors_indices[0] neighbors_indices = np.delete(neighbors_indices, 0, 0) if (self.labels[neighbor_point] == (- 1)): self.labels[neighbor_point] = current_cluster elif (self.labels[neighbor_point] == 0): self.labels[neighbor_point] = current_cluster neighbors_indices_neighbor_point = self.__nearest_neighbors(data, data[neighbor_point]) if (len(neighbors_indices_neighbor_point) >= self.min_samples): neighbors_indices = np.concatenate((neighbors_indices, neighbors_indices_neighbor_point)) self.core_points[neighbor_point] = 1
Expands ``current_cluster`` using the given point w.r.t. ``eps`` and ``min_samples`` Algorithm: 1. Get a point as the start point for ``current_cluster`` 2. Get its neighbors and go through them one by one using queue logic 3. If the neighbor is noise, then add it to the current cluster; if it is unseen, get all its neighbors and add them to the list of neighbors of the original point 4. Repeat steps 2 and 3 until all points in the list of neighbors are processed. :param data: Whole data to be clustered :param point_idx: The index of a point of the current cluster as the start point for expansion :param current_cluster: The label of the current cluster :return: None
HW02/HW02.py
__expand
iust-projects/Data-Mining-IUST
0
python
def __expand(self, data, point_idx, current_cluster): '\n Expands ``current_cluster`` using given point w.r.t. ``eps`` and ``min_samples``\n Algorithm:\n 1. Get a point as the start point for ``current_cluster``\n 2. Get its neighbors and go through them one by one using queue logic\n 3. If the neighbor is noise, then add it to the current cluster, if it is unseen, get all its neighbors\n then add them to the list of neighbors of original point\n 4. Repeat step 2 and 3 until all points in the list of neighbors are processed.\n\n :param data: Whole data to be clustered\n :param point_idx: The index of a point of the current cluster as the start point for expansion\n :param current_cluster: The label of current cluster\n :return: None\n ' self.labels[point_idx] = current_cluster neighbors_indices = deepcopy(self.__nearest_neighbors(data, data[point_idx])) while (len(neighbors_indices) > 0): neighbor_point = neighbors_indices[0] neighbors_indices = np.delete(neighbors_indices, 0, 0) if (self.labels[neighbor_point] == (- 1)): self.labels[neighbor_point] = current_cluster elif (self.labels[neighbor_point] == 0): self.labels[neighbor_point] = current_cluster neighbors_indices_neighbor_point = self.__nearest_neighbors(data, data[neighbor_point]) if (len(neighbors_indices_neighbor_point) >= self.min_samples): neighbors_indices = np.concatenate((neighbors_indices, neighbors_indices_neighbor_point)) self.core_points[neighbor_point] = 1
def __expand(self, data, point_idx, current_cluster): '\n Expands ``current_cluster`` using given point w.r.t. ``eps`` and ``min_samples``\n Algorithm:\n 1. Get a point as the start point for ``current_cluster``\n 2. Get its neighbors and go through them one by one using queue logic\n 3. If the neighbor is noise, then add it to the current cluster, if it is unseen, get all its neighbors\n then add them to the list of neighbors of original point\n 4. Repeat step 2 and 3 until all points in the list of neighbors are processed.\n\n :param data: Whole data to be clustered\n :param point_idx: The index of a point of the current cluster as the start point for expansion\n :param current_cluster: The label of current cluster\n :return: None\n ' self.labels[point_idx] = current_cluster neighbors_indices = deepcopy(self.__nearest_neighbors(data, data[point_idx])) while (len(neighbors_indices) > 0): neighbor_point = neighbors_indices[0] neighbors_indices = np.delete(neighbors_indices, 0, 0) if (self.labels[neighbor_point] == (- 1)): self.labels[neighbor_point] = current_cluster elif (self.labels[neighbor_point] == 0): self.labels[neighbor_point] = current_cluster neighbors_indices_neighbor_point = self.__nearest_neighbors(data, data[neighbor_point]) if (len(neighbors_indices_neighbor_point) >= self.min_samples): neighbors_indices = np.concatenate((neighbors_indices, neighbors_indices_neighbor_point)) self.core_points[neighbor_point] = 1<|docstring|>Expands ``current_cluster`` using given point w.r.t. ``eps`` and ``min_samples`` Algorithm: 1. Get a point as the start point for ``current_cluster`` 2. Get its neighbors and go through them one by one using queue logic 3. If the neighbor is noise, then add it to the current cluster, if it is unseen, get all its neighbors then add them to the list of neighbors of original point 4. Repeat step 2 and 3 until all points in the list of neighbors are processed. :param data: Whole data to be clustered :param point_idx: The index of a point of the current cluster as the start point for expansion :param current_cluster: The label of current cluster :return: None<|endoftext|>
068d6fd4d4c026d8c6f0544da21de53bcdfe2441989db66cb370ad87e34e63f1
def __init__(self, amqp_client, zookeeper_client, db_conn, args, dm_logger): 'Initialize ZooKeeper, RabbitMQ, Sandesh, DB conn etc.' DeviceJobManager._instance = self self._amqp_client = amqp_client self._zookeeper_client = zookeeper_client self._db_conn = db_conn self._args = args self._job_mgr_statistics = {'max_job_count': self._args.max_job_count, 'running_job_count': 0} self.job_status = {} self._job_mgr_running_instances = {} job_args = {'collectors': self._args.collectors, 'fabric_ansible_conf_file': self._args.fabric_ansible_conf_file, 'host_ip': self._args.host_ip, 'zk_server_ip': self._args.zk_server_ip, 'cluster_id': self._args.cluster_id} self._job_args = json.dumps(job_args) self._job_log_utils = JobLogUtils(sandesh_instance_id=('DeviceJobManager' + str(time.time())), config_args=self._job_args, sandesh_instance=dm_logger._sandesh) self._logger = self._job_log_utils.config_logger self._sandesh = self._logger._sandesh self._amqp_client.add_exchange(self.JOB_STATUS_EXCHANGE, type='direct') self._amqp_client.add_consumer((self.JOB_STATUS_CONSUMER + 'dummy'), self.JOB_STATUS_EXCHANGE, routing_key=(self.JOB_STATUS_ROUTING_KEY + 'dummy'), auto_delete=True) self._amqp_client.add_exchange(self.JOB_REQUEST_EXCHANGE, type='direct') self._amqp_client.add_consumer(self.JOB_REQUEST_CONSUMER, self.JOB_REQUEST_EXCHANGE, routing_key=self.JOB_REQUEST_ROUTING_KEY, callback=self.handle_execute_job_request) abort_q_name = '.'.join([self.JOB_ABORT_CONSUMER, socket.getfqdn(self._args.host_ip)]) self._amqp_client.add_consumer(abort_q_name, self.JOB_REQUEST_EXCHANGE, routing_key=self.JOB_ABORT_ROUTING_KEY, callback=self.handle_abort_job_request)
Initialize ZooKeeper, RabbitMQ, Sandesh, DB conn etc.
src/config/device-manager/device_manager/device_job_manager.py
__init__
pltf/contrail-controller
0
python
def __init__(self, amqp_client, zookeeper_client, db_conn, args, dm_logger): DeviceJobManager._instance = self self._amqp_client = amqp_client self._zookeeper_client = zookeeper_client self._db_conn = db_conn self._args = args self._job_mgr_statistics = {'max_job_count': self._args.max_job_count, 'running_job_count': 0} self.job_status = {} self._job_mgr_running_instances = {} job_args = {'collectors': self._args.collectors, 'fabric_ansible_conf_file': self._args.fabric_ansible_conf_file, 'host_ip': self._args.host_ip, 'zk_server_ip': self._args.zk_server_ip, 'cluster_id': self._args.cluster_id} self._job_args = json.dumps(job_args) self._job_log_utils = JobLogUtils(sandesh_instance_id=('DeviceJobManager' + str(time.time())), config_args=self._job_args, sandesh_instance=dm_logger._sandesh) self._logger = self._job_log_utils.config_logger self._sandesh = self._logger._sandesh self._amqp_client.add_exchange(self.JOB_STATUS_EXCHANGE, type='direct') self._amqp_client.add_consumer((self.JOB_STATUS_CONSUMER + 'dummy'), self.JOB_STATUS_EXCHANGE, routing_key=(self.JOB_STATUS_ROUTING_KEY + 'dummy'), auto_delete=True) self._amqp_client.add_exchange(self.JOB_REQUEST_EXCHANGE, type='direct') self._amqp_client.add_consumer(self.JOB_REQUEST_CONSUMER, self.JOB_REQUEST_EXCHANGE, routing_key=self.JOB_REQUEST_ROUTING_KEY, callback=self.handle_execute_job_request) abort_q_name = '.'.join([self.JOB_ABORT_CONSUMER, socket.getfqdn(self._args.host_ip)]) self._amqp_client.add_consumer(abort_q_name, self.JOB_REQUEST_EXCHANGE, routing_key=self.JOB_ABORT_ROUTING_KEY, callback=self.handle_abort_job_request)
def __init__(self, amqp_client, zookeeper_client, db_conn, args, dm_logger): DeviceJobManager._instance = self self._amqp_client = amqp_client self._zookeeper_client = zookeeper_client self._db_conn = db_conn self._args = args self._job_mgr_statistics = {'max_job_count': self._args.max_job_count, 'running_job_count': 0} self.job_status = {} self._job_mgr_running_instances = {} job_args = {'collectors': self._args.collectors, 'fabric_ansible_conf_file': self._args.fabric_ansible_conf_file, 'host_ip': self._args.host_ip, 'zk_server_ip': self._args.zk_server_ip, 'cluster_id': self._args.cluster_id} self._job_args = json.dumps(job_args) self._job_log_utils = JobLogUtils(sandesh_instance_id=('DeviceJobManager' + str(time.time())), config_args=self._job_args, sandesh_instance=dm_logger._sandesh) self._logger = self._job_log_utils.config_logger self._sandesh = self._logger._sandesh self._amqp_client.add_exchange(self.JOB_STATUS_EXCHANGE, type='direct') self._amqp_client.add_consumer((self.JOB_STATUS_CONSUMER + 'dummy'), self.JOB_STATUS_EXCHANGE, routing_key=(self.JOB_STATUS_ROUTING_KEY + 'dummy'), auto_delete=True) self._amqp_client.add_exchange(self.JOB_REQUEST_EXCHANGE, type='direct') self._amqp_client.add_consumer(self.JOB_REQUEST_CONSUMER, self.JOB_REQUEST_EXCHANGE, routing_key=self.JOB_REQUEST_ROUTING_KEY, callback=self.handle_execute_job_request) abort_q_name = '.'.join([self.JOB_ABORT_CONSUMER, socket.getfqdn(self._args.host_ip)]) self._amqp_client.add_consumer(abort_q_name, self.JOB_REQUEST_EXCHANGE, routing_key=self.JOB_ABORT_ROUTING_KEY, callback=self.handle_abort_job_request)<|docstring|>Initialize ZooKeeper, RabbitMQ, Sandesh, DB conn etc.<|endoftext|>
d2b1a97b65f14baa4ea5c2d168fe8f94d5d74fef8b81e18004cd705bdc7dca76
def setup(bot: Bot) -> None: 'Load the Information cog.' bot.add_cog(Information(bot))
Load the Information cog.
bot/exts/info/information.py
setup
Zedeldi/bot
2
python
def setup(bot: Bot) -> None: bot.add_cog(Information(bot))
def setup(bot: Bot) -> None: bot.add_cog(Information(bot))<|docstring|>Load the Information cog.<|endoftext|>
87241dad735b33a2ded0a005fb359d96a32c7622b30b5ba3cc00dfa20603a92f
@staticmethod def get_channel_type_counts(guild: Guild) -> DefaultDict[(str, int)]: 'Return the total amounts of the various types of channels in `guild`.' channel_counter = defaultdict(int) for channel in guild.channels: if is_staff_channel(channel): channel_counter['staff'] += 1 else: channel_counter[str(channel.type)] += 1 return channel_counter
Return the total amounts of the various types of channels in `guild`.
bot/exts/info/information.py
get_channel_type_counts
Zedeldi/bot
2
python
@staticmethod def get_channel_type_counts(guild: Guild) -> DefaultDict[(str, int)]: channel_counter = defaultdict(int) for channel in guild.channels: if is_staff_channel(channel): channel_counter['staff'] += 1 else: channel_counter[str(channel.type)] += 1 return channel_counter
@staticmethod def get_channel_type_counts(guild: Guild) -> DefaultDict[(str, int)]: channel_counter = defaultdict(int) for channel in guild.channels: if is_staff_channel(channel): channel_counter['staff'] += 1 else: channel_counter[str(channel.type)] += 1 return channel_counter<|docstring|>Return the total amounts of the various types of channels in `guild`.<|endoftext|>
981445dddccced083b4dca643d52a467eb71eac3ee2379571f9fe09e62279b31
@staticmethod def get_member_counts(guild: Guild) -> Dict[(str, int)]: 'Return the total number of members for certain roles in `guild`.' roles = (guild.get_role(role_id) for role_id in (constants.Roles.helpers, constants.Roles.mod_team, constants.Roles.admins, constants.Roles.owners, constants.Roles.contributors)) return {role.name.title(): len(role.members) for role in roles}
Return the total number of members for certain roles in `guild`.
bot/exts/info/information.py
get_member_counts
Zedeldi/bot
2
python
@staticmethod def get_member_counts(guild: Guild) -> Dict[(str, int)]: roles = (guild.get_role(role_id) for role_id in (constants.Roles.helpers, constants.Roles.mod_team, constants.Roles.admins, constants.Roles.owners, constants.Roles.contributors)) return {role.name.title(): len(role.members) for role in roles}
@staticmethod def get_member_counts(guild: Guild) -> Dict[(str, int)]: roles = (guild.get_role(role_id) for role_id in (constants.Roles.helpers, constants.Roles.mod_team, constants.Roles.admins, constants.Roles.owners, constants.Roles.contributors)) return {role.name.title(): len(role.members) for role in roles}<|docstring|>Return the total number of members for certain roles in `guild`.<|endoftext|>
5dd6300e1f201a410162c2128d9568d1f1adef767467b5af2db4b4a561819ae3
def get_extended_server_info(self, ctx: Context) -> str: 'Return additional server info only visible in moderation channels.' talentpool_info = '' if (cog := self.bot.get_cog('Talentpool')): talentpool_info = f'''Nominated: {len(cog.watched_users)} ''' bb_info = '' if (cog := self.bot.get_cog('Big Brother')): bb_info = f'''BB-watched: {len(cog.watched_users)} ''' defcon_info = '' if (cog := self.bot.get_cog('Defcon')): threshold = (humanize_delta(cog.threshold) if cog.threshold else '-') defcon_info = f'''Defcon threshold: {threshold} ''' verification = f'''Verification level: {ctx.guild.verification_level.name} ''' python_general = self.bot.get_channel(constants.Channels.python_general) return textwrap.dedent(f''' {talentpool_info} {bb_info} {defcon_info} {verification} {python_general.mention} cooldown: {python_general.slowmode_delay}s ''')
Return additional server info only visible in moderation channels.
bot/exts/info/information.py
get_extended_server_info
Zedeldi/bot
2
python
def get_extended_server_info(self, ctx: Context) -> str: talentpool_info = '' if (cog := self.bot.get_cog('Talentpool')): talentpool_info = f'Nominated: {len(cog.watched_users)} ' bb_info = '' if (cog := self.bot.get_cog('Big Brother')): bb_info = f'BB-watched: {len(cog.watched_users)} ' defcon_info = '' if (cog := self.bot.get_cog('Defcon')): threshold = (humanize_delta(cog.threshold) if cog.threshold else '-') defcon_info = f'Defcon threshold: {threshold} ' verification = f'Verification level: {ctx.guild.verification_level.name} ' python_general = self.bot.get_channel(constants.Channels.python_general) return textwrap.dedent(f' {talentpool_info} {bb_info} {defcon_info} {verification} {python_general.mention} cooldown: {python_general.slowmode_delay}s ')
def get_extended_server_info(self, ctx: Context) -> str: talentpool_info = '' if (cog := self.bot.get_cog('Talentpool')): talentpool_info = f'Nominated: {len(cog.watched_users)} ' bb_info = '' if (cog := self.bot.get_cog('Big Brother')): bb_info = f'BB-watched: {len(cog.watched_users)} ' defcon_info = '' if (cog := self.bot.get_cog('Defcon')): threshold = (humanize_delta(cog.threshold) if cog.threshold else '-') defcon_info = f'Defcon threshold: {threshold} ' verification = f'Verification level: {ctx.guild.verification_level.name} ' python_general = self.bot.get_channel(constants.Channels.python_general) return textwrap.dedent(f' {talentpool_info} {bb_info} {defcon_info} {verification} {python_general.mention} cooldown: {python_general.slowmode_delay}s ')<|docstring|>Return additional server info only visible in moderation channels.<|endoftext|>
a7944e546f7c38afdc994c95a590e34aea3fdaccf0fc6f17bf6a8a3a88729eaa
@has_any_role(*constants.STAFF_ROLES) @command(name='roles') async def roles_info(self, ctx: Context) -> None: 'Returns a list of all roles and their corresponding IDs.' roles = sorted(ctx.guild.roles[1:], key=(lambda role: role.name)) role_list = [] for role in roles: role_list.append(f'`{role.id}` - {role.mention}') embed = Embed(title=f"Role information (Total {len(roles)} role{('s' * (len(role_list) > 1))})", colour=Colour.blurple()) (await LinePaginator.paginate(role_list, ctx, embed, empty=False))
Returns a list of all roles and their corresponding IDs.
bot/exts/info/information.py
roles_info
Zedeldi/bot
2
python
@has_any_role(*constants.STAFF_ROLES) @command(name='roles') async def roles_info(self, ctx: Context) -> None: roles = sorted(ctx.guild.roles[1:], key=(lambda role: role.name)) role_list = [] for role in roles: role_list.append(f'`{role.id}` - {role.mention}') embed = Embed(title=f"Role information (Total {len(roles)} role{('s' * (len(role_list) > 1))})", colour=Colour.blurple()) (await LinePaginator.paginate(role_list, ctx, embed, empty=False))
@has_any_role(*constants.STAFF_ROLES) @command(name='roles') async def roles_info(self, ctx: Context) -> None: roles = sorted(ctx.guild.roles[1:], key=(lambda role: role.name)) role_list = [] for role in roles: role_list.append(f'`{role.id}` - {role.mention}') embed = Embed(title=f"Role information (Total {len(roles)} role{('s' * (len(role_list) > 1))})", colour=Colour.blurple()) (await LinePaginator.paginate(role_list, ctx, embed, empty=False))<|docstring|>Returns a list of all roles and their corresponding IDs.<|endoftext|>
dd85bf178c6fa127ecf91f3c52845bb5fe47b9d9a50dd1662d6ea9706fde4324
@has_any_role(*constants.STAFF_ROLES) @command(name='role') async def role_info(self, ctx: Context, *roles: Union[(Role, str)]) -> None: '\n Return information on a role or list of roles.\n\n To specify multiple roles just add to the arguments, delimit roles with spaces in them using quotation marks.\n ' parsed_roles = set() failed_roles = set() all_roles = {role.id: role.name for role in ctx.guild.roles} for role_name in roles: if isinstance(role_name, Role): parsed_roles.add(role_name) continue match = rapidfuzz.process.extractOne(role_name, all_roles, score_cutoff=80, scorer=rapidfuzz.fuzz.ratio) if (not match): failed_roles.add(role_name) continue role = ctx.guild.get_role(match[2]) parsed_roles.add(role) if failed_roles: (await ctx.send(f":x: Could not retrieve the following roles: {', '.join(failed_roles)}")) for role in parsed_roles: (h, s, v) = colorsys.rgb_to_hsv(*role.colour.to_rgb()) embed = Embed(title=f'{role.name} info', colour=role.colour) embed.add_field(name='ID', value=role.id, inline=True) embed.add_field(name='Colour (RGB)', value=f'#{role.colour.value:0>6x}', inline=True) embed.add_field(name='Colour (HSV)', value=f'{h:.2f} {s:.2f} {v}', inline=True) embed.add_field(name='Member count', value=len(role.members), inline=True) embed.add_field(name='Position', value=role.position) embed.add_field(name='Permission code', value=role.permissions.value, inline=True) (await ctx.send(embed=embed))
Return information on a role or list of roles. To specify multiple roles just add to the arguments, delimit roles with spaces in them using quotation marks.
bot/exts/info/information.py
role_info
Zedeldi/bot
2
python
@has_any_role(*constants.STAFF_ROLES) @command(name='role') async def role_info(self, ctx: Context, *roles: Union[(Role, str)]) -> None: '\n Return information on a role or list of roles.\n\n To specify multiple roles just add to the arguments, delimit roles with spaces in them using quotation marks.\n ' parsed_roles = set() failed_roles = set() all_roles = {role.id: role.name for role in ctx.guild.roles} for role_name in roles: if isinstance(role_name, Role): parsed_roles.add(role_name) continue match = rapidfuzz.process.extractOne(role_name, all_roles, score_cutoff=80, scorer=rapidfuzz.fuzz.ratio) if (not match): failed_roles.add(role_name) continue role = ctx.guild.get_role(match[2]) parsed_roles.add(role) if failed_roles: (await ctx.send(f":x: Could not retrieve the following roles: {', '.join(failed_roles)}")) for role in parsed_roles: (h, s, v) = colorsys.rgb_to_hsv(*role.colour.to_rgb()) embed = Embed(title=f'{role.name} info', colour=role.colour) embed.add_field(name='ID', value=role.id, inline=True) embed.add_field(name='Colour (RGB)', value=f'#{role.colour.value:0>6x}', inline=True) embed.add_field(name='Colour (HSV)', value=f'{h:.2f} {s:.2f} {v}', inline=True) embed.add_field(name='Member count', value=len(role.members), inline=True) embed.add_field(name='Position', value=role.position) embed.add_field(name='Permission code', value=role.permissions.value, inline=True) (await ctx.send(embed=embed))
@has_any_role(*constants.STAFF_ROLES) @command(name='role') async def role_info(self, ctx: Context, *roles: Union[(Role, str)]) -> None: '\n Return information on a role or list of roles.\n\n To specify multiple roles just add to the arguments, delimit roles with spaces in them using quotation marks.\n ' parsed_roles = set() failed_roles = set() all_roles = {role.id: role.name for role in ctx.guild.roles} for role_name in roles: if isinstance(role_name, Role): parsed_roles.add(role_name) continue match = rapidfuzz.process.extractOne(role_name, all_roles, score_cutoff=80, scorer=rapidfuzz.fuzz.ratio) if (not match): failed_roles.add(role_name) continue role = ctx.guild.get_role(match[2]) parsed_roles.add(role) if failed_roles: (await ctx.send(f":x: Could not retrieve the following roles: {', '.join(failed_roles)}")) for role in parsed_roles: (h, s, v) = colorsys.rgb_to_hsv(*role.colour.to_rgb()) embed = Embed(title=f'{role.name} info', colour=role.colour) embed.add_field(name='ID', value=role.id, inline=True) embed.add_field(name='Colour (RGB)', value=f'#{role.colour.value:0>6x}', inline=True) embed.add_field(name='Colour (HSV)', value=f'{h:.2f} {s:.2f} {v}', inline=True) embed.add_field(name='Member count', value=len(role.members), inline=True) embed.add_field(name='Position', value=role.position) embed.add_field(name='Permission code', value=role.permissions.value, inline=True) (await ctx.send(embed=embed))<|docstring|>Return information on a role or list of roles. To specify multiple roles just add to the arguments, delimit roles with spaces in them using quotation marks.<|endoftext|>
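The role_info record above resolves free-text role names with rapidfuzz.process.extractOne, which, when given a mapping of IDs to names, returns the matched name, its score, and the mapping key. Below is a minimal standalone sketch of that lookup; the role IDs and names are invented for illustration and only the rapidfuzz call mirrors the command.

```python
# Standalone illustration of the fuzzy role lookup used in role_info above.
# The role IDs and names are made up; only the rapidfuzz call mirrors the command.
import rapidfuzz

all_roles = {111: 'Moderators', 222: 'Helpers', 333: 'Contributors'}

match = rapidfuzz.process.extractOne(
    'helpers',                   # user input with different casing
    all_roles,                   # mapping of role ID -> role name
    score_cutoff=80,             # drop weak matches, as the command does
    scorer=rapidfuzz.fuzz.ratio,
)
if match:
    name, score, role_id = match  # third element is the mapping key (the role ID)
    print(f'Matched {name!r} (id={role_id}) with score {score:.0f}')
```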
a64bec3ba6c3e30bbaec06b5573408e0c2453acf47d1dae3201ea6b144c63ebb
@command(name='server', aliases=['server_info', 'guild', 'guild_info']) async def server_info(self, ctx: Context) -> None: 'Returns an embed full of server information.' embed = Embed(colour=Colour.blurple(), title='Server Information') created = discord_timestamp(ctx.guild.created_at, TimestampFormats.RELATIVE) region = ctx.guild.region num_roles = (len(ctx.guild.roles) - 1) if (ctx.channel.id in (*constants.MODERATION_CHANNELS, constants.Channels.dev_core, constants.Channels.dev_contrib)): features = f''' Features: {', '.join(ctx.guild.features)}''' else: features = '' py_invite = (await self.bot.fetch_invite(constants.Guild.invite)) online_presences = py_invite.approximate_presence_count offline_presences = (py_invite.approximate_member_count - online_presences) member_status = f'{constants.Emojis.status_online} {online_presences} {constants.Emojis.status_offline} {offline_presences}' embed.description = textwrap.dedent(f''' Created: {created} Voice region: {region} {features} Roles: {num_roles} Member status: {member_status} ''') embed.set_thumbnail(url=ctx.guild.icon_url) total_members = ctx.guild.member_count member_counts = self.get_member_counts(ctx.guild) member_info = '\n'.join((f'{role}: {count}' for (role, count) in member_counts.items())) embed.add_field(name=f'Members: {total_members}', value=member_info) total_channels = len(ctx.guild.channels) channel_counts = self.get_channel_type_counts(ctx.guild) channel_info = '\n'.join((f'{channel.title()}: {count}' for (channel, count) in sorted(channel_counts.items()))) embed.add_field(name=f'Channels: {total_channels}', value=channel_info) if is_mod_channel(ctx.channel): embed.add_field(name='Moderation:', value=self.get_extended_server_info(ctx)) (await ctx.send(embed=embed))
Returns an embed full of server information.
bot/exts/info/information.py
server_info
Zedeldi/bot
2
python
@command(name='server', aliases=['server_info', 'guild', 'guild_info']) async def server_info(self, ctx: Context) -> None: embed = Embed(colour=Colour.blurple(), title='Server Information') created = discord_timestamp(ctx.guild.created_at, TimestampFormats.RELATIVE) region = ctx.guild.region num_roles = (len(ctx.guild.roles) - 1) if (ctx.channel.id in (*constants.MODERATION_CHANNELS, constants.Channels.dev_core, constants.Channels.dev_contrib)): features = f''' Features: {', '.join(ctx.guild.features)}''' else: features = '' py_invite = (await self.bot.fetch_invite(constants.Guild.invite)) online_presences = py_invite.approximate_presence_count offline_presences = (py_invite.approximate_member_count - online_presences) member_status = f'{constants.Emojis.status_online} {online_presences} {constants.Emojis.status_offline} {offline_presences}' embed.description = textwrap.dedent(f' Created: {created} Voice region: {region} {features} Roles: {num_roles} Member status: {member_status} ') embed.set_thumbnail(url=ctx.guild.icon_url) total_members = ctx.guild.member_count member_counts = self.get_member_counts(ctx.guild) member_info = '\n'.join((f'{role}: {count}' for (role, count) in member_counts.items())) embed.add_field(name=f'Members: {total_members}', value=member_info) total_channels = len(ctx.guild.channels) channel_counts = self.get_channel_type_counts(ctx.guild) channel_info = '\n'.join((f'{channel.title()}: {count}' for (channel, count) in sorted(channel_counts.items()))) embed.add_field(name=f'Channels: {total_channels}', value=channel_info) if is_mod_channel(ctx.channel): embed.add_field(name='Moderation:', value=self.get_extended_server_info(ctx)) (await ctx.send(embed=embed))
@command(name='server', aliases=['server_info', 'guild', 'guild_info']) async def server_info(self, ctx: Context) -> None: embed = Embed(colour=Colour.blurple(), title='Server Information') created = discord_timestamp(ctx.guild.created_at, TimestampFormats.RELATIVE) region = ctx.guild.region num_roles = (len(ctx.guild.roles) - 1) if (ctx.channel.id in (*constants.MODERATION_CHANNELS, constants.Channels.dev_core, constants.Channels.dev_contrib)): features = f''' Features: {', '.join(ctx.guild.features)}''' else: features = '' py_invite = (await self.bot.fetch_invite(constants.Guild.invite)) online_presences = py_invite.approximate_presence_count offline_presences = (py_invite.approximate_member_count - online_presences) member_status = f'{constants.Emojis.status_online} {online_presences} {constants.Emojis.status_offline} {offline_presences}' embed.description = textwrap.dedent(f' Created: {created} Voice region: {region} {features} Roles: {num_roles} Member status: {member_status} ') embed.set_thumbnail(url=ctx.guild.icon_url) total_members = ctx.guild.member_count member_counts = self.get_member_counts(ctx.guild) member_info = '\n'.join((f'{role}: {count}' for (role, count) in member_counts.items())) embed.add_field(name=f'Members: {total_members}', value=member_info) total_channels = len(ctx.guild.channels) channel_counts = self.get_channel_type_counts(ctx.guild) channel_info = '\n'.join((f'{channel.title()}: {count}' for (channel, count) in sorted(channel_counts.items()))) embed.add_field(name=f'Channels: {total_channels}', value=channel_info) if is_mod_channel(ctx.channel): embed.add_field(name='Moderation:', value=self.get_extended_server_info(ctx)) (await ctx.send(embed=embed))<|docstring|>Returns an embed full of server information.<|endoftext|>
ede5ddca53d4b7fcba91db9ce73ee8b3f18ca67c47b500fd384393b1c780cc44
@command(name='user', aliases=['user_info', 'member', 'member_info', 'u']) async def user_info(self, ctx: Context, user: FetchedMember=None) -> None: 'Returns info about a user.' if (user is None): user = ctx.author elif ((user != ctx.author) and (await has_no_roles_check(ctx, *constants.MODERATION_ROLES))): (await ctx.send('You may not use this command on users other than yourself.')) return if in_whitelist_check(ctx, roles=constants.STAFF_ROLES): embed = (await self.create_user_embed(ctx, user)) (await ctx.send(embed=embed))
Returns info about a user.
bot/exts/info/information.py
user_info
Zedeldi/bot
2
python
@command(name='user', aliases=['user_info', 'member', 'member_info', 'u']) async def user_info(self, ctx: Context, user: FetchedMember=None) -> None: if (user is None): user = ctx.author elif ((user != ctx.author) and (await has_no_roles_check(ctx, *constants.MODERATION_ROLES))): (await ctx.send('You may not use this command on users other than yourself.')) return if in_whitelist_check(ctx, roles=constants.STAFF_ROLES): embed = (await self.create_user_embed(ctx, user)) (await ctx.send(embed=embed))
@command(name='user', aliases=['user_info', 'member', 'member_info', 'u']) async def user_info(self, ctx: Context, user: FetchedMember=None) -> None: if (user is None): user = ctx.author elif ((user != ctx.author) and (await has_no_roles_check(ctx, *constants.MODERATION_ROLES))): (await ctx.send('You may not use this command on users other than yourself.')) return if in_whitelist_check(ctx, roles=constants.STAFF_ROLES): embed = (await self.create_user_embed(ctx, user)) (await ctx.send(embed=embed))<|docstring|>Returns info about a user.<|endoftext|>
2e9bf0c12f6a911be3a80701b87f7bf03e17d7e488406b632900a66b0d5f557c
async def create_user_embed(self, ctx: Context, user: FetchedMember) -> Embed: 'Creates an embed containing information on the `user`.' on_server = bool(ctx.guild.get_member(user.id)) created = discord_timestamp(user.created_at, TimestampFormats.RELATIVE) name = str(user) if (on_server and user.nick): name = f'{user.nick} ({name})' if user.public_flags.verified_bot: name += f' {constants.Emojis.verified_bot}' elif user.bot: name += f' {constants.Emojis.bot}' badges = [] for (badge, is_set) in user.public_flags: if (is_set and (emoji := getattr(constants.Emojis, f'badge_{badge}', None))): badges.append(emoji) if on_server: joined = discord_timestamp(user.joined_at, TimestampFormats.RELATIVE) roles = ', '.join((role.mention for role in user.roles[1:])) membership = {'Joined': joined, 'Verified': (not user.pending), 'Roles': (roles or None)} if (not is_mod_channel(ctx.channel)): membership.pop('Verified') membership = textwrap.dedent('\n'.join([f'{key}: {value}' for (key, value) in membership.items()])) else: roles = None membership = 'The user is not a member of the server' fields = [('User information', textwrap.dedent(f''' Created: {created} Profile: {user.mention} ID: {user.id} ''').strip()), ('Member information', membership)] if is_mod_channel(ctx.channel): fields.append((await self.user_messages(user))) fields.append((await self.expanded_user_infraction_counts(user))) fields.append((await self.user_nomination_counts(user))) else: fields.append((await self.basic_user_infraction_counts(user))) embed = Embed(title=name, description=' '.join(badges)) for (field_name, field_content) in fields: embed.add_field(name=field_name, value=field_content, inline=False) embed.set_thumbnail(url=user.avatar_url_as(static_format='png')) embed.colour = (user.colour if (user.colour != Colour.default()) else Colour.blurple()) return embed
Creates an embed containing information on the `user`.
bot/exts/info/information.py
create_user_embed
Zedeldi/bot
2
python
async def create_user_embed(self, ctx: Context, user: FetchedMember) -> Embed: on_server = bool(ctx.guild.get_member(user.id)) created = discord_timestamp(user.created_at, TimestampFormats.RELATIVE) name = str(user) if (on_server and user.nick): name = f'{user.nick} ({name})' if user.public_flags.verified_bot: name += f' {constants.Emojis.verified_bot}' elif user.bot: name += f' {constants.Emojis.bot}' badges = [] for (badge, is_set) in user.public_flags: if (is_set and (emoji := getattr(constants.Emojis, f'badge_{badge}', None))): badges.append(emoji) if on_server: joined = discord_timestamp(user.joined_at, TimestampFormats.RELATIVE) roles = ', '.join((role.mention for role in user.roles[1:])) membership = {'Joined': joined, 'Verified': (not user.pending), 'Roles': (roles or None)} if (not is_mod_channel(ctx.channel)): membership.pop('Verified') membership = textwrap.dedent('\n'.join([f'{key}: {value}' for (key, value) in membership.items()])) else: roles = None membership = 'The user is not a member of the server' fields = [('User information', textwrap.dedent(f' Created: {created} Profile: {user.mention} ID: {user.id} ').strip()), ('Member information', membership)] if is_mod_channel(ctx.channel): fields.append((await self.user_messages(user))) fields.append((await self.expanded_user_infraction_counts(user))) fields.append((await self.user_nomination_counts(user))) else: fields.append((await self.basic_user_infraction_counts(user))) embed = Embed(title=name, description=' '.join(badges)) for (field_name, field_content) in fields: embed.add_field(name=field_name, value=field_content, inline=False) embed.set_thumbnail(url=user.avatar_url_as(static_format='png')) embed.colour = (user.colour if (user.colour != Colour.default()) else Colour.blurple()) return embed
async def create_user_embed(self, ctx: Context, user: FetchedMember) -> Embed: on_server = bool(ctx.guild.get_member(user.id)) created = discord_timestamp(user.created_at, TimestampFormats.RELATIVE) name = str(user) if (on_server and user.nick): name = f'{user.nick} ({name})' if user.public_flags.verified_bot: name += f' {constants.Emojis.verified_bot}' elif user.bot: name += f' {constants.Emojis.bot}' badges = [] for (badge, is_set) in user.public_flags: if (is_set and (emoji := getattr(constants.Emojis, f'badge_{badge}', None))): badges.append(emoji) if on_server: joined = discord_timestamp(user.joined_at, TimestampFormats.RELATIVE) roles = ', '.join((role.mention for role in user.roles[1:])) membership = {'Joined': joined, 'Verified': (not user.pending), 'Roles': (roles or None)} if (not is_mod_channel(ctx.channel)): membership.pop('Verified') membership = textwrap.dedent('\n'.join([f'{key}: {value}' for (key, value) in membership.items()])) else: roles = None membership = 'The user is not a member of the server' fields = [('User information', textwrap.dedent(f' Created: {created} Profile: {user.mention} ID: {user.id} ').strip()), ('Member information', membership)] if is_mod_channel(ctx.channel): fields.append((await self.user_messages(user))) fields.append((await self.expanded_user_infraction_counts(user))) fields.append((await self.user_nomination_counts(user))) else: fields.append((await self.basic_user_infraction_counts(user))) embed = Embed(title=name, description=' '.join(badges)) for (field_name, field_content) in fields: embed.add_field(name=field_name, value=field_content, inline=False) embed.set_thumbnail(url=user.avatar_url_as(static_format='png')) embed.colour = (user.colour if (user.colour != Colour.default()) else Colour.blurple()) return embed<|docstring|>Creates an embed containing information on the `user`.<|endoftext|>
f366387ac891cad88180e9ad0fd2f50f54c6e80366d03436c02c0003b2f68c5b
async def basic_user_infraction_counts(self, user: FetchedMember) -> Tuple[(str, str)]: 'Gets the total and active infraction counts for the given `member`.' infractions = (await self.bot.api_client.get('bot/infractions', params={'hidden': 'False', 'user__id': str(user.id)})) total_infractions = len(infractions) active_infractions = sum((infraction['active'] for infraction in infractions)) infraction_output = f'''Total: {total_infractions} Active: {active_infractions}''' return ('Infractions', infraction_output)
Gets the total and active infraction counts for the given `member`.
bot/exts/info/information.py
basic_user_infraction_counts
Zedeldi/bot
2
python
async def basic_user_infraction_counts(self, user: FetchedMember) -> Tuple[(str, str)]: infractions = (await self.bot.api_client.get('bot/infractions', params={'hidden': 'False', 'user__id': str(user.id)})) total_infractions = len(infractions) active_infractions = sum((infraction['active'] for infraction in infractions)) infraction_output = f'Total: {total_infractions} Active: {active_infractions}' return ('Infractions', infraction_output)
async def basic_user_infraction_counts(self, user: FetchedMember) -> Tuple[(str, str)]: infractions = (await self.bot.api_client.get('bot/infractions', params={'hidden': 'False', 'user__id': str(user.id)})) total_infractions = len(infractions) active_infractions = sum((infraction['active'] for infraction in infractions)) infraction_output = f'Total: {total_infractions} Active: {active_infractions}' return ('Infractions', infraction_output)<|docstring|>Gets the total and active infraction counts for the given `member`.<|endoftext|>
9411489fe773250c5e72b62b48e62402bb99db363f4a64b791c05fe28862ff79
async def expanded_user_infraction_counts(self, user: FetchedMember) -> Tuple[(str, str)]: '\n Gets expanded infraction counts for the given `member`.\n\n The counts will be split by infraction type and the number of active infractions for each type will indicated\n in the output as well.\n ' infractions = (await self.bot.api_client.get('bot/infractions', params={'user__id': str(user.id)})) infraction_output = [] if (not infractions): infraction_output.append('No infractions') else: infraction_types = set() infraction_counter = defaultdict(int) for infraction in infractions: infraction_type = infraction['type'] infraction_active = ('active' if infraction['active'] else 'inactive') infraction_types.add(infraction_type) infraction_counter[f'{infraction_active} {infraction_type}'] += 1 for infraction_type in sorted(infraction_types): active_count = infraction_counter[f'active {infraction_type}'] total_count = (active_count + infraction_counter[f'inactive {infraction_type}']) line = f'{infraction_type.capitalize()}s: {total_count}' if active_count: line += f' ({active_count} active)' infraction_output.append(line) return ('Infractions', '\n'.join(infraction_output))
Gets expanded infraction counts for the given `member`. The counts will be split by infraction type and the number of active infractions for each type will indicated in the output as well.
bot/exts/info/information.py
expanded_user_infraction_counts
Zedeldi/bot
2
python
async def expanded_user_infraction_counts(self, user: FetchedMember) -> Tuple[(str, str)]: '\n Gets expanded infraction counts for the given `member`.\n\n The counts will be split by infraction type and the number of active infractions for each type will indicated\n in the output as well.\n ' infractions = (await self.bot.api_client.get('bot/infractions', params={'user__id': str(user.id)})) infraction_output = [] if (not infractions): infraction_output.append('No infractions') else: infraction_types = set() infraction_counter = defaultdict(int) for infraction in infractions: infraction_type = infraction['type'] infraction_active = ('active' if infraction['active'] else 'inactive') infraction_types.add(infraction_type) infraction_counter[f'{infraction_active} {infraction_type}'] += 1 for infraction_type in sorted(infraction_types): active_count = infraction_counter[f'active {infraction_type}'] total_count = (active_count + infraction_counter[f'inactive {infraction_type}']) line = f'{infraction_type.capitalize()}s: {total_count}' if active_count: line += f' ({active_count} active)' infraction_output.append(line) return ('Infractions', '\n'.join(infraction_output))
async def expanded_user_infraction_counts(self, user: FetchedMember) -> Tuple[(str, str)]: '\n Gets expanded infraction counts for the given `member`.\n\n The counts will be split by infraction type and the number of active infractions for each type will indicated\n in the output as well.\n ' infractions = (await self.bot.api_client.get('bot/infractions', params={'user__id': str(user.id)})) infraction_output = [] if (not infractions): infraction_output.append('No infractions') else: infraction_types = set() infraction_counter = defaultdict(int) for infraction in infractions: infraction_type = infraction['type'] infraction_active = ('active' if infraction['active'] else 'inactive') infraction_types.add(infraction_type) infraction_counter[f'{infraction_active} {infraction_type}'] += 1 for infraction_type in sorted(infraction_types): active_count = infraction_counter[f'active {infraction_type}'] total_count = (active_count + infraction_counter[f'inactive {infraction_type}']) line = f'{infraction_type.capitalize()}s: {total_count}' if active_count: line += f' ({active_count} active)' infraction_output.append(line) return ('Infractions', '\n'.join(infraction_output))<|docstring|>Gets expanded infraction counts for the given `member`. The counts will be split by infraction type and the number of active infractions for each type will indicated in the output as well.<|endoftext|>
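The expanded infraction counter above builds per-type totals with a defaultdict keyed by an 'active'/'inactive' prefix. The self-contained sketch below reproduces that counting idea on invented sample data; the payload shape (dicts with 'type' and 'active' keys) is only assumed from how the code above reads it.

```python
# Sketch of the per-type infraction counting used above, on invented sample data.
from collections import defaultdict

infractions = [                      # hypothetical API payload
    {'type': 'warning', 'active': False},
    {'type': 'ban', 'active': True},
    {'type': 'ban', 'active': False},
]

counter = defaultdict(int)
types = set()
for infraction in infractions:
    state = 'active' if infraction['active'] else 'inactive'
    types.add(infraction['type'])
    counter[f"{state} {infraction['type']}"] += 1

for infraction_type in sorted(types):
    active = counter[f'active {infraction_type}']
    total = active + counter[f'inactive {infraction_type}']
    line = f'{infraction_type.capitalize()}s: {total}'
    if active:
        line += f' ({active} active)'
    print(line)   # e.g. "Bans: 2 (1 active)" then "Warnings: 1"
```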
392e7645a90bb782f57eaaee71e6c121b14adbc15d881672fb2915f6ca5ffe32
async def user_nomination_counts(self, user: FetchedMember) -> Tuple[(str, str)]: 'Gets the active and historical nomination counts for the given `member`.' nominations = (await self.bot.api_client.get('bot/nominations', params={'user__id': str(user.id)})) output = [] if (not nominations): output.append('No nominations') else: count = len(nominations) is_currently_nominated = any((nomination['active'] for nomination in nominations)) nomination_noun = ('nomination' if (count == 1) else 'nominations') if is_currently_nominated: output.append(f'''This user is **currently** nominated ({count} {nomination_noun} in total)''') else: output.append(f'This user has {count} historical {nomination_noun}, but is currently not nominated.') return ('Nominations', '\n'.join(output))
Gets the active and historical nomination counts for the given `member`.
bot/exts/info/information.py
user_nomination_counts
Zedeldi/bot
2
python
async def user_nomination_counts(self, user: FetchedMember) -> Tuple[(str, str)]: nominations = (await self.bot.api_client.get('bot/nominations', params={'user__id': str(user.id)})) output = [] if (not nominations): output.append('No nominations') else: count = len(nominations) is_currently_nominated = any((nomination['active'] for nomination in nominations)) nomination_noun = ('nomination' if (count == 1) else 'nominations') if is_currently_nominated: output.append(f'This user is **currently** nominated ({count} {nomination_noun} in total)') else: output.append(f'This user has {count} historical {nomination_noun}, but is currently not nominated.') return ('Nominations', '\n'.join(output))
async def user_nomination_counts(self, user: FetchedMember) -> Tuple[(str, str)]: nominations = (await self.bot.api_client.get('bot/nominations', params={'user__id': str(user.id)})) output = [] if (not nominations): output.append('No nominations') else: count = len(nominations) is_currently_nominated = any((nomination['active'] for nomination in nominations)) nomination_noun = ('nomination' if (count == 1) else 'nominations') if is_currently_nominated: output.append(f'This user is **currently** nominated ({count} {nomination_noun} in total)') else: output.append(f'This user has {count} historical {nomination_noun}, but is currently not nominated.') return ('Nominations', '\n'.join(output))<|docstring|>Gets the active and historical nomination counts for the given `member`.<|endoftext|>
f8720bc1c0a2a4a3c26659cc1b72c17a7a07998416ad41916c8cb8072b32a7b8
async def user_messages(self, user: FetchedMember) -> Tuple[(Union[(bool, str)], Tuple[(str, str)])]: "\n Gets the amount of messages for `member`.\n\n Fetches information from the metricity database that's hosted by the site.\n If the database returns a code besides a 404, then many parts of the bot are broken including this one.\n " activity_output = [] try: user_activity = (await self.bot.api_client.get(f'bot/users/{user.id}/metricity_data')) except ResponseCodeError as e: if (e.status == 404): activity_output = 'No activity' else: activity_output.append((user_activity['total_messages'] or 'No messages')) activity_output.append((user_activity['activity_blocks'] or 'No activity')) activity_output = '\n'.join((f'{name}: {metric}' for (name, metric) in zip(['Messages', 'Activity blocks'], activity_output))) return ('Activity', activity_output)
Gets the amount of messages for `member`. Fetches information from the metricity database that's hosted by the site. If the database returns a code besides a 404, then many parts of the bot are broken including this one.
bot/exts/info/information.py
user_messages
Zedeldi/bot
2
python
async def user_messages(self, user: FetchedMember) -> Tuple[(Union[(bool, str)], Tuple[(str, str)])]: "\n Gets the amount of messages for `member`.\n\n Fetches information from the metricity database that's hosted by the site.\n If the database returns a code besides a 404, then many parts of the bot are broken including this one.\n " activity_output = [] try: user_activity = (await self.bot.api_client.get(f'bot/users/{user.id}/metricity_data')) except ResponseCodeError as e: if (e.status == 404): activity_output = 'No activity' else: activity_output.append((user_activity['total_messages'] or 'No messages')) activity_output.append((user_activity['activity_blocks'] or 'No activity')) activity_output = '\n'.join((f'{name}: {metric}' for (name, metric) in zip(['Messages', 'Activity blocks'], activity_output))) return ('Activity', activity_output)
async def user_messages(self, user: FetchedMember) -> Tuple[(Union[(bool, str)], Tuple[(str, str)])]: "\n Gets the amount of messages for `member`.\n\n Fetches information from the metricity database that's hosted by the site.\n If the database returns a code besides a 404, then many parts of the bot are broken including this one.\n " activity_output = [] try: user_activity = (await self.bot.api_client.get(f'bot/users/{user.id}/metricity_data')) except ResponseCodeError as e: if (e.status == 404): activity_output = 'No activity' else: activity_output.append((user_activity['total_messages'] or 'No messages')) activity_output.append((user_activity['activity_blocks'] or 'No activity')) activity_output = '\n'.join((f'{name}: {metric}' for (name, metric) in zip(['Messages', 'Activity blocks'], activity_output))) return ('Activity', activity_output)<|docstring|>Gets the amount of messages for `member`. Fetches information from the metricity database that's hosted by the site. If the database returns a code besides a 404, then many parts of the bot are broken including this one.<|endoftext|>
b44d9e5d2dcbd7e742bcc759c71454fc3c91e72b960c835de846998d1d8c22f7
def format_fields(self, mapping: Mapping[(str, Any)], field_width: Optional[int]=None) -> str: 'Format a mapping to be readable to a human.' fields = sorted(mapping.items(), key=(lambda item: item[0])) if (field_width is None): field_width = len(max(mapping.keys(), key=len)) out = '' for (key, val) in fields: if isinstance(val, dict): inner_width = int((field_width * 1.6)) val = ('\n' + self.format_fields(val, field_width=inner_width)) elif isinstance(val, str): text = textwrap.fill(val, width=100, replace_whitespace=False) val = textwrap.indent(text, (' ' * (field_width + len(': ')))) val = val.lstrip() if (key == 'color'): val = hex(val) out += '{0:>{width}}: {1}\n'.format(key, val, width=field_width) return out.rstrip()
Format a mapping to be readable to a human.
bot/exts/info/information.py
format_fields
Zedeldi/bot
2
python
def format_fields(self, mapping: Mapping[(str, Any)], field_width: Optional[int]=None) -> str: fields = sorted(mapping.items(), key=(lambda item: item[0])) if (field_width is None): field_width = len(max(mapping.keys(), key=len)) out = '' for (key, val) in fields: if isinstance(val, dict): inner_width = int((field_width * 1.6)) val = ('\n' + self.format_fields(val, field_width=inner_width)) elif isinstance(val, str): text = textwrap.fill(val, width=100, replace_whitespace=False) val = textwrap.indent(text, (' ' * (field_width + len(': ')))) val = val.lstrip() if (key == 'color'): val = hex(val) out += '{0:>{width}}: {1}\n'.format(key, val, width=field_width) return out.rstrip()
def format_fields(self, mapping: Mapping[(str, Any)], field_width: Optional[int]=None) -> str: fields = sorted(mapping.items(), key=(lambda item: item[0])) if (field_width is None): field_width = len(max(mapping.keys(), key=len)) out = '' for (key, val) in fields: if isinstance(val, dict): inner_width = int((field_width * 1.6)) val = ('\n' + self.format_fields(val, field_width=inner_width)) elif isinstance(val, str): text = textwrap.fill(val, width=100, replace_whitespace=False) val = textwrap.indent(text, (' ' * (field_width + len(': ')))) val = val.lstrip() if (key == 'color'): val = hex(val) out += '{0:>{width}}: {1}\n'.format(key, val, width=field_width) return out.rstrip()<|docstring|>Format a mapping to be readable to a human.<|endoftext|>
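The alignment in format_fields comes from a nested format spec, '{0:>{width}}: {1}', which right-aligns every key to the width of the longest one. A tiny standalone example of that spec, with invented values rather than real message data:

```python
# The right-alignment trick used by format_fields, shown on invented values.
mapping = {'id': 1234, 'content': 'hello', 'color': 0x7289DA}
width = len(max(mapping, key=len))          # width of the longest key ("content")
for key, val in sorted(mapping.items()):
    shown = hex(val) if key == 'color' else val   # same special case as above
    print('{0:>{width}}: {1}'.format(key, shown, width=width))
# keys print right-aligned in one column, e.g. "     id: 1234"
```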
a28f585d1f6e82e45c35aaa7f32689f08700ed6b3022077a0f129f4eb798e5aa
@cooldown_with_role_bypass(2, (60 * 3), BucketType.member, bypass_roles=constants.STAFF_ROLES) @group(invoke_without_command=True) @in_whitelist(channels=(constants.Channels.bot_commands,), roles=constants.STAFF_ROLES) async def raw(self, ctx: Context, *, message: Message, json: bool=False) -> None: 'Shows information about the raw API response.' if (ctx.author not in message.channel.members): (await ctx.send(':x: You do not have permissions to see the channel this message is in.')) return raw_data = (await ctx.bot.http.get_message(message.channel.id, message.id)) paginator = Paginator() def add_content(title: str, content: str) -> None: paginator.add_line(f'''== {title} == ''') paginator.add_line(content.replace('`', '`\u200b')) paginator.close_page() if message.content: add_content('Raw message', message.content) transformer = (pprint.pformat if json else self.format_fields) for field_name in ('embeds', 'attachments'): data = raw_data[field_name] if (not data): continue total = len(data) for (current, item) in enumerate(data, start=1): title = f'Raw {field_name} ({current}/{total})' add_content(title, transformer(item)) for page in paginator.pages: (await ctx.send(page, allowed_mentions=AllowedMentions.none()))
Shows information about the raw API response.
bot/exts/info/information.py
raw
Zedeldi/bot
2
python
@cooldown_with_role_bypass(2, (60 * 3), BucketType.member, bypass_roles=constants.STAFF_ROLES) @group(invoke_without_command=True) @in_whitelist(channels=(constants.Channels.bot_commands,), roles=constants.STAFF_ROLES) async def raw(self, ctx: Context, *, message: Message, json: bool=False) -> None: if (ctx.author not in message.channel.members): (await ctx.send(':x: You do not have permissions to see the channel this message is in.')) return raw_data = (await ctx.bot.http.get_message(message.channel.id, message.id)) paginator = Paginator() def add_content(title: str, content: str) -> None: paginator.add_line(f'== {title} == ') paginator.add_line(content.replace('`', '`\u200b')) paginator.close_page() if message.content: add_content('Raw message', message.content) transformer = (pprint.pformat if json else self.format_fields) for field_name in ('embeds', 'attachments'): data = raw_data[field_name] if (not data): continue total = len(data) for (current, item) in enumerate(data, start=1): title = f'Raw {field_name} ({current}/{total})' add_content(title, transformer(item)) for page in paginator.pages: (await ctx.send(page, allowed_mentions=AllowedMentions.none()))
@cooldown_with_role_bypass(2, (60 * 3), BucketType.member, bypass_roles=constants.STAFF_ROLES) @group(invoke_without_command=True) @in_whitelist(channels=(constants.Channels.bot_commands,), roles=constants.STAFF_ROLES) async def raw(self, ctx: Context, *, message: Message, json: bool=False) -> None: if (ctx.author not in message.channel.members): (await ctx.send(':x: You do not have permissions to see the channel this message is in.')) return raw_data = (await ctx.bot.http.get_message(message.channel.id, message.id)) paginator = Paginator() def add_content(title: str, content: str) -> None: paginator.add_line(f'== {title} == ') paginator.add_line(content.replace('`', '`\u200b')) paginator.close_page() if message.content: add_content('Raw message', message.content) transformer = (pprint.pformat if json else self.format_fields) for field_name in ('embeds', 'attachments'): data = raw_data[field_name] if (not data): continue total = len(data) for (current, item) in enumerate(data, start=1): title = f'Raw {field_name} ({current}/{total})' add_content(title, transformer(item)) for page in paginator.pages: (await ctx.send(page, allowed_mentions=AllowedMentions.none()))<|docstring|>Shows information about the raw API response.<|endoftext|>
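The raw command above pushes each block of content through discord.ext.commands.Paginator, which splits long text into code-fenced pages that fit Discord's message length limit. A minimal sketch of that pattern follows; the sample lines are invented and actually sending the pages would still need a bot context.

```python
# Minimal sketch of the Paginator usage in the raw command; the lines are invented.
from discord.ext.commands import Paginator

paginator = Paginator()            # defaults wrap each page in a ``` code fence
paginator.add_line('== Raw message ==\n')
paginator.add_line('hello `world`'.replace('`', '`\u200b'))  # escape backticks as above
paginator.close_page()             # force the next content onto a new page

for page in paginator.pages:       # each page is a string short enough for one message
    print(page)
```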
e26eee32d4733b5c94a2b089e982789205674a6fadb5975b4326f6b2a299f99e
@raw.command() async def json(self, ctx: Context, message: Message) -> None: 'Shows information about the raw API response in a copy-pasteable Python format.' (await ctx.invoke(self.raw, message=message, json=True))
Shows information about the raw API response in a copy-pasteable Python format.
bot/exts/info/information.py
json
Zedeldi/bot
2
python
@raw.command() async def json(self, ctx: Context, message: Message) -> None: (await ctx.invoke(self.raw, message=message, json=True))
@raw.command() async def json(self, ctx: Context, message: Message) -> None: (await ctx.invoke(self.raw, message=message, json=True))<|docstring|>Shows information about the raw API response in a copy-pasteable Python format.<|endoftext|>
d413feb2da389047bc286243bf152b0e79ffcdc6d61ca450c1668f58e0c91350
def setUp(self): 'Initialize test values.' self.type = 'user' self.name = 'test' self.principal = 'user:test' self.repr_type = "<Principal(type='user')>" self.repr_name = "<Principal(name='test')>" self.repr_all = "<Principal(type='user', name='test')>"
Initialize test values.
tests/test_principal.py
setUp
BlueDragonX/pyramid_couchauth
1
python
def setUp(self): self.type = 'user' self.name = 'test' self.principal = 'user:test' self.repr_type = "<Principal(type='user')>" self.repr_name = "<Principal(name='test')>" self.repr_all = "<Principal(type='user', name='test')>"
def setUp(self): self.type = 'user' self.name = 'test' self.principal = 'user:test' self.repr_type = "<Principal(type='user')>" self.repr_name = "<Principal(name='test')>" self.repr_all = "<Principal(type='user', name='test')>"<|docstring|>Initialize test values.<|endoftext|>
bba8f4111810ea3c86d4902d053d8c1deba4d15fbfdf6c758c88c7e50c325b31
def test_init_empty(self): 'Test __init__ method with no params.' pobj = Principal() self.assertTrue((pobj.type is None)) self.assertTrue((pobj.name is None))
Test __init__ method with no params.
tests/test_principal.py
test_init_empty
BlueDragonX/pyramid_couchauth
1
python
def test_init_empty(self): pobj = Principal() self.assertTrue((pobj.type is None)) self.assertTrue((pobj.name is None))
def test_init_empty(self): pobj = Principal() self.assertTrue((pobj.type is None)) self.assertTrue((pobj.name is None))<|docstring|>Test __init__ method with no params.<|endoftext|>
b6c0b17c735b8f400c05c6178a7c5af9a6462ae8a465017ad6ff5b64a10d1b37
def test_init_type_name(self): 'Test __init__ method with type and name params.' pobj = Principal(type=self.type, name=self.name) self.assertEqual(pobj.type, self.type) self.assertEqual(pobj.name, self.name)
Test __init__ method with type and name params.
tests/test_principal.py
test_init_type_name
BlueDragonX/pyramid_couchauth
1
python
def test_init_type_name(self): pobj = Principal(type=self.type, name=self.name) self.assertEqual(pobj.type, self.type) self.assertEqual(pobj.name, self.name)
def test_init_type_name(self): pobj = Principal(type=self.type, name=self.name) self.assertEqual(pobj.type, self.type) self.assertEqual(pobj.name, self.name)<|docstring|>Test __init__ method with type and name params.<|endoftext|>
5430a4c7b32f48181596cbdac041c48450a8a765642e43c541c18550658f3adc
def test_init_principal(self): 'Test __init__ method with principal param.' pobj = Principal(self.principal) self.assertEqual(pobj.type, self.type) self.assertEqual(pobj.name, self.name)
Test __init__ method with principal param.
tests/test_principal.py
test_init_principal
BlueDragonX/pyramid_couchauth
1
python
def test_init_principal(self): pobj = Principal(self.principal) self.assertEqual(pobj.type, self.type) self.assertEqual(pobj.name, self.name)
def test_init_principal(self): pobj = Principal(self.principal) self.assertEqual(pobj.type, self.type) self.assertEqual(pobj.name, self.name)<|docstring|>Test __init__ method with principal param.<|endoftext|>
9b80a3bdb6a9a3ad78f78bfdd7efb52130fa8c84097519a1e1eda612ba685614
def test_init_type_principal(self): 'Test __init__ method with type and principal params.' pobj = Principal(self.name, self.type) self.assertEqual(pobj.type, self.type) self.assertEqual(pobj.name, self.name)
Test __init__ method with type and principal params.
tests/test_principal.py
test_init_type_principal
BlueDragonX/pyramid_couchauth
1
python
def test_init_type_principal(self): pobj = Principal(self.name, self.type) self.assertEqual(pobj.type, self.type) self.assertEqual(pobj.name, self.name)
def test_init_type_principal(self): pobj = Principal(self.name, self.type) self.assertEqual(pobj.type, self.type) self.assertEqual(pobj.name, self.name)<|docstring|>Test __init__ method with type and principal params.<|endoftext|>
d8f7c9cb7b0ec683bd69ae8bdaabc89e651039017d6bcc0321b90e49bd8b44ef
def test_parse(self): 'Test parse method.' pobj = Principal() pobj.parse(self.principal) self.assertEqual(pobj.type, self.type) self.assertEqual(pobj.name, self.name)
Test parse method.
tests/test_principal.py
test_parse
BlueDragonX/pyramid_couchauth
1
python
def test_parse(self): pobj = Principal() pobj.parse(self.principal) self.assertEqual(pobj.type, self.type) self.assertEqual(pobj.name, self.name)
def test_parse(self): pobj = Principal() pobj.parse(self.principal) self.assertEqual(pobj.type, self.type) self.assertEqual(pobj.name, self.name)<|docstring|>Test parse method.<|endoftext|>
1bf7c05cf3c42703b07ba35c17d30f933d213894e035e92683110cecebe61419
def test_str(self): 'Test __str__ method.' pobj = Principal(self.principal) self.assertEqual(pobj.__str__(), self.principal)
Test __str__ method.
tests/test_principal.py
test_str
BlueDragonX/pyramid_couchauth
1
python
def test_str(self): pobj = Principal(self.principal) self.assertEqual(pobj.__str__(), self.principal)
def test_str(self): pobj = Principal(self.principal) self.assertEqual(pobj.__str__(), self.principal)<|docstring|>Test __str__ method.<|endoftext|>
4b36e27c7cbff4183dc130df57ee1244fb953d49003f64362b49c62dead766ac
def test_repr_type(self): 'Test __repr__ method when type is set.' pobj = Principal(type=self.type) self.assertEqual(pobj.__repr__(), self.repr_type)
Test __repr__ method when type is set.
tests/test_principal.py
test_repr_type
BlueDragonX/pyramid_couchauth
1
python
def test_repr_type(self): pobj = Principal(type=self.type) self.assertEqual(pobj.__repr__(), self.repr_type)
def test_repr_type(self): pobj = Principal(type=self.type) self.assertEqual(pobj.__repr__(), self.repr_type)<|docstring|>Test __repr__ method when type is set.<|endoftext|>
c8d48cd081992a2f877da40ff9fbbe58ccc240009a6f4284aa44ff3d72f2557a
def test_repr_name(self): 'Test __repr__ method when name is set.' pobj = Principal(name=self.name) self.assertEqual(pobj.__repr__(), self.repr_name)
Test __repr__ method when name is set.
tests/test_principal.py
test_repr_name
BlueDragonX/pyramid_couchauth
1
python
def test_repr_name(self): pobj = Principal(name=self.name) self.assertEqual(pobj.__repr__(), self.repr_name)
def test_repr_name(self): pobj = Principal(name=self.name) self.assertEqual(pobj.__repr__(), self.repr_name)<|docstring|>Test __repr__ method when name is set.<|endoftext|>
e16e932f99b1913bd1e06628ad134e370969c73b31b90c50168bd428ba636877
def test_repr_all(self): 'Test __repr__ method when principal is set.' pobj = Principal(self.principal) self.assertEqual(pobj.__repr__(), self.repr_all)
Test __repr__ method when principal is set.
tests/test_principal.py
test_repr_all
BlueDragonX/pyramid_couchauth
1
python
def test_repr_all(self): pobj = Principal(self.principal) self.assertEqual(pobj.__repr__(), self.repr_all)
def test_repr_all(self): pobj = Principal(self.principal) self.assertEqual(pobj.__repr__(), self.repr_all)<|docstring|>Test __repr__ method when principal is set.<|endoftext|>
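Read together, the tests above pin down a small Principal value object: it parses 'type:name' strings, accepts name and type keywords, treats a second positional argument as the type, and renders back to 'type:name' with a matching repr. The class below is a hypothetical minimal implementation inferred from those tests, not the actual pyramid_couchauth class, which may differ in details.

```python
# Hypothetical minimal Principal inferred from the tests above; the real
# pyramid_couchauth implementation may differ.
class Principal:
    def __init__(self, principal=None, type=None, name=None):
        self.type = type
        self.name = name
        if principal is not None:
            if type is None:
                self.parse(principal)   # Principal('user:test')
            else:
                self.name = principal   # Principal('test', 'user')

    def parse(self, principal):
        self.type, self.name = principal.split(':', 1)

    def __str__(self):
        return f'{self.type}:{self.name}'

    def __repr__(self):
        parts = []
        if self.type is not None:
            parts.append(f"type='{self.type}'")
        if self.name is not None:
            parts.append(f"name='{self.name}'")
        return f"<Principal({', '.join(parts)})>"
```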
dad3516a1fe334dcf3fbd65dd70e97d25d1cd661383dabb03572a9cbbed278a3
def session_preparation(self): 'Prepare the session after the connection has been established.' self.ansi_escape_codes = True self._test_channel_read() self.set_base_prompt() self.disable_paging() time.sleep((0.3 * self.global_delay_factor)) self.clear_buffer()
Prepare the session after the connection has been established.
netmiko/dlink/dlink_ds.py
session_preparation
benmcbenben/netmiko
2
python
def session_preparation(self): self.ansi_escape_codes = True self._test_channel_read() self.set_base_prompt() self.disable_paging() time.sleep((0.3 * self.global_delay_factor)) self.clear_buffer()
def session_preparation(self): self.ansi_escape_codes = True self._test_channel_read() self.set_base_prompt() self.disable_paging() time.sleep((0.3 * self.global_delay_factor)) self.clear_buffer()<|docstring|>Prepare the session after the connection has been established.<|endoftext|>
603a8de8b6580a89d9a64ca68b5a9744f7b1d37fbaf11bbc1cfa794c364d4a34
def enable(self, *args, **kwargs): 'No implemented enable mode on D-Link yet' return ''
No implemented enable mode on D-Link yet
netmiko/dlink/dlink_ds.py
enable
benmcbenben/netmiko
2
python
def enable(self, *args, **kwargs): return ''
def enable(self, *args, **kwargs): return ''<|docstring|>No implemented enable mode on D-Link yet<|endoftext|>
7189c7ef3a93d99860dff801bec4c986b909839b52fb63c4f9862841de4b5b70
def check_enable_mode(self, *args, **kwargs): 'No implemented enable mode on D-Link yet' return True
No implemented enable mode on D-Link yet
netmiko/dlink/dlink_ds.py
check_enable_mode
benmcbenben/netmiko
2
python
def check_enable_mode(self, *args, **kwargs): return True
def check_enable_mode(self, *args, **kwargs): return True<|docstring|>No implemented enable mode on D-Link yet<|endoftext|>
e8004935534715e6a928b176ab23254cc92e6f71af0230d1dc93952425023083
def exit_enable_mode(self, *args, **kwargs): 'No implemented enable mode on D-Link yet' return ''
No implemented enable mode on D-Link yet
netmiko/dlink/dlink_ds.py
exit_enable_mode
benmcbenben/netmiko
2
python
def exit_enable_mode(self, *args, **kwargs): return ''
def exit_enable_mode(self, *args, **kwargs): return ''<|docstring|>No implemented enable mode on D-Link yet<|endoftext|>
b6e4cbe928cb0b2834e25a78207a2debbc44f75f893c3cc2b549a7182dbb86cd
def check_config_mode(self, *args, **kwargs): 'No config mode on D-Link' return False
No config mode on D-Link
netmiko/dlink/dlink_ds.py
check_config_mode
benmcbenben/netmiko
2
python
def check_config_mode(self, *args, **kwargs): return False
def check_config_mode(self, *args, **kwargs): return False<|docstring|>No config mode on D-Link<|endoftext|>
77a4597659ca8963d5a4e49d28da3733366a44daf2c045862f79b24701e7b965
def config_mode(self, *args, **kwargs): 'No config mode on D-Link' return ''
No config mode on D-Link
netmiko/dlink/dlink_ds.py
config_mode
benmcbenben/netmiko
2
python
def config_mode(self, *args, **kwargs): return ''
def config_mode(self, *args, **kwargs): return ''<|docstring|>No config mode on D-Link<|endoftext|>
91f871f2c481b51a097e5a26efdb974dba8ffff89d3a78575b3f3b3dde1ce7de
def exit_config_mode(self, *args, **kwargs): 'No config mode on D-Link' return ''
No config mode on D-Link
netmiko/dlink/dlink_ds.py
exit_config_mode
benmcbenben/netmiko
2
python
def exit_config_mode(self, *args, **kwargs): return ''
def exit_config_mode(self, *args, **kwargs): return ''<|docstring|>No config mode on D-Link<|endoftext|>
91bebc59c8d889783919998dde4483ef2ede0cf357ef77006d5d73f6b3b948ae
def save_config(self, cmd='save', confirm=False, confirm_response=''): 'Saves configuration.' return super().save_config(cmd=cmd, confirm=confirm, confirm_response=confirm_response)
Saves configuration.
netmiko/dlink/dlink_ds.py
save_config
benmcbenben/netmiko
2
python
def save_config(self, cmd='save', confirm=False, confirm_response=''): return super().save_config(cmd=cmd, confirm=confirm, confirm_response=confirm_response)
def save_config(self, cmd='save', confirm=False, confirm_response=''): return super().save_config(cmd=cmd, confirm=confirm, confirm_response=confirm_response)<|docstring|>Saves configuration.<|endoftext|>
70d65a8001f401ba4e27db90670392d7805ad76584d7333369294e916bb90f96
def cleanup(self): 'Return paging before disconnect' self.send_command_timing('enable clipaging') return super().cleanup()
Return paging before disconnect
netmiko/dlink/dlink_ds.py
cleanup
benmcbenben/netmiko
2
python
def cleanup(self): self.send_command_timing('enable clipaging') return super().cleanup()
def cleanup(self): self.send_command_timing('enable clipaging') return super().cleanup()<|docstring|>Return paging before disconnect<|endoftext|>
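The D-Link driver records above mostly stub out enable and config mode and restore paging on cleanup. A hedged connection sketch follows; the host and credentials are placeholders, and the 'dlink_ds' device_type string is assumed from the driver's module name rather than confirmed by the records.

```python
# Hedged netmiko usage sketch; host/credentials are placeholders and the
# device_type string is assumed from the dlink_ds module name above.
from netmiko import ConnectHandler

conn = ConnectHandler(
    device_type='dlink_ds',      # assumed platform name for this driver
    host='192.0.2.10',           # documentation-range placeholder address
    username='admin',
    password='secret',
)
print(conn.send_command('show switch'))
conn.disconnect()                # cleanup() re-enables clipaging before closing
```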
1bf1e4a9ae1d2818658fb54d36e0f7f3a8783b5a4adb02af03e2dade16e3ab23
def neighbors(adata, n_neighbors=15, key_added=None, **kwargs): '\n Wrapper function for sc.pp.neighbors(), for supporting multiple n_neighbors\n ' if (not isinstance(n_neighbors, (list, tuple))): sc.pp.neighbors(adata, n_neighbors=n_neighbors, key_added=key_added, **kwargs) else: for (i, n_nb) in enumerate(n_neighbors): if (key_added is None): graph_key = f'k{n_nb}' elif (not isinstance(key_added, (list, tuple))): graph_key = f'{key_added}_k{n_nb}' elif (len(key_added) == len(n_neighbors)): graph_key = key_added[i] else: raise ValueError('`key_added` can only be None, a scalar, or an iterable of the same length as `n_neighbors`.') neighbors(adata, n_neighbors=n_nb, key_added=graph_key, **kwargs) return adata
Wrapper function for sc.pp.neighbors(), for supporting multiple n_neighbors
scanpy_scripts/lib/_neighbors.py
neighbors
ksenia007/scanpy-scripts
21
python
def neighbors(adata, n_neighbors=15, key_added=None, **kwargs): '\n \n ' if (not isinstance(n_neighbors, (list, tuple))): sc.pp.neighbors(adata, n_neighbors=n_neighbors, key_added=key_added, **kwargs) else: for (i, n_nb) in enumerate(n_neighbors): if (key_added is None): graph_key = f'k{n_nb}' elif (not isinstance(key_added, (list, tuple))): graph_key = f'{key_added}_k{n_nb}' elif (len(key_added) == len(n_neighbors)): graph_key = key_added[i] else: raise ValueError('`key_added` can only be None, a scalar, or an iterable of the same length as `n_neighbors`.') neighbors(adata, n_neighbors=n_nb, key_added=graph_key, **kwargs) return adata
def neighbors(adata, n_neighbors=15, key_added=None, **kwargs): '\n \n ' if (not isinstance(n_neighbors, (list, tuple))): sc.pp.neighbors(adata, n_neighbors=n_neighbors, key_added=key_added, **kwargs) else: for (i, n_nb) in enumerate(n_neighbors): if (key_added is None): graph_key = f'k{n_nb}' elif (not isinstance(key_added, (list, tuple))): graph_key = f'{key_added}_k{n_nb}' elif (len(key_added) == len(n_neighbors)): graph_key = key_added[i] else: raise ValueError('`key_added` can only be None, a scalar, or an iterable of the same length as `n_neighbors`.') neighbors(adata, n_neighbors=n_nb, key_added=graph_key, **kwargs) return adata<|docstring|>Wrapper function for sc.pp.neighbors(), for supporting multiple n_neighbors<|endoftext|>
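Because the wrapper above accepts either a single integer or a list, one call can build several neighbour graphs, each stored under a key derived from its k. A hedged usage sketch, assuming scanpy is installed, its bundled pbmc68k_reduced demo dataset is available, and the wrapper is importable from the module path given in the record:

```python
# Usage sketch for the multi-k neighbors wrapper above (assumptions noted in the lead-in).
import scanpy as sc
from scanpy_scripts.lib._neighbors import neighbors  # module path taken from the record

adata = sc.datasets.pbmc68k_reduced()            # small bundled demo dataset
neighbors(adata, n_neighbors=[10, 15, 30])       # one graph per k, keyed k10/k15/k30
print([k for k in adata.uns if k.startswith('k')])   # expected to include 'k10', 'k15', 'k30'
```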
0d1f45b02ccd7ebe99d7b0563b1fc5e69312ae3871b3c4c006c5bd3f69b2ee1e
def plot_samples(X_images, img_height, img_width, figsize=(5, 5), transpose=True, shuffle=True): '\n Args:\n X_images: A 2-D ndarray (matrix) each row of which holds the pixels as features\n of one image. The row number will be the number of all input images.\n img_height: The pixel numbers of the input image in height.\n img_width: The pixel numbers of the input image in width.\n figsize: Optional. The size of each small figure.\n transpose: Optional. Whether to transpose the image array. When the image attributes\n come from matlab, it needs to be transposed by default.\n shuffle: Optional. Whether to shuffle the input array.\n ' (img_cnt, feature_cnt) = X_images.shape assert (feature_cnt == (img_height * img_width)) if shuffle: images = np.random.permutation(X_images) else: images = X_images if (img_cnt >= 100): (n_row, n_col, samp_cnt) = (10, 10, 100) elif (img_cnt >= 64): (n_row, n_col, samp_cnt) = (8, 8, 64) else: (n_row, n_col, samp_cnt) = (0, 0, 0) if (img_cnt >= samp_cnt > 0): samps = images[0:samp_cnt] plt.figure(figsize=figsize) gs = gridspec.GridSpec(n_row, n_col, wspace=0.0, hspace=0.0) for i in range(0, n_row): for j in range(0, n_col): ax = plt.subplot(gs[(i, j)]) idx = ((i * n_col) + j) img = samps[idx].reshape(img_height, img_width) if transpose: img = img.T fig = ax.imshow(img, interpolation='nearest') fig.axes.get_xaxis().set_visible(False) fig.axes.get_yaxis().set_visible(False) plt.suptitle('{} out of {} Samples'.format(samp_cnt, img_cnt), size=12, x=0.515, y=0.935) plt.show() else: samps = images n_col = math.ceil(math.sqrt(img_cnt)) n_row = math.ceil((img_cnt / n_col)) fig = plt.figure(figsize=figsize) for i in range(0, img_cnt): ax = fig.add_subplot(n_row, n_col, (i + 1)) if transpose: img = ax.imshow(samps[i].reshape(img_height, img_width).T) else: img = ax.imshow(samps[i].reshape(img_height, img_width)) img.axes.get_xaxis().set_visible(False) img.axes.get_yaxis().set_visible(False) plt.suptitle('All {} Samples'.format(img_cnt), size=12, x=0.518, y=0.935) plt.show()
Args: X_images: A 2-D ndarray (matrix) each row of which holds the pixels as features of one image. The row number will be the number of all input images. img_height: The pixel numbers of the input image in height. img_width: The pixel numbers of the input image in width. figsize: Optional. The size of each small figure. transpose: Optional. Whether to transpose the image array. When the image attributes come from matlab, it needs to be transposed by default. shuffle: Optional. Whether to shuffle the input array.
misc/imgreader.py
plot_samples
bshao001/DmsMsgRcg
19
python
def plot_samples(X_images, img_height, img_width, figsize=(5, 5), transpose=True, shuffle=True): '\n Args:\n X_images: A 2-D ndarray (matrix) each row of which holds the pixels as features\n of one image. The row number will be the number of all input images.\n img_height: The pixel numbers of the input image in height.\n img_width: The pixel numbers of the input image in width.\n figsize: Optional. The size of each small figure.\n transpose: Optional. Whether to transpose the image array. When the image attributes\n come from matlab, it needs to be transposed by default.\n shuffle: Optional. Whether to shuffle the input array.\n ' (img_cnt, feature_cnt) = X_images.shape assert (feature_cnt == (img_height * img_width)) if shuffle: images = np.random.permutation(X_images) else: images = X_images if (img_cnt >= 100): (n_row, n_col, samp_cnt) = (10, 10, 100) elif (img_cnt >= 64): (n_row, n_col, samp_cnt) = (8, 8, 64) else: (n_row, n_col, samp_cnt) = (0, 0, 0) if (img_cnt >= samp_cnt > 0): samps = images[0:samp_cnt] plt.figure(figsize=figsize) gs = gridspec.GridSpec(n_row, n_col, wspace=0.0, hspace=0.0) for i in range(0, n_row): for j in range(0, n_col): ax = plt.subplot(gs[(i, j)]) idx = ((i * n_col) + j) img = samps[idx].reshape(img_height, img_width) if transpose: img = img.T fig = ax.imshow(img, interpolation='nearest') fig.axes.get_xaxis().set_visible(False) fig.axes.get_yaxis().set_visible(False) plt.suptitle('{} out of {} Samples'.format(samp_cnt, img_cnt), size=12, x=0.515, y=0.935) plt.show() else: samps = images n_col = math.ceil(math.sqrt(img_cnt)) n_row = math.ceil((img_cnt / n_col)) fig = plt.figure(figsize=figsize) for i in range(0, img_cnt): ax = fig.add_subplot(n_row, n_col, (i + 1)) if transpose: img = ax.imshow(samps[i].reshape(img_height, img_width).T) else: img = ax.imshow(samps[i].reshape(img_height, img_width)) img.axes.get_xaxis().set_visible(False) img.axes.get_yaxis().set_visible(False) plt.suptitle('All {} Samples'.format(img_cnt), size=12, x=0.518, y=0.935) plt.show()
def plot_samples(X_images, img_height, img_width, figsize=(5, 5), transpose=True, shuffle=True): '\n Args:\n X_images: A 2-D ndarray (matrix) each row of which holds the pixels as features\n of one image. The row number will be the number of all input images.\n img_height: The pixel numbers of the input image in height.\n img_width: The pixel numbers of the input image in width.\n figsize: Optional. The size of each small figure.\n transpose: Optional. Whether to transpose the image array. When the image attributes\n come from matlab, it needs to be transposed by default.\n shuffle: Optional. Whether to shuffle the input array.\n ' (img_cnt, feature_cnt) = X_images.shape assert (feature_cnt == (img_height * img_width)) if shuffle: images = np.random.permutation(X_images) else: images = X_images if (img_cnt >= 100): (n_row, n_col, samp_cnt) = (10, 10, 100) elif (img_cnt >= 64): (n_row, n_col, samp_cnt) = (8, 8, 64) else: (n_row, n_col, samp_cnt) = (0, 0, 0) if (img_cnt >= samp_cnt > 0): samps = images[0:samp_cnt] plt.figure(figsize=figsize) gs = gridspec.GridSpec(n_row, n_col, wspace=0.0, hspace=0.0) for i in range(0, n_row): for j in range(0, n_col): ax = plt.subplot(gs[(i, j)]) idx = ((i * n_col) + j) img = samps[idx].reshape(img_height, img_width) if transpose: img = img.T fig = ax.imshow(img, interpolation='nearest') fig.axes.get_xaxis().set_visible(False) fig.axes.get_yaxis().set_visible(False) plt.suptitle('{} out of {} Samples'.format(samp_cnt, img_cnt), size=12, x=0.515, y=0.935) plt.show() else: samps = images n_col = math.ceil(math.sqrt(img_cnt)) n_row = math.ceil((img_cnt / n_col)) fig = plt.figure(figsize=figsize) for i in range(0, img_cnt): ax = fig.add_subplot(n_row, n_col, (i + 1)) if transpose: img = ax.imshow(samps[i].reshape(img_height, img_width).T) else: img = ax.imshow(samps[i].reshape(img_height, img_width)) img.axes.get_xaxis().set_visible(False) img.axes.get_yaxis().set_visible(False) plt.suptitle('All {} Samples'.format(img_cnt), size=12, x=0.518, y=0.935) plt.show()<|docstring|>Args: X_images: A 2-D ndarray (matrix) each row of which holds the pixels as features of one image. The row number will be the number of all input images. img_height: The pixel numbers of the input image in height. img_width: The pixel numbers of the input image in width. figsize: Optional. The size of each small figure. transpose: Optional. Whether to transpose the image array. When the image attributes come from matlab, it needs to be transposed by default. shuffle: Optional. Whether to shuffle the input array.<|endoftext|>
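A minimal usage sketch for the plot_samples record above, assuming the function is importable as written and that numpy, matplotlib, gridspec and math are available; the 30 random 20x20 images and the figure size are illustrative stand-ins, not values from the source repository.

import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec

# 30 fake grayscale images, one flattened 20x20 image per row (400 features each).
fake_images = np.random.rand(30, 20 * 20)
# With fewer than 64 samples the function falls back to a ceil(sqrt(n)) grid and
# shows every image; transpose=False because this data is not MATLAB-ordered.
plot_samples(fake_images, img_height=20, img_width=20, figsize=(6, 6), transpose=False, shuffle=True)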
66d414c2bdc85ec5e0bb73a42e95985efd726897fef5b2a2a417c1971ae07062
def get_features_all_images(self, img_dir, ext_filter=['.jpg', '.png'], stride=5, padding=True, data_augm=False): '\n Output the features extracted from all images in one folder. This method is designed only\n for the trainers.\n Args:\n img_dir: The full path to the images to be feature-extracted.\n ext_filter: Optional. File name filter.\n stride: Optional. The stride of the sliding.\n padding: Optional. Whether to pad the image to fit the feature space size or to\n discard the extra pixels if padding is False.\n data_augm: Optional. Whether to perform data augmentation for the given image. The\n only data augmentation approach applied here is to rotate 20 degree clockwise\n and rotate 20 degree anti-clockwise so that 1 image becomes 3 images.\n Returns:\n A matrix (python list), in which each row contains the features of the sampling sliding\n window, while the number of rows depends on the number of the images in the given folder\n and the image size of the input, and other parameters.\n ' all_features = [] for img_file in os.listdir(img_dir): full_path_name = os.path.join(img_dir, img_file) if (os.path.isfile(full_path_name) and img_file.lower().endswith(tuple(ext_filter))): img = cv2.imread(full_path_name) img_arr = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) (_, features) = self.get_image_array_features(img_arr, stride, padding) if (len(features) > 0): all_features.extend(features) if data_augm: (rows, cols) = img_arr.shape mat1 = cv2.getRotationMatrix2D(((cols / 2), (rows / 2)), (- 25), 1) left_arr = cv2.warpAffine(img_arr, mat1, (cols, rows)) (_, left_feats) = self.get_image_array_features(left_arr, stride, padding) if (len(left_feats) > 0): all_features.extend(left_feats) mat2 = cv2.getRotationMatrix2D(((cols / 2), (rows / 2)), 25, 1) right_arr = cv2.warpAffine(img_arr, mat2, (cols, rows)) (_, right_feats) = self.get_image_array_features(right_arr, stride, padding) if (len(right_feats) > 0): all_features.extend(right_feats) return all_features
Output the features extracted from all images in one folder. This method is designed only for the trainers. Args: img_dir: The full path to the images to be feature-extracted. ext_filter: Optional. File name filter. stride: Optional. The stride of the sliding. padding: Optional. Whether to pad the image to fit the feature space size or to discard the extra pixels if padding is False. data_augm: Optional. Whether to perform data augmentation for the given image. The only data augmentation approach applied here is to rotate 20 degree clockwise and rotate 20 degree anti-clockwise so that 1 image becomes 3 images. Returns: A matrix (python list), in which each row contains the features of the sampling sliding window, while the number of rows depends on the number of the images in the given folder and the image size of the input, and other parameters.
misc/imgreader.py
get_features_all_images
bshao001/DmsMsgRcg
19
python
def get_features_all_images(self, img_dir, ext_filter=['.jpg', '.png'], stride=5, padding=True, data_augm=False): '\n Output the features extracted from all images in one folder. This method is designed only\n for the trainers.\n Args:\n img_dir: The full path to the images to be feature-extracted.\n ext_filter: Optional. File name filter.\n stride: Optional. The stride of the sliding.\n padding: Optional. Whether to pad the image to fit the feature space size or to\n discard the extra pixels if padding is False.\n data_augm: Optional. Whether to perform data augmentation for the given image. The\n only data augmentation approach applied here is to rotate 20 degree clockwise\n and rotate 20 degree anti-clockwise so that 1 image becomes 3 images.\n Returns:\n A matrix (python list), in which each row contains the features of the sampling sliding\n window, while the number of rows depends on the number of the images in the given folder\n and the image size of the input, and other parameters.\n ' all_features = [] for img_file in os.listdir(img_dir): full_path_name = os.path.join(img_dir, img_file) if (os.path.isfile(full_path_name) and img_file.lower().endswith(tuple(ext_filter))): img = cv2.imread(full_path_name) img_arr = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) (_, features) = self.get_image_array_features(img_arr, stride, padding) if (len(features) > 0): all_features.extend(features) if data_augm: (rows, cols) = img_arr.shape mat1 = cv2.getRotationMatrix2D(((cols / 2), (rows / 2)), (- 25), 1) left_arr = cv2.warpAffine(img_arr, mat1, (cols, rows)) (_, left_feats) = self.get_image_array_features(left_arr, stride, padding) if (len(left_feats) > 0): all_features.extend(left_feats) mat2 = cv2.getRotationMatrix2D(((cols / 2), (rows / 2)), 25, 1) right_arr = cv2.warpAffine(img_arr, mat2, (cols, rows)) (_, right_feats) = self.get_image_array_features(right_arr, stride, padding) if (len(right_feats) > 0): all_features.extend(right_feats) return all_features
def get_features_all_images(self, img_dir, ext_filter=['.jpg', '.png'], stride=5, padding=True, data_augm=False): '\n Output the features extracted from all images in one folder. This method is designed only\n for the trainers.\n Args:\n img_dir: The full path to the images to be feature-extracted.\n ext_filter: Optional. File name filter.\n stride: Optional. The stride of the sliding.\n padding: Optional. Whether to pad the image to fit the feature space size or to\n discard the extra pixels if padding is False.\n data_augm: Optional. Whether to perform data augmentation for the given image. The\n only data augmentation approach applied here is to rotate 20 degree clockwise\n and rotate 20 degree anti-clockwise so that 1 image becomes 3 images.\n Returns:\n A matrix (python list), in which each row contains the features of the sampling sliding\n window, while the number of rows depends on the number of the images in the given folder\n and the image size of the input, and other parameters.\n ' all_features = [] for img_file in os.listdir(img_dir): full_path_name = os.path.join(img_dir, img_file) if (os.path.isfile(full_path_name) and img_file.lower().endswith(tuple(ext_filter))): img = cv2.imread(full_path_name) img_arr = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) (_, features) = self.get_image_array_features(img_arr, stride, padding) if (len(features) > 0): all_features.extend(features) if data_augm: (rows, cols) = img_arr.shape mat1 = cv2.getRotationMatrix2D(((cols / 2), (rows / 2)), (- 25), 1) left_arr = cv2.warpAffine(img_arr, mat1, (cols, rows)) (_, left_feats) = self.get_image_array_features(left_arr, stride, padding) if (len(left_feats) > 0): all_features.extend(left_feats) mat2 = cv2.getRotationMatrix2D(((cols / 2), (rows / 2)), 25, 1) right_arr = cv2.warpAffine(img_arr, mat2, (cols, rows)) (_, right_feats) = self.get_image_array_features(right_arr, stride, padding) if (len(right_feats) > 0): all_features.extend(right_feats) return all_features<|docstring|>Output the features extracted from all images in one folder. This method is designed only for the trainers. Args: img_dir: The full path to the images to be feature-extracted. ext_filter: Optional. File name filter. stride: Optional. The stride of the sliding. padding: Optional. Whether to pad the image to fit the feature space size or to discard the extra pixels if padding is False. data_augm: Optional. Whether to perform data augmentation for the given image. The only data augmentation approach applied here is to rotate 20 degree clockwise and rotate 20 degree anti-clockwise so that 1 image becomes 3 images. Returns: A matrix (python list), in which each row contains the features of the sampling sliding window, while the number of rows depends on the number of the images in the given folder and the image size of the input, and other parameters.<|endoftext|>
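A standalone sketch of the rotation augmentation performed inside get_features_all_images, assuming OpenCV is installed; 'sample.jpg' is a placeholder path. The docstring above speaks of 20-degree rotations, but the stored body rotates by +/-25 degrees, so the sketch follows the code.

import cv2

# Load one image and convert it to grayscale, as the loop above does per file.
gray = cv2.cvtColor(cv2.imread('sample.jpg'), cv2.COLOR_BGR2GRAY)
rows, cols = gray.shape
augmented = [gray]
for angle in (-25, 25):
    # getRotationMatrix2D takes (center, angle in degrees, scale).
    mat = cv2.getRotationMatrix2D((cols / 2, rows / 2), angle, 1)
    augmented.append(cv2.warpAffine(gray, mat, (cols, rows)))
# Each of the three arrays would then go through get_image_array_features(...).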
26008085a73bc8fd3209f5f010686830faf426c68fad7ce3df1f052ad411a825
def get_image_features(self, img_file, stride=5, padding=True): '\n Take an image file as input, and output an array of image features whose matrix size is\n based on the image size. When no padding, and the image size is smaller than the required\n feature space size (in x or y direction), the image is not checked, and this method will\n return a tuple of two empty lists; When padding is True, and the image size is more than\n 4 pixels smaller than the require feature space size (in x or y direction), the image is\n not checked either. This method can be used by both the trainer and predictor.\n Args:\n img_file: The file name of the image.\n stride: Optional. The stride of the sliding.\n padding: Optional. Whether to pad the image to fit the feature space size or to\n discard the extra pixels if padding is False.\n Returns:\n coordinates: A list of coordinates, each of which contains y and x that are the top\n left corner offsets of the sliding window.\n features: A matrix (python list), in which each row contains the features of the\n sampling sliding window, while the number of rows depends on the image size of\n the input.\n ' img = cv2.imread(img_file) img_arr = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) return self.get_image_array_features(img_arr, stride, padding)
Take an image file as input, and output an array of image features whose matrix size is based on the image size. When no padding, and the image size is smaller than the required feature space size (in x or y direction), the image is not checked, and this method will return a tuple of two empty lists; When padding is True, and the image size is more than 4 pixels smaller than the require feature space size (in x or y direction), the image is not checked either. This method can be used by both the trainer and predictor. Args: img_file: The file name of the image. stride: Optional. The stride of the sliding. padding: Optional. Whether to pad the image to fit the feature space size or to discard the extra pixels if padding is False. Returns: coordinates: A list of coordinates, each of which contains y and x that are the top left corner offsets of the sliding window. features: A matrix (python list), in which each row contains the features of the sampling sliding window, while the number of rows depends on the image size of the input.
misc/imgreader.py
get_image_features
bshao001/DmsMsgRcg
19
python
def get_image_features(self, img_file, stride=5, padding=True): '\n Take an image file as input, and output an array of image features whose matrix size is\n based on the image size. When no padding, and the image size is smaller than the required\n feature space size (in x or y direction), the image is not checked, and this method will\n return a tuple of two empty lists; When padding is True, and the image size is more than\n 4 pixels smaller than the require feature space size (in x or y direction), the image is\n not checked either. This method can be used by both the trainer and predictor.\n Args:\n img_file: The file name of the image.\n stride: Optional. The stride of the sliding.\n padding: Optional. Whether to pad the image to fit the feature space size or to\n discard the extra pixels if padding is False.\n Returns:\n coordinates: A list of coordinates, each of which contains y and x that are the top\n left corner offsets of the sliding window.\n features: A matrix (python list), in which each row contains the features of the\n sampling sliding window, while the number of rows depends on the image size of\n the input.\n ' img = cv2.imread(img_file) img_arr = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) return self.get_image_array_features(img_arr, stride, padding)
def get_image_features(self, img_file, stride=5, padding=True): '\n Take an image file as input, and output an array of image features whose matrix size is\n based on the image size. When no padding, and the image size is smaller than the required\n feature space size (in x or y direction), the image is not checked, and this method will\n return a tuple of two empty lists; When padding is True, and the image size is more than\n 4 pixels smaller than the require feature space size (in x or y direction), the image is\n not checked either. This method can be used by both the trainer and predictor.\n Args:\n img_file: The file name of the image.\n stride: Optional. The stride of the sliding.\n padding: Optional. Whether to pad the image to fit the feature space size or to\n discard the extra pixels if padding is False.\n Returns:\n coordinates: A list of coordinates, each of which contains y and x that are the top\n left corner offsets of the sliding window.\n features: A matrix (python list), in which each row contains the features of the\n sampling sliding window, while the number of rows depends on the image size of\n the input.\n ' img = cv2.imread(img_file) img_arr = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) return self.get_image_array_features(img_arr, stride, padding)<|docstring|>Take an image file as input, and output an array of image features whose matrix size is based on the image size. When no padding, and the image size is smaller than the required feature space size (in x or y direction), the image is not checked, and this method will return a tuple of two empty lists; When padding is True, and the image size is more than 4 pixels smaller than the require feature space size (in x or y direction), the image is not checked either. This method can be used by both the trainer and predictor. Args: img_file: The file name of the image. stride: Optional. The stride of the sliding. padding: Optional. Whether to pad the image to fit the feature space size or to discard the extra pixels if padding is False. Returns: coordinates: A list of coordinates, each of which contains y and x that are the top left corner offsets of the sliding window. features: A matrix (python list), in which each row contains the features of the sampling sliding window, while the number of rows depends on the image size of the input.<|endoftext|>
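get_image_features is a thin wrapper: read the file, convert to grayscale, delegate to the array-level routine. A small hypothetical call showing how the two returned lists pair up; reader and the file name are stand-ins.

# reader is assumed to be an instance of the class these methods belong to.
coords, feats = reader.get_image_features('frame_0001.png', stride=5, padding=True)
# coords[i] is the (y, x) top-left offset of the i-th sliding window;
# feats[i] is that window flattened to feature_height * feature_width values.
assert len(coords) == len(feats)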
3013104b6a5a2bbb44f32070f29af9faf57c4683ff69edd926b471359c7f98c6
def get_image_array_features(self, img_arr, stride=5, padding=True): '\n Take an image file as input, and output an array of image features whose matrix size is\n based on the image size. When no padding, and the image size is smaller than the required\n feature space size (in x or y direction), the image is not checked, and this method will\n return a tuple of two empty lists; When padding is True, and the image size is more than\n 4 pixels smaller than the require feature space size (in x or y direction), the image is\n not checked either. This method can be used by both the trainer and predictor.\n Note that when stride is greater than 5, padding is not supported, and it will be reset\n to False regardless of the input.\n Args:\n img_arr: The image array (a numpy ndarray) read from the image file. It has already\n been changed to gray scale.\n stride: Optional. The stride of the sliding.\n padding: Optional. Whether to pad the image to fit the feature space size or to\n discard the extra pixels if padding is False.\n Returns:\n coordinates: A list of coordinates, each of which contains y and x that are the top\n left corner offsets of the sliding window.\n features: A matrix (python list), in which each row contains the features of the\n sampling sliding window, while the number of rows depends on the image size of\n the input.\n ' assert (stride >= 1) if (stride > 5): padding = False (coordinates, features) = ([], []) (img_height, img_width) = img_arr.shape (padding_top, padding_left) = (0, 0) if (not padding): if ((img_height < self.feature_height) or (img_width < self.feature_width)): print('Image with size: {}x{} is too small. Ignored in when no padding.'.format(img_width, img_height)) return (coordinates, features) else: if (((img_height + 4) < self.feature_height) or ((img_width + 4) < self.feature_width)): print('Image with size: {}x{} is too small. Ignored in padding mode.'.format(img_width, img_height)) return (coordinates, features) if (img_height > self.feature_height): extra_y = ((img_height - self.feature_height) % stride) if (extra_y > 0): padding_y = (stride - extra_y) else: padding_y = 0 elif (img_height < self.feature_height): padding_y = (self.feature_height - img_height) else: padding_y = 0 if (img_width > self.feature_width): extra_x = ((img_width - self.feature_width) % stride) if (extra_x > 0): padding_x = (stride - extra_x) else: padding_x = 0 elif (img_width < self.feature_width): padding_x = (self.feature_width - img_width) else: padding_x = 0 if ((padding_y > 0) or (padding_x > 0)): padding_top = math.floor((padding_y / 2)) padding_left = math.floor((padding_x / 2)) (new_y, new_x) = ((img_height + padding_y), (img_width + padding_x)) new_img = np.zeros((new_y, new_x)) new_img[(padding_top:(padding_top + img_height), padding_left:(padding_left + img_width))] = img_arr img_arr = new_img (img_height, img_width) = img_arr.shape for y in range(0, ((img_height - self.feature_height) + 1), stride): for x in range(0, ((img_width - self.feature_width) + 1), stride): orig_x = (x - padding_left) orig_y = (y - padding_top) coordinates.append((orig_y, orig_x)) this_win = img_arr[(y:(y + self.feature_height), x:(x + self.feature_width))] features.append(this_win.reshape((- 1))) return (coordinates, features)
Take an image file as input, and output an array of image features whose matrix size is based on the image size. When no padding, and the image size is smaller than the required feature space size (in x or y direction), the image is not checked, and this method will return a tuple of two empty lists; When padding is True, and the image size is more than 4 pixels smaller than the require feature space size (in x or y direction), the image is not checked either. This method can be used by both the trainer and predictor. Note that when stride is greater than 5, padding is not supported, and it will be reset to False regardless of the input. Args: img_arr: The image array (a numpy ndarray) read from the image file. It has already been changed to gray scale. stride: Optional. The stride of the sliding. padding: Optional. Whether to pad the image to fit the feature space size or to discard the extra pixels if padding is False. Returns: coordinates: A list of coordinates, each of which contains y and x that are the top left corner offsets of the sliding window. features: A matrix (python list), in which each row contains the features of the sampling sliding window, while the number of rows depends on the image size of the input.
misc/imgreader.py
get_image_array_features
bshao001/DmsMsgRcg
19
python
def get_image_array_features(self, img_arr, stride=5, padding=True): '\n Take an image file as input, and output an array of image features whose matrix size is\n based on the image size. When no padding, and the image size is smaller than the required\n feature space size (in x or y direction), the image is not checked, and this method will\n return a tuple of two empty lists; When padding is True, and the image size is more than\n 4 pixels smaller than the require feature space size (in x or y direction), the image is\n not checked either. This method can be used by both the trainer and predictor.\n Note that when stride is greater than 5, padding is not supported, and it will be reset\n to False regardless of the input.\n Args:\n img_arr: The image array (a numpy ndarray) read from the image file. It has already\n been changed to gray scale.\n stride: Optional. The stride of the sliding.\n padding: Optional. Whether to pad the image to fit the feature space size or to\n discard the extra pixels if padding is False.\n Returns:\n coordinates: A list of coordinates, each of which contains y and x that are the top\n left corner offsets of the sliding window.\n features: A matrix (python list), in which each row contains the features of the\n sampling sliding window, while the number of rows depends on the image size of\n the input.\n ' assert (stride >= 1) if (stride > 5): padding = False (coordinates, features) = ([], []) (img_height, img_width) = img_arr.shape (padding_top, padding_left) = (0, 0) if (not padding): if ((img_height < self.feature_height) or (img_width < self.feature_width)): print('Image with size: {}x{} is too small. Ignored in when no padding.'.format(img_width, img_height)) return (coordinates, features) else: if (((img_height + 4) < self.feature_height) or ((img_width + 4) < self.feature_width)): print('Image with size: {}x{} is too small. Ignored in padding mode.'.format(img_width, img_height)) return (coordinates, features) if (img_height > self.feature_height): extra_y = ((img_height - self.feature_height) % stride) if (extra_y > 0): padding_y = (stride - extra_y) else: padding_y = 0 elif (img_height < self.feature_height): padding_y = (self.feature_height - img_height) else: padding_y = 0 if (img_width > self.feature_width): extra_x = ((img_width - self.feature_width) % stride) if (extra_x > 0): padding_x = (stride - extra_x) else: padding_x = 0 elif (img_width < self.feature_width): padding_x = (self.feature_width - img_width) else: padding_x = 0 if ((padding_y > 0) or (padding_x > 0)): padding_top = math.floor((padding_y / 2)) padding_left = math.floor((padding_x / 2)) (new_y, new_x) = ((img_height + padding_y), (img_width + padding_x)) new_img = np.zeros((new_y, new_x)) new_img[(padding_top:(padding_top + img_height), padding_left:(padding_left + img_width))] = img_arr img_arr = new_img (img_height, img_width) = img_arr.shape for y in range(0, ((img_height - self.feature_height) + 1), stride): for x in range(0, ((img_width - self.feature_width) + 1), stride): orig_x = (x - padding_left) orig_y = (y - padding_top) coordinates.append((orig_y, orig_x)) this_win = img_arr[(y:(y + self.feature_height), x:(x + self.feature_width))] features.append(this_win.reshape((- 1))) return (coordinates, features)
def get_image_array_features(self, img_arr, stride=5, padding=True): '\n        Take an image file as input, and output an array of image features whose matrix size is\n        based on the image size. When no padding, and the image size is smaller than the required\n        feature space size (in x or y direction), the image is not checked, and this method will\n        return a tuple of two empty lists; When padding is True, and the image size is more than\n        4 pixels smaller than the require feature space size (in x or y direction), the image is\n        not checked either. This method can be used by both the trainer and predictor.\n        Note that when stride is greater than 5, padding is not supported, and it will be reset\n        to False regardless of the input.\n        Args:\n            img_arr: The image array (a numpy ndarray) read from the image file. It has already\n                been changed to gray scale.\n            stride: Optional. The stride of the sliding.\n            padding: Optional. Whether to pad the image to fit the feature space size or to\n                discard the extra pixels if padding is False.\n        Returns:\n            coordinates: A list of coordinates, each of which contains y and x that are the top\n                left corner offsets of the sliding window.\n            features: A matrix (python list), in which each row contains the features of the\n                sampling sliding window, while the number of rows depends on the image size of\n                the input.\n        ' assert (stride >= 1) if (stride > 5): padding = False (coordinates, features) = ([], []) (img_height, img_width) = img_arr.shape (padding_top, padding_left) = (0, 0) if (not padding): if ((img_height < self.feature_height) or (img_width < self.feature_width)): print('Image with size: {}x{} is too small. Ignored in when no padding.'.format(img_width, img_height)) return (coordinates, features) else: if (((img_height + 4) < self.feature_height) or ((img_width + 4) < self.feature_width)): print('Image with size: {}x{} is too small. Ignored in padding mode.'.format(img_width, img_height)) return (coordinates, features) if (img_height > self.feature_height): extra_y = ((img_height - self.feature_height) % stride) if (extra_y > 0): padding_y = (stride - extra_y) else: padding_y = 0 elif (img_height < self.feature_height): padding_y = (self.feature_height - img_height) else: padding_y = 0 if (img_width > self.feature_width): extra_x = ((img_width - self.feature_width) % stride) if (extra_x > 0): padding_x = (stride - extra_x) else: padding_x = 0 elif (img_width < self.feature_width): padding_x = (self.feature_width - img_width) else: padding_x = 0 if ((padding_y > 0) or (padding_x > 0)): padding_top = math.floor((padding_y / 2)) padding_left = math.floor((padding_x / 2)) (new_y, new_x) = ((img_height + padding_y), (img_width + padding_x)) new_img = np.zeros((new_y, new_x)) new_img[(padding_top:(padding_top + img_height), padding_left:(padding_left + img_width))] = img_arr img_arr = new_img (img_height, img_width) = img_arr.shape for y in range(0, ((img_height - self.feature_height) + 1), stride): for x in range(0, ((img_width - self.feature_width) + 1), stride): orig_x = (x - padding_left) orig_y = (y - padding_top) coordinates.append((orig_y, orig_x)) this_win = img_arr[(y:(y + self.feature_height), x:(x + self.feature_width))] features.append(this_win.reshape((- 1))) return (coordinates, features)<|docstring|>Take an image file as input, and output an array of image features whose matrix size is based on the image size. When no padding, and the image size is smaller than the required feature space size (in x or y direction), the image is not checked, and this method will return a tuple of two empty lists; When padding is True, and the image size is more than 4 pixels smaller than the require feature space size (in x or y direction), the image is not checked either. This method can be used by both the trainer and predictor. Note that when stride is greater than 5, padding is not supported, and it will be reset to False regardless of the input. Args: img_arr: The image array (a numpy ndarray) read from the image file. It has already been changed to gray scale. stride: Optional. The stride of the sliding. padding: Optional. Whether to pad the image to fit the feature space size or to discard the extra pixels if padding is False. Returns: coordinates: A list of coordinates, each of which contains y and x that are the top left corner offsets of the sliding window. features: A matrix (python list), in which each row contains the features of the sampling sliding window, while the number of rows depends on the image size of the input.<|endoftext|>
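The subscripts stored in this record, such as img_arr[(y:(y + self.feature_height), x:(x + self.feature_width))], are unparser output rather than valid Python source. A runnable sketch of the core sliding-window loop with standard slicing, leaving out the padding bookkeeping for brevity; h and w stand in for self.feature_height and self.feature_width.

import numpy as np

def sliding_window_features(img_arr, h, w, stride=5):
    # Collect (y, x) offsets and flattened h x w windows, as the method above
    # does once its padding adjustments are applied.
    coordinates, features = [], []
    img_height, img_width = img_arr.shape
    for y in range(0, img_height - h + 1, stride):
        for x in range(0, img_width - w + 1, stride):
            coordinates.append((y, x))
            features.append(img_arr[y:y + h, x:x + w].reshape(-1))
    return coordinates, features

# e.g. sliding_window_features(np.zeros((40, 60)), h=28, w=28, stride=5) yields 21 windows.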
24b18eb0b6c624c820d1b3777f2fd9c393bdf870c82a086f517ce793d0bdd357
def shuffle_data(data, labels): ' Shuffle data and labels.\n Input:\n data: B,N,... numpy array\n label: B,... numpy array\n Return:\n shuffled data, label and shuffle indices\n ' idx = np.arange(len(labels)) np.random.shuffle(idx) return (data[(idx, ...)], labels[idx], idx)
Shuffle data and labels. Input: data: B,N,... numpy array label: B,... numpy array Return: shuffled data, label and shuffle indices
torchpcp/datasets/S3DIS/utils/provider.py
shuffle_data
Obarads/torch_point_cloud
1
python
def shuffle_data(data, labels): ' Shuffle data and labels.\n Input:\n data: B,N,... numpy array\n label: B,... numpy array\n Return:\n shuffled data, label and shuffle indices\n ' idx = np.arange(len(labels)) np.random.shuffle(idx) return (data[(idx, ...)], labels[idx], idx)
def shuffle_data(data, labels): ' Shuffle data and labels.\n Input:\n data: B,N,... numpy array\n label: B,... numpy array\n Return:\n shuffled data, label and shuffle indices\n ' idx = np.arange(len(labels)) np.random.shuffle(idx) return (data[(idx, ...)], labels[idx], idx)<|docstring|>Shuffle data and labels. Input: data: B,N,... numpy array label: B,... numpy array Return: shuffled data, label and shuffle indices<|endoftext|>
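A quick sanity check for shuffle_data on toy arrays (shapes are illustrative): the returned index array records the permutation, so rows and labels stay paired and the shuffle can be reproduced.

import numpy as np

data = np.arange(12).reshape(4, 3)     # B=4 samples, 3 features each
labels = np.array([0, 1, 2, 3])
shuffled_data, shuffled_labels, idx = shuffle_data(data, labels)
assert np.array_equal(shuffled_data, data[idx])
assert np.array_equal(shuffled_labels, labels[idx])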
cc4e9e89a324ac40add425d25b2b61c623d1408d98a8472d1a32dbdb6966ec46
def rotate_point_cloud_ndim(batch_xyzc, degree=None): ' Randomly rotate the point clouds to augument the dataset\n rotation is per shape based along up direction\n Input:\n BxNxC array, original batch of point clouds\n Return:\n BxNxC array, rotated batch of point clouds\n ' batch_data = batch_xyzc[(:, :, :3)] rotated_data = np.zeros(batch_data.shape, dtype=np.float32) for k in range(batch_data.shape[0]): if (degree is not None): rotation_angle = ((degree / 180) * np.pi) else: rotation_angle = ((np.random.uniform() * 2) * np.pi) cosval = np.cos(rotation_angle) sinval = np.sin(rotation_angle) rotation_matrix = np.array([[cosval, (- sinval), 0], [sinval, cosval, 0], [0, 0, 1]]) shape_pc = batch_data[(k, ...)] rotated_data[(k, ...)] = np.dot(shape_pc.reshape(((- 1), 3)), rotation_matrix) res = np.concatenate([rotated_data[(:, :, :3)], batch_xyzc[(:, :, 3:)]], axis=(- 1)) return res
Randomly rotate the point clouds to augument the dataset rotation is per shape based along up direction Input: BxNxC array, original batch of point clouds Return: BxNxC array, rotated batch of point clouds
torchpcp/datasets/S3DIS/utils/provider.py
rotate_point_cloud_ndim
Obarads/torch_point_cloud
1
python
def rotate_point_cloud_ndim(batch_xyzc, degree=None): ' Randomly rotate the point clouds to augument the dataset\n rotation is per shape based along up direction\n Input:\n BxNxC array, original batch of point clouds\n Return:\n BxNxC array, rotated batch of point clouds\n ' batch_data = batch_xyzc[(:, :, :3)] rotated_data = np.zeros(batch_data.shape, dtype=np.float32) for k in range(batch_data.shape[0]): if (degree is not None): rotation_angle = ((degree / 180) * np.pi) else: rotation_angle = ((np.random.uniform() * 2) * np.pi) cosval = np.cos(rotation_angle) sinval = np.sin(rotation_angle) rotation_matrix = np.array([[cosval, (- sinval), 0], [sinval, cosval, 0], [0, 0, 1]]) shape_pc = batch_data[(k, ...)] rotated_data[(k, ...)] = np.dot(shape_pc.reshape(((- 1), 3)), rotation_matrix) res = np.concatenate([rotated_data[(:, :, :3)], batch_xyzc[(:, :, 3:)]], axis=(- 1)) return res
def rotate_point_cloud_ndim(batch_xyzc, degree=None): ' Randomly rotate the point clouds to augument the dataset\n rotation is per shape based along up direction\n Input:\n BxNxC array, original batch of point clouds\n Return:\n BxNxC array, rotated batch of point clouds\n ' batch_data = batch_xyzc[(:, :, :3)] rotated_data = np.zeros(batch_data.shape, dtype=np.float32) for k in range(batch_data.shape[0]): if (degree is not None): rotation_angle = ((degree / 180) * np.pi) else: rotation_angle = ((np.random.uniform() * 2) * np.pi) cosval = np.cos(rotation_angle) sinval = np.sin(rotation_angle) rotation_matrix = np.array([[cosval, (- sinval), 0], [sinval, cosval, 0], [0, 0, 1]]) shape_pc = batch_data[(k, ...)] rotated_data[(k, ...)] = np.dot(shape_pc.reshape(((- 1), 3)), rotation_matrix) res = np.concatenate([rotated_data[(:, :, :3)], batch_xyzc[(:, :, 3:)]], axis=(- 1)) return res<|docstring|>Randomly rotate the point clouds to augument the dataset rotation is per shape based along up direction Input: BxNxC array, original batch of point clouds Return: BxNxC array, rotated batch of point clouds<|endoftext|>
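This record's body also contains unparser-style subscripts such as batch_xyzc[(:, :, :3)], which will not parse as Python source. A runnable sketch of the same idea with standard slicing: rotate only the xyz channels about the z axis and carry any extra channels through unchanged. Unlike the original, it applies one given angle to the whole batch rather than a fresh random angle per cloud; the function name is a stand-in.

import numpy as np

def rotate_xyz_about_z(batch_xyzc, angle):
    # batch_xyzc: (B, N, C) with xyz in the first three channels.
    c, s = np.cos(angle), np.sin(angle)
    rot = np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])
    xyz = batch_xyzc[:, :, :3] @ rot   # row-vector convention, as in the original
    return np.concatenate([xyz, batch_xyzc[:, :, 3:]], axis=-1)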
bc3a9d784919c08a53ebbd0fa3cf512e012cb65e021f05d4c8266f706d0e126e
def rotate_point_cloud(batch_data): ' Randomly rotate the point clouds to augument the dataset\n rotation is per shape based along up direction\n Input:\n BxNx3 array, original batch of point clouds\n Return:\n BxNx3 array, rotated batch of point clouds\n ' rotated_data = np.zeros(batch_data.shape, dtype=np.float32) for k in range(batch_data.shape[0]): rotation_angle = ((np.random.uniform() * 2) * np.pi) cosval = np.cos(rotation_angle) sinval = np.sin(rotation_angle) rotation_matrix = np.array([[cosval, 0, sinval], [0, 1, 0], [(- sinval), 0, cosval]]) shape_pc = batch_data[(k, ...)] rotated_data[(k, ...)] = np.dot(shape_pc.reshape(((- 1), 3)), rotation_matrix) return rotated_data
Randomly rotate the point clouds to augument the dataset rotation is per shape based along up direction Input: BxNx3 array, original batch of point clouds Return: BxNx3 array, rotated batch of point clouds
torchpcp/datasets/S3DIS/utils/provider.py
rotate_point_cloud
Obarads/torch_point_cloud
1
python
def rotate_point_cloud(batch_data): ' Randomly rotate the point clouds to augument the dataset\n rotation is per shape based along up direction\n Input:\n BxNx3 array, original batch of point clouds\n Return:\n BxNx3 array, rotated batch of point clouds\n ' rotated_data = np.zeros(batch_data.shape, dtype=np.float32) for k in range(batch_data.shape[0]): rotation_angle = ((np.random.uniform() * 2) * np.pi) cosval = np.cos(rotation_angle) sinval = np.sin(rotation_angle) rotation_matrix = np.array([[cosval, 0, sinval], [0, 1, 0], [(- sinval), 0, cosval]]) shape_pc = batch_data[(k, ...)] rotated_data[(k, ...)] = np.dot(shape_pc.reshape(((- 1), 3)), rotation_matrix) return rotated_data
def rotate_point_cloud(batch_data): ' Randomly rotate the point clouds to augument the dataset\n rotation is per shape based along up direction\n Input:\n BxNx3 array, original batch of point clouds\n Return:\n BxNx3 array, rotated batch of point clouds\n ' rotated_data = np.zeros(batch_data.shape, dtype=np.float32) for k in range(batch_data.shape[0]): rotation_angle = ((np.random.uniform() * 2) * np.pi) cosval = np.cos(rotation_angle) sinval = np.sin(rotation_angle) rotation_matrix = np.array([[cosval, 0, sinval], [0, 1, 0], [(- sinval), 0, cosval]]) shape_pc = batch_data[(k, ...)] rotated_data[(k, ...)] = np.dot(shape_pc.reshape(((- 1), 3)), rotation_matrix) return rotated_data<|docstring|>Randomly rotate the point clouds to augument the dataset rotation is per shape based along up direction Input: BxNx3 array, original batch of point clouds Return: BxNx3 array, rotated batch of point clouds<|endoftext|>
260a163552aa67b0009b0a0d44379fd73cf6dbee4faef3a6af339fefac03187c
def rotate_point_cloud_by_angle(batch_data, rotation_angle): ' Rotate the point cloud along up direction with certain angle.\n Input:\n BxNx3 array, original batch of point clouds\n Return:\n BxNx3 array, rotated batch of point clouds\n ' rotated_data = np.zeros(batch_data.shape, dtype=np.float32) for k in range(batch_data.shape[0]): cosval = np.cos(rotation_angle) sinval = np.sin(rotation_angle) rotation_matrix = np.array([[cosval, 0, sinval], [0, 1, 0], [(- sinval), 0, cosval]]) shape_pc = batch_data[(k, ...)] rotated_data[(k, ...)] = np.dot(shape_pc.reshape(((- 1), 3)), rotation_matrix) return rotated_data
Rotate the point cloud along up direction with certain angle. Input: BxNx3 array, original batch of point clouds Return: BxNx3 array, rotated batch of point clouds
torchpcp/datasets/S3DIS/utils/provider.py
rotate_point_cloud_by_angle
Obarads/torch_point_cloud
1
python
def rotate_point_cloud_by_angle(batch_data, rotation_angle): ' Rotate the point cloud along up direction with certain angle.\n Input:\n BxNx3 array, original batch of point clouds\n Return:\n BxNx3 array, rotated batch of point clouds\n ' rotated_data = np.zeros(batch_data.shape, dtype=np.float32) for k in range(batch_data.shape[0]): cosval = np.cos(rotation_angle) sinval = np.sin(rotation_angle) rotation_matrix = np.array([[cosval, 0, sinval], [0, 1, 0], [(- sinval), 0, cosval]]) shape_pc = batch_data[(k, ...)] rotated_data[(k, ...)] = np.dot(shape_pc.reshape(((- 1), 3)), rotation_matrix) return rotated_data
def rotate_point_cloud_by_angle(batch_data, rotation_angle): ' Rotate the point cloud along up direction with certain angle.\n Input:\n BxNx3 array, original batch of point clouds\n Return:\n BxNx3 array, rotated batch of point clouds\n ' rotated_data = np.zeros(batch_data.shape, dtype=np.float32) for k in range(batch_data.shape[0]): cosval = np.cos(rotation_angle) sinval = np.sin(rotation_angle) rotation_matrix = np.array([[cosval, 0, sinval], [0, 1, 0], [(- sinval), 0, cosval]]) shape_pc = batch_data[(k, ...)] rotated_data[(k, ...)] = np.dot(shape_pc.reshape(((- 1), 3)), rotation_matrix) return rotated_data<|docstring|>Rotate the point cloud along up direction with certain angle. Input: BxNx3 array, original batch of point clouds Return: BxNx3 array, rotated batch of point clouds<|endoftext|>
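A small check of the up-direction (y-axis) rotation convention shared by rotate_point_cloud and rotate_point_cloud_by_angle: with the row-vector-times-matrix convention used above, rotating the unit x vector by pi/2 lands it on the z axis. The one-point batch is purely illustrative.

import numpy as np

batch = np.array([[[1.0, 0.0, 0.0]]])             # B=1 cloud with N=1 point
rotated = rotate_point_cloud_by_angle(batch, np.pi / 2)
assert np.allclose(rotated, [[[0.0, 0.0, 1.0]]])  # x axis -> z axis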