body_hash | body | docstring | path | name | repository_name | repository_stars | lang | body_without_docstring | unified |
---|---|---|---|---|---|---|---|---|---|
798b1475d8aa777a56d55c73fd7a6a0f64e5ca402420a7ff3213f09a5bf3f075 | @staticmethod
def route(database):
'Route handler with database parameter.'
return dict(ok=True) | Route handler with database parameter. | components/server/tests/external/routes/plugins/test_route_auth_plugin.py | route | ICTU/quality-time | 33 | python | @staticmethod
def route(database):
return dict(ok=True) | @staticmethod
def route(database):
return dict(ok=True)<|docstring|>Route handler with database parameter.<|endoftext|> |
f8d8b5cd45b6223acfc56e816e61ee7acf3aad1d7c2a2f722693c95d909261b8 | def test_route_without_specified_auth(self):
'Test that the auth plugin will crash.'
route = bottle.Route(bottle.app(), '/', 'POST', self.route)
with self.assertRaises(AttributeError):
route.call() | Test that the auth plugin will crash. | components/server/tests/external/routes/plugins/test_route_auth_plugin.py | test_route_without_specified_auth | ICTU/quality-time | 33 | python | def test_route_without_specified_auth(self):
route = bottle.Route(bottle.app(), '/', 'POST', self.route)
with self.assertRaises(AttributeError):
route.call() | def test_route_without_specified_auth(self):
route = bottle.Route(bottle.app(), '/', 'POST', self.route)
with self.assertRaises(AttributeError):
route.call()<|docstring|>Test that the auth plugin will crash.<|endoftext|> |
f6461fb802e6cd7c162c2e410038e8c7ef08d0289bdb4f18aa5b3bebb13b96b4 | def test_valid_session(self):
'Test that session ids are authenticated.'
self.database.sessions.find_one.return_value = dict(session_expiration_datetime=datetime.max.replace(tzinfo=timezone.utc))
route = bottle.Route(bottle.app(), '/', 'POST', self.route, authentication_required=True)
self.assertEqual(self.success, route.call()) | Test that session ids are authenticated. | components/server/tests/external/routes/plugins/test_route_auth_plugin.py | test_valid_session | ICTU/quality-time | 33 | python | def test_valid_session(self):
self.database.sessions.find_one.return_value = dict(session_expiration_datetime=datetime.max.replace(tzinfo=timezone.utc))
route = bottle.Route(bottle.app(), '/', 'POST', self.route, authentication_required=True)
self.assertEqual(self.success, route.call()) | def test_valid_session(self):
self.database.sessions.find_one.return_value = dict(session_expiration_datetime=datetime.max.replace(tzinfo=timezone.utc))
route = bottle.Route(bottle.app(), '/', 'POST', self.route, authentication_required=True)
self.assertEqual(self.success, route.call())<|docstring|>Test that session ids are authenticated.<|endoftext|> |
b9e2a5c197638fb1237dbfa8db580b3a26ea2865f575994ed57c105e597b38f8 | def test_expired_session(self):
"Test that the session is invalid when it's expired."
self.database.sessions.find_one.return_value = dict(session_expiration_datetime=datetime.min.replace(tzinfo=timezone.utc))
route = bottle.Route(bottle.app(), '/', 'POST', self.route, authentication_required=True)
self.assertEqual(401, route.call().status_code) | Test that the session is invalid when it's expired. | components/server/tests/external/routes/plugins/test_route_auth_plugin.py | test_expired_session | ICTU/quality-time | 33 | python | def test_expired_session(self):
self.database.sessions.find_one.return_value = dict(session_expiration_datetime=datetime.min.replace(tzinfo=timezone.utc))
route = bottle.Route(bottle.app(), '/', 'POST', self.route, authentication_required=True)
self.assertEqual(401, route.call().status_code) | def test_expired_session(self):
self.database.sessions.find_one.return_value = dict(session_expiration_datetime=datetime.min.replace(tzinfo=timezone.utc))
route = bottle.Route(bottle.app(), '/', 'POST', self.route, authentication_required=True)
self.assertEqual(401, route.call().status_code)<|docstring|>Test that the session is invalid when it's expired.<|endoftext|> |
aba10f7f15fa7020985826a646055b65afa8d2b2cb50f2e1104a30e4a8676d19 | def test_missing_session(self):
"Test that the session is invalid when it's missing."
route = bottle.Route(bottle.app(), '/', 'POST', self.route, authentication_required=True)
self.assertEqual(401, route.call().status_code) | Test that the session is invalid when it's missing. | components/server/tests/external/routes/plugins/test_route_auth_plugin.py | test_missing_session | ICTU/quality-time | 33 | python | def test_missing_session(self):
route = bottle.Route(bottle.app(), '/', 'POST', self.route, authentication_required=True)
self.assertEqual(401, route.call().status_code) | def test_missing_session(self):
route = bottle.Route(bottle.app(), '/', 'POST', self.route, authentication_required=True)
self.assertEqual(401, route.call().status_code)<|docstring|>Test that the session is invalid when it's missing.<|endoftext|> |
432872f0e79a2c0151403a45f214b17708bb363c2451efe7bf12a57e372a0de2 | def test_unauthorized_session(self):
'Test that an unauthorized user cannot post.'
self.database.reports_overviews.find_one.return_value = dict(_id='id', permissions={EDIT_REPORT_PERMISSION: ['jodoe']})
self.database.sessions.find_one.return_value = self.session
route = bottle.Route(bottle.app(), '/', 'POST', self.route, permissions_required=[EDIT_REPORT_PERMISSION])
self.assertEqual(403, route.call().status_code) | Test that an unauthorized user cannot post. | components/server/tests/external/routes/plugins/test_route_auth_plugin.py | test_unauthorized_session | ICTU/quality-time | 33 | python | def test_unauthorized_session(self):
self.database.reports_overviews.find_one.return_value = dict(_id='id', permissions={EDIT_REPORT_PERMISSION: ['jodoe']})
self.database.sessions.find_one.return_value = self.session
route = bottle.Route(bottle.app(), '/', 'POST', self.route, permissions_required=[EDIT_REPORT_PERMISSION])
self.assertEqual(403, route.call().status_code) | def test_unauthorized_session(self):
self.database.reports_overviews.find_one.return_value = dict(_id='id', permissions={EDIT_REPORT_PERMISSION: ['jodoe']})
self.database.sessions.find_one.return_value = self.session
route = bottle.Route(bottle.app(), '/', 'POST', self.route, permissions_required=[EDIT_REPORT_PERMISSION])
self.assertEqual(403, route.call().status_code)<|docstring|>Test that an unauthorized user cannot post.<|endoftext|> |
7e95b955092c0ca39d036783b2855f813f3a975e6ce257e8e4b65aa304ae0b04 | def test_post_route_with_permissions_required_when_everyone_has_permission(self):
'Test that an authenticated user can post if permissions have not been restricted.'
self.database.reports_overviews.find_one.return_value = dict(_id='id', permissions={})
self.database.sessions.find_one.return_value = self.session
route = bottle.Route(bottle.app(), '/', 'POST', self.route, permissions_required=[EDIT_REPORT_PERMISSION])
self.assertEqual(self.success, route.call()) | Test that an authenticated user can post if permissions have not been restricted. | components/server/tests/external/routes/plugins/test_route_auth_plugin.py | test_post_route_with_permissions_required_when_everyone_has_permission | ICTU/quality-time | 33 | python | def test_post_route_with_permissions_required_when_everyone_has_permission(self):
self.database.reports_overviews.find_one.return_value = dict(_id='id', permissions={})
self.database.sessions.find_one.return_value = self.session
route = bottle.Route(bottle.app(), '/', 'POST', self.route, permissions_required=[EDIT_REPORT_PERMISSION])
self.assertEqual(self.success, route.call()) | def test_post_route_with_permissions_required_when_everyone_has_permission(self):
self.database.reports_overviews.find_one.return_value = dict(_id='id', permissions={})
self.database.sessions.find_one.return_value = self.session
route = bottle.Route(bottle.app(), '/', 'POST', self.route, permissions_required=[EDIT_REPORT_PERMISSION])
self.assertEqual(self.success, route.call())<|docstring|>Test that an authenticated user can post if permissions have not been restricted.<|endoftext|> |
839dc66c268264df9ac9462c357f4346e6b9236c1320e664861de30df251daf7 | def test_post_route_with_permissions_required(self):
'Test that an authenticated user can post if they have the required permissions.'
self.database.reports_overviews.find_one.return_value = dict(_id='id', permissions={EDIT_REPORT_PERMISSION: ['jadoe']})
self.database.sessions.find_one.return_value = self.session
route = bottle.Route(bottle.app(), '/', 'POST', self.route, permissions_required=[EDIT_REPORT_PERMISSION])
self.assertEqual(self.success, route.call()) | Test that an authenticated user can post if they have the required permissions. | components/server/tests/external/routes/plugins/test_route_auth_plugin.py | test_post_route_with_permissions_required | ICTU/quality-time | 33 | python | def test_post_route_with_permissions_required(self):
self.database.reports_overviews.find_one.return_value = dict(_id='id', permissions={EDIT_REPORT_PERMISSION: ['jadoe']})
self.database.sessions.find_one.return_value = self.session
route = bottle.Route(bottle.app(), '/', 'POST', self.route, permissions_required=[EDIT_REPORT_PERMISSION])
self.assertEqual(self.success, route.call()) | def test_post_route_with_permissions_required(self):
self.database.reports_overviews.find_one.return_value = dict(_id='id', permissions={EDIT_REPORT_PERMISSION: ['jadoe']})
self.database.sessions.find_one.return_value = self.session
route = bottle.Route(bottle.app(), '/', 'POST', self.route, permissions_required=[EDIT_REPORT_PERMISSION])
self.assertEqual(self.success, route.call())<|docstring|>Test that an authenticated user can post if they have the required permissions.<|endoftext|> |
5615f6b3011316b21be3a63f3369faec6e59e9073094543af60e9ac12f9d85e1 | def test_post_route_without_authentication_required(self):
'Test that unauthenticated users can POST if no authentication is required.'
route = bottle.Route(bottle.app(), '/', 'POST', self.route, authentication_required=False)
self.assertEqual(self.success, route.call()) | Test that unauthenticated users can POST if no authentication is required. | components/server/tests/external/routes/plugins/test_route_auth_plugin.py | test_post_route_without_authentication_required | ICTU/quality-time | 33 | python | def test_post_route_without_authentication_required(self):
route = bottle.Route(bottle.app(), '/', 'POST', self.route, authentication_required=False)
self.assertEqual(self.success, route.call()) | def test_post_route_without_authentication_required(self):
route = bottle.Route(bottle.app(), '/', 'POST', self.route, authentication_required=False)
self.assertEqual(self.success, route.call())<|docstring|>Test that unauthenticated users can POST if no authentication is required.<|endoftext|> |
ac85220ccd21f7f4e16ab1f8ca1e7372e1d507dfb6c328a4fc596e94cb51edce | def test_get_route_without_authentication_required(self):
'Test that unauthenticated users can GET if no authentication is required.'
route = bottle.Route(bottle.app(), '/', 'GET', self.route, authentication_required=False)
self.assertEqual(self.success, route.call()) | Test that unauthenticated users can GET if no authentication is required. | components/server/tests/external/routes/plugins/test_route_auth_plugin.py | test_get_route_without_authentication_required | ICTU/quality-time | 33 | python | def test_get_route_without_authentication_required(self):
route = bottle.Route(bottle.app(), '/', 'GET', self.route, authentication_required=False)
self.assertEqual(self.success, route.call()) | def test_get_route_without_authentication_required(self):
route = bottle.Route(bottle.app(), '/', 'GET', self.route, authentication_required=False)
self.assertEqual(self.success, route.call())<|docstring|>Test that unauthenticated users can GET if no authentication is required.<|endoftext|> |
0685be51dd87e0f354a827daff4ad557fb8e238740d7ff81b9b9691a2b3d47b3 | def identity(domain, range_, dual_to_range, parameters=None, device_interface=None, precision=None):
'Assemble the L^2 identity operator.'
return _common.create_operator('l2_identity', domain, range_, dual_to_range, parameters, 'sparse', [], 'l2_identity', 'default_sparse', device_interface, precision, False) | Assemble the L^2 identity operator. | bempp/api/operators/boundary/sparse.py | identity | tbetcke/bempp-cl | 0 | python | def identity(domain, range_, dual_to_range, parameters=None, device_interface=None, precision=None):
return _common.create_operator('l2_identity', domain, range_, dual_to_range, parameters, 'sparse', [], 'l2_identity', 'default_sparse', device_interface, precision, False) | def identity(domain, range_, dual_to_range, parameters=None, device_interface=None, precision=None):
return _common.create_operator('l2_identity', domain, range_, dual_to_range, parameters, 'sparse', [], 'l2_identity', 'default_sparse', device_interface, precision, False)<|docstring|>Assemble the L^2 identity operator.<|endoftext|> |
98fea80ea186297fcd8d7dbb24ad7dafec09de6a05bb9d906d5008d08f37c69e | def multitrace_identity(multitrace_operator, parameters=None, device_interface=None, precision=None):
'\n Create a multitrace identity operator.\n\n Parameters\n ----------\n multitrace_operator : Bempp Operator\n A 2 x 2 multitrace operator object whose spaces are used to define\n the identity operator.\n\n Output\n ------\n A block-diagonal multitrace identity operator.\n\n '
from bempp.api.assembly.blocked_operator import BlockedOperator
(domain0, domain1) = multitrace_operator.domain_spaces
(dual_to_range0, dual_to_range1) = multitrace_operator.dual_to_range_spaces
(range0, range1) = multitrace_operator.range_spaces
blocked_operator = BlockedOperator(2, 2)
blocked_operator[(0, 0)] = identity(domain0, range0, dual_to_range0)
blocked_operator[(1, 1)] = identity(domain1, range1, dual_to_range1)
return blocked_operator | Create a multitrace identity operator.
Parameters
----------
multitrace_operator : Bempp Operator
A 2 x 2 multitrace operator object whose spaces are used to define
the identity operator.
Output
------
A block-diagonal multitrace identity operator. | bempp/api/operators/boundary/sparse.py | multitrace_identity | tbetcke/bempp-cl | 0 | python | def multitrace_identity(multitrace_operator, parameters=None, device_interface=None, precision=None):
'\n Create a multitrace identity operator.\n\n Parameters\n ----------\n multitrace_operator : Bempp Operator\n A 2 x 2 multitrace operator object whose spaces are used to define\n the identity operator.\n\n Output\n ------\n A block-diagonal multitrace identity operator.\n\n '
from bempp.api.assembly.blocked_operator import BlockedOperator
(domain0, domain1) = multitrace_operator.domain_spaces
(dual_to_range0, dual_to_range1) = multitrace_operator.dual_to_range_spaces
(range0, range1) = multitrace_operator.range_spaces
blocked_operator = BlockedOperator(2, 2)
blocked_operator[(0, 0)] = identity(domain0, range0, dual_to_range0)
blocked_operator[(1, 1)] = identity(domain1, range1, dual_to_range1)
return blocked_operator | def multitrace_identity(multitrace_operator, parameters=None, device_interface=None, precision=None):
'\n Create a multitrace identity operator.\n\n Parameters\n ----------\n multitrace_operator : Bempp Operator\n A 2 x 2 multitrace operator object whose spaces are used to define\n the identity operator.\n\n Output\n ------\n A block-diagonal multitrace identity operator.\n\n '
from bempp.api.assembly.blocked_operator import BlockedOperator
(domain0, domain1) = multitrace_operator.domain_spaces
(dual_to_range0, dual_to_range1) = multitrace_operator.dual_to_range_spaces
(range0, range1) = multitrace_operator.range_spaces
blocked_operator = BlockedOperator(2, 2)
blocked_operator[(0, 0)] = identity(domain0, range0, dual_to_range0)
blocked_operator[(1, 1)] = identity(domain1, range1, dual_to_range1)
return blocked_operator<|docstring|>Create a multitrace identity operator.
Parameters
----------
multitrace_operator : Bempp Operator
A 2 x 2 multitrace operator object whose spaces are used to define
the identity operator.
Output
------
A block-diagonal multitrace identity operator.<|endoftext|> |
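
The `multitrace_identity` snippet above only fills the diagonal of a 2 x 2 blocked operator. A minimal NumPy sketch of that block-diagonal layout (an illustration only, not the bempp-cl API; the small matrices are made-up stand-ins for the two discretised identity blocks):

```python
# Block-diagonal layout: identity (mass) blocks on the diagonal, zeros elsewhere.
# Illustration only, not the bempp-cl API; M0 and M1 are made-up stand-ins for
# identity(domain0, range0, dual_to_range0) and identity(domain1, range1, dual_to_range1).
import numpy as np

M0 = np.array([[2.0, 1.0], [1.0, 2.0]])  # stand-in for the first identity block
M1 = np.array([[3.0]])                   # stand-in for the second identity block

blocked = np.block([
    [M0, np.zeros((M0.shape[0], M1.shape[1]))],
    [np.zeros((M1.shape[0], M0.shape[1])), M1],
])
print(blocked.shape)  # (3, 3): a 2 x 2 block operator with identity blocks on the diagonal
```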
d0b7cbe0b81043a31709eeb3de216cb9c348da799b4216ff3fe052d23b48a347 | def sigma_identity(domain, range_, dual_to_range, parameters=None, device_interface=None, precision=None):
'\n Evaluate the sigma identity operator.\n\n For Galerkin methods this operator is equivalent to .5 * identity. For\n collocation methods the value may differ from .5 on piecewise smooth\n domains.\n '
from bempp.api.utils.helpers import assign_parameters
parameters = assign_parameters(parameters)
if (parameters.assembly.discretization_type == 'galerkin'):
return (0.5 * identity(domain, range_, dual_to_range, parameters=parameters, device_interface=device_interface, precision=precision))
elif (parameters.assembly.discretization_type == 'collocation'):
raise ValueError('Not yet implemented.') | Evaluate the sigma identity operator.
For Galerkin methods this operator is equivalent to .5 * identity. For
collocation methods the value may differ from .5 on piecewise smooth
domains. | bempp/api/operators/boundary/sparse.py | sigma_identity | tbetcke/bempp-cl | 0 | python | def sigma_identity(domain, range_, dual_to_range, parameters=None, device_interface=None, precision=None):
'\n Evaluate the sigma identity operator.\n\n For Galerkin methods this operator is equivalent to .5 * identity. For\n collocation methods the value may differ from .5 on piecewise smooth\n domains.\n '
from bempp.api.utils.helpers import assign_parameters
parameters = assign_parameters(parameters)
if (parameters.assembly.discretization_type == 'galerkin'):
return (0.5 * identity(domain, range_, dual_to_range, parameters=parameters, device_interface=device_interface, precision=precision))
elif (parameters.assembly.discretization_type == 'collocation'):
raise ValueError('Not yet implemented.') | def sigma_identity(domain, range_, dual_to_range, parameters=None, device_interface=None, precision=None):
'\n Evaluate the sigma identity operator.\n\n For Galerkin methods this operator is equivalent to .5 * identity. For\n collocation methods the value may differ from .5 on piecewise smooth\n domains.\n '
from bempp.api.utils.helpers import assign_parameters
parameters = assign_parameters(parameters)
if (parameters.assembly.discretization_type == 'galerkin'):
return (0.5 * identity(domain, range_, dual_to_range, parameters=parameters, device_interface=device_interface, precision=precision))
elif (parameters.assembly.discretization_type == 'collocation'):
raise ValueError('Not yet implemented.')<|docstring|>Evaluate the sigma identity operator.
For Galerkin methods this operator is equivalent to .5 * identity. For
collocation methods the value may differ from .5 on piecewise smooth
domains.<|endoftext|> |
54c8aee06a0c4fa2709bb56c84b3e5397e3fd11ff25c35a33fa4da54114a56a8 | def laplace_beltrami(domain, range_, dual_to_range, parameters=None, device_interface=None, precision=None):
'Assemble the negative Laplace-Beltrami operator.'
if (domain.shapeset.identifier != 'p1_discontinuous'):
raise ValueError("Domain shapeset must be of type 'p1_discontinuous'.")
if (dual_to_range.shapeset.identifier != 'p1_discontinuous'):
raise ValueError("Dual to range shapeset must be of type 'p1_discontinuous'.")
return _common.create_operator('laplace_beltrami', domain, range_, dual_to_range, parameters, 'sparse', [], 'laplace_beltrami', 'default_sparse', device_interface, precision, False) | Assemble the negative Laplace-Beltrami operator. | bempp/api/operators/boundary/sparse.py | laplace_beltrami | tbetcke/bempp-cl | 0 | python | def laplace_beltrami(domain, range_, dual_to_range, parameters=None, device_interface=None, precision=None):
if (domain.shapeset.identifier != 'p1_discontinuous'):
raise ValueError("Domain shapeset must be of type 'p1_discontinuous'.")
if (dual_to_range.shapeset.identifier != 'p1_discontinuous'):
raise ValueError("Dual to range shapeset must be of type 'p1_discontinuous'.")
return _common.create_operator('laplace_beltrami', domain, range_, dual_to_range, parameters, 'sparse', [], 'laplace_beltrami', 'default_sparse', device_interface, precision, False) | def laplace_beltrami(domain, range_, dual_to_range, parameters=None, device_interface=None, precision=None):
if (domain.shapeset.identifier != 'p1_discontinuous'):
raise ValueError("Domain shapeset must be of type 'p1_discontinuous'.")
if (dual_to_range.shapeset.identifier != 'p1_discontinuous'):
raise ValueError("Dual to range shapeset must be of type 'p1_discontinuous'.")
return _common.create_operator('laplace_beltrami', domain, range_, dual_to_range, parameters, 'sparse', [], 'laplace_beltrami', 'default_sparse', device_interface, precision, False)<|docstring|>Assemble the negative Laplace-Beltrami operator.<|endoftext|> |
b819164e64384f2d529c55e7bf0f7d4a0df3e78c585a2e93b51d199d5a310220 | def sample(self):
'Samples an unsampled instance, by estimating the classification features,\n and determining the instance with the highest utility.'
if (len(self.dataset.complete.index) < self.initial_batch_size):
return self.initial_sample()
self.update()
best = ((- (10 ** 9)), None)
for instance in self.dataset.incomplete.index:
predicted_cf = self.imp_model.impute([pd.concat([self.dataset.incomplete.loc[instance][self.dataset.get_sf_names()], pd.Series(self.dataset.incomplete.loc[instance][self.dataset.get_y_name()], index=[self.dataset.get_y_name()])])])[0]
utility = self.estimate_utility(instance, pd.Series(predicted_cf, index=self.dataset.get_cf_names()))
if (utility > best[0]):
best = (utility, instance)
return best[1] | Samples an unsampled instance, by estimating the classification features,
and determining the instance with the highest utility. | src/approaches/acfa/acfa.py | sample | thomastkok/active-selection-of-classification-features | 0 | python | def sample(self):
'Samples an unsampled instance, by estimating the classification features,\n and determining the instance with the highest utility.'
if (len(self.dataset.complete.index) < self.initial_batch_size):
return self.initial_sample()
self.update()
best = ((- (10 ** 9)), None)
for instance in self.dataset.incomplete.index:
predicted_cf = self.imp_model.impute([pd.concat([self.dataset.incomplete.loc[instance][self.dataset.get_sf_names()], pd.Series(self.dataset.incomplete.loc[instance][self.dataset.get_y_name()], index=[self.dataset.get_y_name()])])])[0]
utility = self.estimate_utility(instance, pd.Series(predicted_cf, index=self.dataset.get_cf_names()))
if (utility > best[0]):
best = (utility, instance)
return best[1] | def sample(self):
'Samples an unsampled instance, by estimating the classification features,\n and determining the instance with the highest utility.'
if (len(self.dataset.complete.index) < self.initial_batch_size):
return self.initial_sample()
self.update()
best = ((- (10 ** 9)), None)
for instance in self.dataset.incomplete.index:
predicted_cf = self.imp_model.impute([pd.concat([self.dataset.incomplete.loc[instance][self.dataset.get_sf_names()], pd.Series(self.dataset.incomplete.loc[instance][self.dataset.get_y_name()], index=[self.dataset.get_y_name()])])])[0]
utility = self.estimate_utility(instance, pd.Series(predicted_cf, index=self.dataset.get_cf_names()))
if (utility > best[0]):
best = (utility, instance)
return best[1]<|docstring|>Samples an unsampled instance, by estimating the classification features,
and determining the instance with the highest utility.<|endoftext|> |
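
The `sample` method above follows a simple acquisition loop: impute the missing classification features of every unsampled instance, score each candidate, and pick the argmax. A self-contained sketch of that loop with made-up stand-ins for the imputation model and the utility function:

```python
# Selection loop sketch: impute classification features per unsampled instance,
# score them, return the best instance. The imputer and utility function are
# toy stand-ins, not the ACFA implementation.
import pandas as pd

def select_best(incomplete: pd.DataFrame, impute, utility):
    best_score, best_instance = float("-inf"), None
    for instance, row in incomplete.iterrows():
        predicted_cf = impute(row)               # estimate the classification features
        score = utility(instance, predicted_cf)  # expected benefit of acquiring them
        if score > best_score:
            best_score, best_instance = score, instance
    return best_instance

# Toy usage with stand-in models.
incomplete = pd.DataFrame({"sf": [0.1, 0.9, 0.5]}, index=["a", "b", "c"])
print(select_best(incomplete, impute=lambda r: r["sf"] * 2, utility=lambda i, cf: cf))  # -> "b"
```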
c77322d9b563abb02ecf2679b66b5c7c41b92126aa4e8c594f1e1d12c0a98506 | def update(self):
'Retrains the imputation model and the classifier.'
self.imp_model.train(pd.concat([self.dataset.complete[self.dataset.get_sf_names()], self.dataset.complete[self.dataset.get_y_name()]], axis=1), self.dataset.complete[self.dataset.get_cf_names()])
self.clf.fit(self.dataset.complete[self.dataset.get_cf_names()], self.dataset.complete[self.dataset.get_y_name()]) | Retrains the imputation model and the classifier. | src/approaches/acfa/acfa.py | update | thomastkok/active-selection-of-classification-features | 0 | python | def update(self):
self.imp_model.train(pd.concat([self.dataset.complete[self.dataset.get_sf_names()], self.dataset.complete[self.dataset.get_y_name()]], axis=1), self.dataset.complete[self.dataset.get_cf_names()])
self.clf.fit(self.dataset.complete[self.dataset.get_cf_names()], self.dataset.complete[self.dataset.get_y_name()]) | def update(self):
self.imp_model.train(pd.concat([self.dataset.complete[self.dataset.get_sf_names()], self.dataset.complete[self.dataset.get_y_name()]], axis=1), self.dataset.complete[self.dataset.get_cf_names()])
self.clf.fit(self.dataset.complete[self.dataset.get_cf_names()], self.dataset.complete[self.dataset.get_y_name()])<|docstring|>Retrains the imputation model and the classifier.<|endoftext|> |
9ce51f234eb133f5127a0f0f27c9f0d70752502baa4979edfb3058f23a758930 | def fix_missing_velo(self):
'Fix issue with pykitti not handling missing velodyne data (files)\n '
velo_frames = [int(Path(fpath).stem) for fpath in self.data_kitti.velo_files]
missing_frames = sorted((set(range(velo_frames[0], velo_frames[(- 1)])) - set(velo_frames)))
velo_files = self.data_kitti.velo_files.copy()
for missing_frame in missing_frames:
velo_files.insert(missing_frame, 'BOGUS_FILE_WILL_THROW_EXCEPTION')
self.data_kitti.velo_files = velo_files | Fix issue with pykitti not handling missing velodyne data (files) | kittiground/kittiground/__init__.py | fix_missing_velo | JeremyBYU/polylidar-kitti | 0 | python | def fix_missing_velo(self):
'\n '
velo_frames = [int(Path(fpath).stem) for fpath in self.data_kitti.velo_files]
missing_frames = sorted((set(range(velo_frames[0], velo_frames[(- 1)])) - set(velo_frames)))
velo_files = self.data_kitti.velo_files.copy()
for missing_frame in missing_frames:
velo_files.insert(missing_frame, 'BOGUS_FILE_WILL_THROW_EXCEPTION')
self.data_kitti.velo_files = velo_files | def fix_missing_velo(self):
'\n '
velo_frames = [int(Path(fpath).stem) for fpath in self.data_kitti.velo_files]
missing_frames = sorted((set(range(velo_frames[0], velo_frames[(- 1)])) - set(velo_frames)))
velo_files = self.data_kitti.velo_files.copy()
for missing_frame in missing_frames:
velo_files.insert(missing_frame, 'BOGUS_FILE_WILL_THROW_EXCEPTION')
self.data_kitti.velo_files = velo_files<|docstring|>Fix issue with pykitti not handling missing velodyne data (files)<|endoftext|> |
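
`fix_missing_velo` works by detecting gaps in the sorted frame numbers and inserting placeholder file names so positional indexing stays aligned with the frame indices. A small standalone illustration with invented file names (pykitti itself is not involved here):

```python
# Find frame numbers missing from a sorted file list and insert placeholders so
# that list position i still corresponds to frame i. File names are made up.
from pathlib import Path

velo_files = ["0000.bin", "0001.bin", "0003.bin", "0005.bin"]
frames = [int(Path(f).stem) for f in velo_files]
missing = sorted(set(range(frames[0], frames[-1])) - set(frames))  # [2, 4]

for m in missing:
    velo_files.insert(m, "BOGUS_FILE_WILL_THROW_EXCEPTION")

print(velo_files)
# ['0000.bin', '0001.bin', 'BOGUS_FILE_WILL_THROW_EXCEPTION',
#  '0003.bin', 'BOGUS_FILE_WILL_THROW_EXCEPTION', '0005.bin']
```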
967531c22ad387ed3e0aa32bf3d21a2bfc6c569e80051c293557cf1ffffc9e3b | def load_projections(self, camN: str):
'Loads projections for the camera of interest\n\n Arguments:\n camN {str} -- Camera cam0, cam1, cam2, cam3\n '
self.R00 = self.data_kitti.calib.R_rect_00
if (camN == 'cam0'):
T_cam_velo = self.data_kitti.calib.T_cam0_velo
T_cam_imu = self.data_kitti.calib.T_cam0_imu
P_rect = self.data_kitti.calib.P_rect_00
get_cam_fn = getattr(self.data_kitti, 'get_cam0')
elif (camN == 'cam1'):
T_cam_velo = self.data_kitti.calib.T_cam1_velo
T_cam_imu = self.data_kitti.calib.T_cam2_imu
P_rect = self.data_kitti.calib.P_rect_10
get_cam_fn = getattr(self.data_kitti, 'get_cam1')
elif (camN == 'cam2'):
T_cam_velo = self.data_kitti.calib.T_cam2_velo
T_cam_imu = self.data_kitti.calib.T_cam2_imu
P_rect = self.data_kitti.calib.P_rect_20
get_cam_fn = getattr(self.data_kitti, 'get_cam2')
elif (camN == 'cam3'):
T_cam_velo = self.data_kitti.calib.T_cam3_velo
T_cam_imu = self.data_kitti.calib.T_cam3_imu
P_rect = self.data_kitti.calib.P_rect_30
get_cam_fn = getattr(self.data_kitti, 'get_cam3')
self.T_cam_velo = T_cam_velo
self.P_rect = P_rect
self.get_cam_fn = get_cam_fn
self.T_cam_imu = T_cam_imu
self.T_velo_imu = self.data_kitti.calib.T_velo_imu | Loads projections for the camera of interest
Arguments:
camN {str} -- Camera cam0, cam1, cam2, cam3 | kittiground/kittiground/__init__.py | load_projections | JeremyBYU/polylidar-kitti | 0 | python | def load_projections(self, camN: str):
'Loads projections for the camera of interest\n\n Arguments:\n camN {str} -- Camera cam0, cam1, cam2, cam3\n '
self.R00 = self.data_kitti.calib.R_rect_00
if (camN == 'cam0'):
T_cam_velo = self.data_kitti.calib.T_cam0_velo
T_cam_imu = self.data_kitti.calib.T_cam0_imu
P_rect = self.data_kitti.calib.P_rect_00
get_cam_fn = getattr(self.data_kitti, 'get_cam0')
elif (camN == 'cam1'):
T_cam_velo = self.data_kitti.calib.T_cam1_velo
T_cam_imu = self.data_kitti.calib.T_cam2_imu
P_rect = self.data_kitti.calib.P_rect_10
get_cam_fn = getattr(self.data_kitti, 'get_cam1')
elif (camN == 'cam2'):
T_cam_velo = self.data_kitti.calib.T_cam2_velo
T_cam_imu = self.data_kitti.calib.T_cam2_imu
P_rect = self.data_kitti.calib.P_rect_20
get_cam_fn = getattr(self.data_kitti, 'get_cam2')
elif (camN == 'cam3'):
T_cam_velo = self.data_kitti.calib.T_cam3_velo
T_cam_imu = self.data_kitti.calib.T_cam3_imu
P_rect = self.data_kitti.calib.P_rect_30
get_cam_fn = getattr(self.data_kitti, 'get_cam3')
self.T_cam_velo = T_cam_velo
self.P_rect = P_rect
self.get_cam_fn = get_cam_fn
self.T_cam_imu = T_cam_imu
self.T_velo_imu = self.data_kitti.calib.T_velo_imu | def load_projections(self, camN: str):
'Loads projections for the camera of interest\n\n Arguments:\n camN {str} -- Camera cam0, cam1, cam2, cam3\n '
self.R00 = self.data_kitti.calib.R_rect_00
if (camN == 'cam0'):
T_cam_velo = self.data_kitti.calib.T_cam0_velo
T_cam_imu = self.data_kitti.calib.T_cam0_imu
P_rect = self.data_kitti.calib.P_rect_00
get_cam_fn = getattr(self.data_kitti, 'get_cam0')
elif (camN == 'cam1'):
T_cam_velo = self.data_kitti.calib.T_cam1_velo
T_cam_imu = self.data_kitti.calib.T_cam2_imu
P_rect = self.data_kitti.calib.P_rect_10
get_cam_fn = getattr(self.data_kitti, 'get_cam1')
elif (camN == 'cam2'):
T_cam_velo = self.data_kitti.calib.T_cam2_velo
T_cam_imu = self.data_kitti.calib.T_cam2_imu
P_rect = self.data_kitti.calib.P_rect_20
get_cam_fn = getattr(self.data_kitti, 'get_cam2')
elif (camN == 'cam3'):
T_cam_velo = self.data_kitti.calib.T_cam3_velo
T_cam_imu = self.data_kitti.calib.T_cam3_imu
P_rect = self.data_kitti.calib.P_rect_30
get_cam_fn = getattr(self.data_kitti, 'get_cam3')
self.T_cam_velo = T_cam_velo
self.P_rect = P_rect
self.get_cam_fn = get_cam_fn
self.T_cam_imu = T_cam_imu
self.T_velo_imu = self.data_kitti.calib.T_velo_imu<|docstring|>Loads projections for the camera of interest
Arguments:
camN {str} -- Camera cam0, cam1, cam2, cam3<|endoftext|> |
3571adc9946956b86dbac4e93cca2c3034e250883f27346614671529cc34ff42 | def get_velo(self, frame_idx):
'Gets veldoyne data of the frame index of interest\n\n Arguments:\n frame_idx {int} -- Frame index\n\n Returns:\n (ndarray, ndarray) -- Point cloud in the camN frame, corresponding intensity\n '
pts3D_velo_unrectified = self.data_kitti.get_velo(frame_idx)
pts3D_velo_unrectified = self.downsamle_pc(pts3D_velo_unrectified, self.pointcloud['downsample'])
intensity = np.copy(pts3D_velo_unrectified[(:, 3)])
pts3D_velo_unrectified[(:, 3)] = 1
pts3D_velo_unrectified = pts3D_velo_unrectified.transpose()
pts3D_cam_rect = (self.T_cam_velo @ pts3D_velo_unrectified)
idx = (pts3D_cam_rect[(2, :)] > 0)
pts3D_cam_rect_filt = np.ascontiguousarray(pts3D_cam_rect[(:, idx)])
intensity_filt = np.ascontiguousarray(intensity[idx])
return (pts3D_cam_rect_filt, intensity_filt) | Gets veldoyne data of the frame index of interest
Arguments:
frame_idx {int} -- Frame index
Returns:
(ndarray, ndarray) -- Point cloud in the camN frame, corresponding intensity | kittiground/kittiground/__init__.py | get_velo | JeremyBYU/polylidar-kitti | 0 | python | def get_velo(self, frame_idx):
'Gets veldoyne data of the frame index of interest\n\n Arguments:\n frame_idx {int} -- Frame index\n\n Returns:\n (ndarray, ndarray) -- Point cloud in the camN frame, corresponding intensity\n '
pts3D_velo_unrectified = self.data_kitti.get_velo(frame_idx)
pts3D_velo_unrectified = self.downsamle_pc(pts3D_velo_unrectified, self.pointcloud['downsample'])
intensity = np.copy(pts3D_velo_unrectified[(:, 3)])
pts3D_velo_unrectified[(:, 3)] = 1
pts3D_velo_unrectified = pts3D_velo_unrectified.transpose()
pts3D_cam_rect = (self.T_cam_velo @ pts3D_velo_unrectified)
idx = (pts3D_cam_rect[(2, :)] > 0)
pts3D_cam_rect_filt = np.ascontiguousarray(pts3D_cam_rect[(:, idx)])
intensity_filt = np.ascontiguousarray(intensity[idx])
return (pts3D_cam_rect_filt, intensity_filt) | def get_velo(self, frame_idx):
'Gets veldoyne data of the frame index of interest\n\n Arguments:\n frame_idx {int} -- Frame index\n\n Returns:\n (ndarray, ndarray) -- Point cloud in the camN frame, corresponding intensity\n '
pts3D_velo_unrectified = self.data_kitti.get_velo(frame_idx)
pts3D_velo_unrectified = self.downsamle_pc(pts3D_velo_unrectified, self.pointcloud['downsample'])
intensity = np.copy(pts3D_velo_unrectified[(:, 3)])
pts3D_velo_unrectified[(:, 3)] = 1
pts3D_velo_unrectified = pts3D_velo_unrectified.transpose()
pts3D_cam_rect = (self.T_cam_velo @ pts3D_velo_unrectified)
idx = (pts3D_cam_rect[(2, :)] > 0)
pts3D_cam_rect_filt = np.ascontiguousarray(pts3D_cam_rect[(:, idx)])
intensity_filt = np.ascontiguousarray(intensity[idx])
return (pts3D_cam_rect_filt, intensity_filt)<|docstring|>Gets veldoyne data of the frame index of interest
Arguments:
frame_idx {int} -- Frame index
Returns:
(ndarray, ndarray) -- Point cloud in the camN frame, corresponding intensity<|endoftext|> |
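
`get_velo` appends a homogeneous coordinate, applies the 4 x 4 velodyne-to-camera extrinsic, and keeps only points with positive depth. The same pattern in a self-contained NumPy sketch with a made-up transform and toy points (not real KITTI calibration data):

```python
# Homogeneous transform plus in-front-of-camera filter, mirroring get_velo above.
# The extrinsic matrix and the points are invented for illustration.
import numpy as np

points = np.array([[1.0, 0.0, 2.0, 0.4],    # x, y, z, intensity
                   [0.0, 1.0, -1.0, 0.9]])
intensity = points[:, 3].copy()
points[:, 3] = 1.0                           # homogeneous coordinate
T_cam_velo = np.eye(4)                       # stand-in extrinsic calibration

pts_cam = T_cam_velo @ points.T              # 4 x N cloud in the camera frame
in_front = pts_cam[2, :] > 0                 # keep points with positive depth
pts_cam_filt = np.ascontiguousarray(pts_cam[:, in_front])
intensity_filt = intensity[in_front]
print(pts_cam_filt.shape, intensity_filt)    # (4, 1) [0.4]
```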
fa6e543d0af9b14c70e12c07f128c93a5fb8fb915de65b9f6ed504a0963b7b32 | def load_frame(self, frame_idx: int):
'Load frame from kitti\n\n Arguments:\n frame_idx {int} -- Frame index\n\n Returns:\n (img, pts2D, pts2D_color, pts3D) -- M X N ndarray image, projected lidar points, color for points, velodyne points\n '
imgN = cv2.cvtColor(np.asarray(self.get_cam_fn(frame_idx)), cv2.COLOR_RGB2BGR)
(pts3D_cam, intensity) = self.get_velo(frame_idx)
oxts = self.data_kitti.oxts[frame_idx]
(pts2D_cam, idx) = project_points(pts3D_cam, self.P_rect, self.img_m, self.img_n)
pts3D_cam = pts3D_cam[(:, idx)].T[(:, :3)]
intensity = intensity[idx]
pose_cam = (self.T_velo_imu @ oxts.T_w_imu)
logging.debug('Roll: %.3f; Pitch: %.3f; Yaw: %.3f', np.degrees(oxts.packet.roll), np.degrees(oxts.packet.pitch), np.degrees(oxts.packet.yaw))
if (self.pointcloud['color'] == 'intensity'):
color = intensity
data_min = 0.0
data_max = 1.0
elif (self.pointcloud['color'] == 'distance'):
distance = np.sqrt((((pts3D_cam[(:, 0)] ** 2) + (pts3D_cam[(:, 1)] ** 2)) + (pts3D_cam[(:, 2)] ** 2)))
color = distance
data_min = 0
data_max = 57
else:
z_height = (- pts3D_cam[(:, 1)])
color = z_height
data_min = (- 2.3)
data_max = 2
color = self.get_colors(color, vmin=data_min, vmax=data_max)
if self.pointcloud['outlier_removal']:
t0 = time.time()
mask = outlier_removal(pts3D_cam)
pts3D_cam = pts3D_cam[((~ mask), :)]
pts2D_cam = pts2D_cam[(:, (~ mask))]
color = color[((~ mask), :)]
t1 = time.time()
t_outlierpc = ((t1 - t0) * 1000)
else:
mask = np.zeros(color.shape, dtype=np.bool)
return (imgN, pts2D_cam, color, pts3D_cam, mask, t_outlierpc) | Load frame from kitti
Arguments:
frame_idx {int} -- Frame index
Returns:
(img, pts2D, pts2D_color, pts3D) -- M X N ndarray image, projected lidar points, color for points, velodyne points | kittiground/kittiground/__init__.py | load_frame | JeremyBYU/polylidar-kitti | 0 | python | def load_frame(self, frame_idx: int):
'Load frame from kitti\n\n Arguments:\n frame_idx {int} -- Frame index\n\n Returns:\n (img, pts2D, pts2D_color, pts3D) -- M X N ndarray image, projected lidar points, color for points, velodyne points\n '
imgN = cv2.cvtColor(np.asarray(self.get_cam_fn(frame_idx)), cv2.COLOR_RGB2BGR)
(pts3D_cam, intensity) = self.get_velo(frame_idx)
oxts = self.data_kitti.oxts[frame_idx]
(pts2D_cam, idx) = project_points(pts3D_cam, self.P_rect, self.img_m, self.img_n)
pts3D_cam = pts3D_cam[(:, idx)].T[(:, :3)]
intensity = intensity[idx]
pose_cam = (self.T_velo_imu @ oxts.T_w_imu)
logging.debug('Roll: %.3f; Pitch: %.3f; Yaw: %.3f', np.degrees(oxts.packet.roll), np.degrees(oxts.packet.pitch), np.degrees(oxts.packet.yaw))
if (self.pointcloud['color'] == 'intensity'):
color = intensity
data_min = 0.0
data_max = 1.0
elif (self.pointcloud['color'] == 'distance'):
distance = np.sqrt((((pts3D_cam[(:, 0)] ** 2) + (pts3D_cam[(:, 1)] ** 2)) + (pts3D_cam[(:, 2)] ** 2)))
color = distance
data_min = 0
data_max = 57
else:
z_height = (- pts3D_cam[(:, 1)])
color = z_height
data_min = (- 2.3)
data_max = 2
color = self.get_colors(color, vmin=data_min, vmax=data_max)
if self.pointcloud['outlier_removal']:
t0 = time.time()
mask = outlier_removal(pts3D_cam)
pts3D_cam = pts3D_cam[((~ mask), :)]
pts2D_cam = pts2D_cam[(:, (~ mask))]
color = color[((~ mask), :)]
t1 = time.time()
t_outlierpc = ((t1 - t0) * 1000)
else:
mask = np.zeros(color.shape, dtype=np.bool)
return (imgN, pts2D_cam, color, pts3D_cam, mask, t_outlierpc) | def load_frame(self, frame_idx: int):
'Load frame from kitti\n\n Arguments:\n frame_idx {int} -- Frame index\n\n Returns:\n (img, pts2D, pts2D_color, pts3D) -- M X N ndarray image, projected lidar points, color for points, velodyne points\n '
imgN = cv2.cvtColor(np.asarray(self.get_cam_fn(frame_idx)), cv2.COLOR_RGB2BGR)
(pts3D_cam, intensity) = self.get_velo(frame_idx)
oxts = self.data_kitti.oxts[frame_idx]
(pts2D_cam, idx) = project_points(pts3D_cam, self.P_rect, self.img_m, self.img_n)
pts3D_cam = pts3D_cam[(:, idx)].T[(:, :3)]
intensity = intensity[idx]
pose_cam = (self.T_velo_imu @ oxts.T_w_imu)
logging.debug('Roll: %.3f; Pitch: %.3f; Yaw: %.3f', np.degrees(oxts.packet.roll), np.degrees(oxts.packet.pitch), np.degrees(oxts.packet.yaw))
if (self.pointcloud['color'] == 'intensity'):
color = intensity
data_min = 0.0
data_max = 1.0
elif (self.pointcloud['color'] == 'distance'):
distance = np.sqrt((((pts3D_cam[(:, 0)] ** 2) + (pts3D_cam[(:, 1)] ** 2)) + (pts3D_cam[(:, 2)] ** 2)))
color = distance
data_min = 0
data_max = 57
else:
z_height = (- pts3D_cam[(:, 1)])
color = z_height
data_min = (- 2.3)
data_max = 2
color = self.get_colors(color, vmin=data_min, vmax=data_max)
if self.pointcloud['outlier_removal']:
t0 = time.time()
mask = outlier_removal(pts3D_cam)
pts3D_cam = pts3D_cam[((~ mask), :)]
pts2D_cam = pts2D_cam[(:, (~ mask))]
color = color[((~ mask), :)]
t1 = time.time()
t_outlierpc = ((t1 - t0) * 1000)
else:
mask = np.zeros(color.shape, dtype=np.bool)
return (imgN, pts2D_cam, color, pts3D_cam, mask, t_outlierpc)<|docstring|>Load frame from kitti
Arguments:
frame_idx {int} -- Frame index
Returns:
(img, pts2D, pts2D_color, pts3D) -- M X N ndarray image, projected lidar points, color for points, velodyne points<|endoftext|> |
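
`load_frame` relies on a `project_points` helper to map the camera-frame cloud into pixel coordinates and discard points outside the image. A rough NumPy sketch of that projection step; the matrix, image size and points are invented, and the real `project_points` signature in kittiground may differ:

```python
# Project homogeneous 3D camera-frame points with a 3x4 projection matrix and
# keep those landing inside the image. All numbers here are made up.
import numpy as np

def project(pts3d_h, P, width, height):
    uvw = P @ pts3d_h                      # 3 x N homogeneous pixel coordinates
    uv = uvw[:2, :] / uvw[2, :]            # perspective divide
    inside = ((uv[0, :] >= 0) & (uv[0, :] < width) &
              (uv[1, :] >= 0) & (uv[1, :] < height))
    return uv[:, inside], inside

P = np.array([[700.0, 0.0, 600.0, 0.0],
              [0.0, 700.0, 180.0, 0.0],
              [0.0, 0.0, 1.0, 0.0]])       # made-up rectified projection matrix
pts = np.array([[0.0, 10.0], [0.0, 0.0], [5.0, 5.0], [1.0, 1.0]])  # 4 x N homogeneous points
uv, mask = project(pts, P, width=1242, height=375)
print(uv, mask)                            # first point lands inside the image, second does not
```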
b85bd09509bb0931034038d4547848ce0efb9d2d50af8c0e65b35b996d246f2b | @staticmethod
def from_json(json_content: Dict, single_tables: Dict[(str, SingleTable)]) -> 'FlatTable':
'\n Build flat table from metadata.\n\n Parameters\n ----------\n json_content : Dict, flat table part in metadata.\n single_tables: Dict, single tables in this flat table.\n\n See Also\n --------\n FlatTableCollection.from_json(json_file) : FlatTableCollection.\n SingleTable.from_json(json_content) : SingleTable.\n\n Notes\n -----\n Generally, this method is called by FlatTableCollection.from_json(json_file).\n We got a flat table by FlatTableCollection.get(flat_table_name).\n '
path = '{}/{}'.format(json_content['output_path'], json_content['output_table'])
return FlatTable(json_content['output_table'], read_data_frame(path), json_content['output_table'], json_content['join_keys'], single_tables) | Build flat table from metadata.
Parameters
----------
json_content : Dict, flat table part in metadata.
single_tables: Dict, single tables in this flat table.
See Also
--------
FlatTableCollection.from_json(json_file) : FlatTableCollection.
SingleTable.from_json(json_content) : SingleTable.
Notes
-----
Generally, this method is called by FlatTableCollection.from_json(json_file).
We got a flat table by FlatTableCollection.get(flat_table_name). | scalpel/flattening/flat_table.py | from_json | X-DataInitiative/SCALPEL-Analysis | 7 | python | @staticmethod
def from_json(json_content: Dict, single_tables: Dict[(str, SingleTable)]) -> 'FlatTable':
'\n Build flat table from metadata.\n\n Parameters\n ----------\n json_content : Dict, flat table part in metadata.\n single_tables: Dict, single tables in this flat table.\n\n See Also\n --------\n FlatTableCollection.from_json(json_file) : FlatTableCollection.\n SingleTable.from_json(json_content) : SingleTable.\n\n Notes\n -----\n Generally, this method is called by FlatTableCollection.from_json(json_file).\n We got a flat table by FlatTableCollection.get(flat_table_name).\n '
path = '{}/{}'.format(json_content['output_path'], json_content['output_table'])
return FlatTable(json_content['output_table'], read_data_frame(path), json_content['output_table'], json_content['join_keys'], single_tables) | @staticmethod
def from_json(json_content: Dict, single_tables: Dict[(str, SingleTable)]) -> 'FlatTable':
'\n Build flat table from metadata.\n\n Parameters\n ----------\n json_content : Dict, flat table part in metadata.\n single_tables: Dict, single tables in this flat table.\n\n See Also\n --------\n FlatTableCollection.from_json(json_file) : FlatTableCollection.\n SingleTable.from_json(json_content) : SingleTable.\n\n Notes\n -----\n Generally, this method is called by FlatTableCollection.from_json(json_file).\n We got a flat table by FlatTableCollection.get(flat_table_name).\n '
path = '{}/{}'.format(json_content['output_path'], json_content['output_table'])
return FlatTable(json_content['output_table'], read_data_frame(path), json_content['output_table'], json_content['join_keys'], single_tables)<|docstring|>Build flat table from metadata.
Parameters
----------
json_content : Dict, flat table part in metadata.
single_tables: Dict, single tables in this flat table.
See Also
--------
FlatTableCollection.from_json(json_file) : FlatTableCollection.
SingleTable.from_json(json_content) : SingleTable.
Notes
-----
Generally, this method is called by FlatTableCollection.from_json(json_file).
We got a flat table by FlatTableCollection.get(flat_table_name).<|endoftext|> |
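
`FlatTable.from_json` only reads three keys from the metadata fragment it receives. A hypothetical example of that fragment's shape (the path, table name and join keys are invented for illustration):

```python
# Hypothetical metadata fragment matching the keys read by FlatTable.from_json.
json_content = {
    "output_path": "/flattening/output",   # hypothetical path
    "output_table": "FLAT_TABLE",          # hypothetical table name
    "join_keys": ["PATIENT_ID"],           # hypothetical join keys
}
# flat_table = FlatTable.from_json(json_content, single_tables={})  # needs Spark and real data
```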
871fce1c4675aeb61308a19461769b2da9f96aa917aa2965266d4401ce6c0505 | def union(self, other: 'FlatTable') -> 'FlatTable':
'\n Return a new flat table containing union of values in self and another.\n\n Parameters\n ----------\n other: FlatTable, a flat table that will be united.\n '
return _union(self, other) | Return a new flat table containing union of values in self and another.
Parameters
----------
other: FlatTable, a flat table that will be united. | scalpel/flattening/flat_table.py | union | X-DataInitiative/SCALPEL-Analysis | 7 | python | def union(self, other: 'FlatTable') -> 'FlatTable':
'\n Return a new flat table containing union of values in self and another.\n\n Parameters\n ----------\n other: FlatTable, a flat table that will be united.\n '
return _union(self, other) | def union(self, other: 'FlatTable') -> 'FlatTable':
'\n Return a new flat table containing union of values in self and another.\n\n Parameters\n ----------\n other: FlatTable, a flat table that will be united.\n '
return _union(self, other)<|docstring|>Return a new flat table containing union of values in self and another.
Parameters
----------
other: FlatTable, a flat table that will be united.<|endoftext|> |
5ea1ae226907185638a41a07de8c852b9c9b9f40150cf6579d999aedfc82d894 | def intersection(self, other: 'FlatTable') -> 'FlatTable':
'\n Return a new flat table containing rows only in both self and another.\n\n Parameters\n ----------\n other: FlatTable, a FlatTable that will be joined with self.\n '
return _intersection(self, other, self._jk) | Return a new flat table containing rows only in both self and another.
Parameters
----------
other: FlatTable, a FlatTable that will be joined with self. | scalpel/flattening/flat_table.py | intersection | X-DataInitiative/SCALPEL-Analysis | 7 | python | def intersection(self, other: 'FlatTable') -> 'FlatTable':
'\n Return a new flat table containing rows only in both self and another.\n\n Parameters\n ----------\n other: FlatTable, a FlatTable that will be joined with self.\n '
return _intersection(self, other, self._jk) | def intersection(self, other: 'FlatTable') -> 'FlatTable':
'\n Return a new flat table containing rows only in both self and another.\n\n Parameters\n ----------\n other: FlatTable, a FlatTable that will be joined with self.\n '
return _intersection(self, other, self._jk)<|docstring|>Return a new flat table containing rows only in both self and another.
Parameters
----------
other: FlatTable, a FlatTable that will be joined with self.<|endoftext|> |
98d47c709429858e4f32f414ee989a52269351513d97b00dba08642bd4a959eb | def difference(self, other: 'FlatTable') -> 'FlatTable':
'\n Return each value in self that is not contained in another.\n\n Parameters\n ----------\n other: FlatTable, a flat table that will be compared with self.\n '
return _difference(self, other, self._jk) | Return each value in self that is not contained in another.
Parameters
----------
other: FlatTable, a flat table that will be compared with self. | scalpel/flattening/flat_table.py | difference | X-DataInitiative/SCALPEL-Analysis | 7 | python | def difference(self, other: 'FlatTable') -> 'FlatTable':
'\n Return each value in self that is not contained in another.\n\n Parameters\n ----------\n other: FlatTable, a flat table that will be compared with self.\n '
return _difference(self, other, self._jk) | def difference(self, other: 'FlatTable') -> 'FlatTable':
'\n Return each value in self that is not contained in another.\n\n Parameters\n ----------\n other: FlatTable, a flat table that will be compared with self.\n '
return _difference(self, other, self._jk)<|docstring|>Return each value in self that is not contained in another.
Parameters
----------
other: FlatTable, a flat table that will be compared with self.<|endoftext|> |
60f71d4813df25474b9aae6c93d490ec01bad93dc3f144198b181d674d1b227e | @staticmethod
def union_all(flat_tables: Iterable['FlatTable']) -> 'FlatTable':
'\n Return a new flat table containing union of values in an iteration of flat tables.\n\n Parameters\n ----------\n flat_tables: Iterable, an iteration of flat tables that will be united.\n '
return fold_right(_union, flat_tables) | Return a new flat table containing union of values in an iteration of flat tables.
Parameters
----------
flat_tables: Iterable, an iteration of flat tables that will be united. | scalpel/flattening/flat_table.py | union_all | X-DataInitiative/SCALPEL-Analysis | 7 | python | @staticmethod
def union_all(flat_tables: Iterable['FlatTable']) -> 'FlatTable':
'\n Return a new flat table containing union of values in an iteration of flat tables.\n\n Parameters\n ----------\n flat_tables: Iterable, an iteration of flat tables that will be united.\n '
return fold_right(_union, flat_tables) | @staticmethod
def union_all(flat_tables: Iterable['FlatTable']) -> 'FlatTable':
'\n Return a new flat table containing union of values in an iteration of flat tables.\n\n Parameters\n ----------\n flat_tables: Iterable, an iteration of flat tables that will be united.\n '
return fold_right(_union, flat_tables)<|docstring|>Return a new flat table containing union of values in an iteration of flat tables.
Parameters
----------
flat_tables: Iterable, an iteration of flat tables that will be united.<|endoftext|> |
eac19c4be35640fe25a67e392f625de68dbfeb7eeb44730eed199e4b0b0c9032 | @staticmethod
def intersection_all(flat_tables: Iterable['FlatTable'], join_keys: List[str]) -> 'FlatTable':
'\n Return a new flat table containing rows only in each from an iteration.\n\n Parameters\n ----------\n flat_tables: Iterable, an iteration of flat tables that will be joined with self.\n join_keys: List, join keys used to join each in the iteration.\n '
return fold_right(partial(_intersection, join_keys=join_keys), flat_tables) | Return a new flat table containing rows only in each from an iteration.
Parameters
----------
flat_tables: Iterable, an iteration of flat tables that will be joined with self.
join_keys: List, join keys used to join each in the iteration. | scalpel/flattening/flat_table.py | intersection_all | X-DataInitiative/SCALPEL-Analysis | 7 | python | @staticmethod
def intersection_all(flat_tables: Iterable['FlatTable'], join_keys: List[str]) -> 'FlatTable':
'\n Return a new flat table containing rows only in each from an iteration.\n\n Parameters\n ----------\n flat_tables: Iterable, an iteration of flat tables that will be joined with self.\n join_keys: List, join keys used to join each in the iteration.\n '
return fold_right(partial(_intersection, join_keys=join_keys), flat_tables) | @staticmethod
def intersection_all(flat_tables: Iterable['FlatTable'], join_keys: List[str]) -> 'FlatTable':
'\n Return a new flat table containing rows only in each from an iteration.\n\n Parameters\n ----------\n flat_tables: Iterable, an iteration of flat tables that will be joined with self.\n join_keys: List, join keys used to join each in the iteration.\n '
return fold_right(partial(_intersection, join_keys=join_keys), flat_tables)<|docstring|>Return a new flat table containing rows only in each from an iteration.
Parameters
----------
flat_tables: Iterable, an iteration of flat tables that will be joined with self.
join_keys: List, join keys used to join each in the iteration.<|endoftext|> |
c3611a6d83b8c114998ddd3e660a8457c0fc18c8c9624e53a817b266411e4f8f | @staticmethod
def difference_all(flat_tables: Iterable['FlatTable'], join_keys: List[str]) -> 'FlatTable':
'\n Return each values in the first that is not contained in others.\n\n Parameters\n ----------\n flat_tables: Iterable\n An iteration of flat tables that will be compared with self.\n join_keys: List\n Join keys used to join each in the iteration.\n '
return fold_right(partial(_difference, join_keys=join_keys), flat_tables) | Return each values in the first that is not contained in others.
Parameters
----------
flat_tables: Iterable
An iteration of flat tables that will be compared with self.
join_keys: List
Join keys used to join each in the iteration. | scalpel/flattening/flat_table.py | difference_all | X-DataInitiative/SCALPEL-Analysis | 7 | python | @staticmethod
def difference_all(flat_tables: Iterable['FlatTable'], join_keys: List[str]) -> 'FlatTable':
'\n Return each values in the first that is not contained in others.\n\n Parameters\n ----------\n flat_tables: Iterable\n An iteration of flat tables that will be compared with self.\n join_keys: List\n Join keys used to join each in the iteration.\n '
return fold_right(partial(_difference, join_keys=join_keys), flat_tables) | @staticmethod
def difference_all(flat_tables: Iterable['FlatTable'], join_keys: List[str]) -> 'FlatTable':
'\n Return each values in the first that is not contained in others.\n\n Parameters\n ----------\n flat_tables: Iterable\n An iteration of flat tables that will be compared with self.\n join_keys: List\n Join keys used to join each in the iteration.\n '
return fold_right(partial(_difference, join_keys=join_keys), flat_tables)<|docstring|>Return each values in the first that is not contained in others.
Parameters
----------
flat_tables: Iterable
An iteration of flat tables that will be compared with self.
join_keys: List
Join keys used to join each in the iteration.<|endoftext|> |
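
The three `*_all` class methods above thread a binary set operation over an iterable with `fold_right`. Assuming `fold_right` behaves like a pairwise reduction (its exact fold direction is not shown in this excerpt), the pattern is equivalent to `functools.reduce` over the binary operation, illustrated here with plain Python sets standing in for flat tables:

```python
# Pairwise-reduction sketch of union_all / intersection_all. functools.reduce is
# a stand-in for fold_right (an assumption), and sets stand in for flat tables.
from functools import reduce, partial

def _union(a, b):
    return a | b

def _intersection(a, b, join_keys=None):   # join_keys is ignored in this toy version
    return a & b

tables = [{1, 2, 3}, {2, 3, 4}, {3, 4, 5}]
print(reduce(_union, tables))                                    # {1, 2, 3, 4, 5}
print(reduce(partial(_intersection, join_keys=["id"]), tables))  # {3}
```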
9791e23d39fec5c8b1455137752d977f0da2f10080cf2c6ca2f5a78ee2c391c1 | @classmethod
def build(cls, name: str, characteristics: str, data: Dict[(str, DataFrame)]) -> 'HistoryTable':
'\n return a history table containing union of stats in a dict\n\n Parameters\n ----------\n name: str\n Flat table name.\n source: str\n Data collection of this flat table.\n characteristics: str\n The flat table name that will show in the pyplot figure.\n data: Dict Dict[str, DataFrame]\n a dict of dataframes which will be united\n '
def _build(path_name: str, df: DataFrame) -> HistoryTable:
if ('history' not in df.columns):
source = df.withColumn('history', lit(path_name))
else:
source = df
return cls(path_name, source, path_name)
flat_table = reduce((lambda a, b: a.union(b)), [_build(name, df) for (name, df) in data.items()])
return cls(name, flat_table.source, characteristics) | return a history table containing union of stats in a dict
Parameters
----------
name: str
Flat table name.
source: str
Data collection of this flat table.
characteristics: str
The flat table name that will show in the pyplot figure.
data: Dict Dict[str, DataFrame]
a dict of dataframes which will be united | scalpel/flattening/flat_table.py | build | X-DataInitiative/SCALPEL-Analysis | 7 | python | @classmethod
def build(cls, name: str, characteristics: str, data: Dict[(str, DataFrame)]) -> 'HistoryTable':
'\n return a history table containing union of stats in a dict\n\n Parameters\n ----------\n name: str\n Flat table name.\n source: str\n Data collection of this flat table.\n characteristics: str\n The flat table name that will show in the pyplot figure.\n data: Dict Dict[str, DataFrame]\n a dict of dataframes which will be united\n '
def _build(path_name: str, df: DataFrame) -> HistoryTable:
if ('history' not in df.columns):
source = df.withColumn('history', lit(path_name))
else:
source = df
return cls(path_name, source, path_name)
flat_table = reduce((lambda a, b: a.union(b)), [_build(name, df) for (name, df) in data.items()])
return cls(name, flat_table.source, characteristics) | @classmethod
def build(cls, name: str, characteristics: str, data: Dict[(str, DataFrame)]) -> 'HistoryTable':
'\n return a history table containing union of stats in a dict\n\n Parameters\n ----------\n name: str\n Flat table name.\n source: str\n Data collection of this flat table.\n characteristics: str\n The flat table name that will show in the pyplot figure.\n data: Dict Dict[str, DataFrame]\n a dict of dataframes which will be united\n '
def _build(path_name: str, df: DataFrame) -> HistoryTable:
if ('history' not in df.columns):
source = df.withColumn('history', lit(path_name))
else:
source = df
return cls(path_name, source, path_name)
flat_table = reduce((lambda a, b: a.union(b)), [_build(name, df) for (name, df) in data.items()])
return cls(name, flat_table.source, characteristics)<|docstring|>return a history table containing union of stats in a dict
Parameters
----------
name: str
Flat table name.
source: str
Data collection of this flat table.
characteristics: str
The flat table name that will show in the pyplot figure.
data: Dict Dict[str, DataFrame]
a dict of dataframes which will be united<|endoftext|> |
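
`HistoryTable.build` stamps each DataFrame with the name it was loaded under and then unions everything into one table. A standalone PySpark sketch of that tag-and-union pattern with invented toy data:

```python
# Tag each DataFrame with its originating name, then union them all.
# The dict keys and row values here are invented toy data.
from functools import reduce
from pyspark.sql import SparkSession
from pyspark.sql.functions import lit

spark = SparkSession.builder.master("local[1]").appName("history-sketch").getOrCreate()
data = {
    "run_2015": spark.createDataFrame([(1, "a")], ["id", "value"]),
    "run_2016": spark.createDataFrame([(2, "b")], ["id", "value"]),
}
tagged = [df.withColumn("history", lit(name)) for name, df in data.items()]
history = reduce(lambda a, b: a.union(b), tagged)
history.show()  # two rows, each carrying its originating 'history' tag
```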
a4baa69dffce95196243f95902495bf23a22e67276a2b753da685d89531790c4 | def __init__(self, filename=None, path=None, logger=None):
'Initialize class properties.'
self._filename = (filename or 'layout.json')
self._path = (path or os.getcwd())
self.log = (logger or logging.getLogger('layout_json'))
self._contents = None | Initialize class properties. | tcex/app_config_object/layout_json.py | __init__ | kdeltared/tcex | 18 | python | def __init__(self, filename=None, path=None, logger=None):
self._filename = (filename or 'layout.json')
self._path = (path or os.getcwd())
self.log = (logger or logging.getLogger('layout_json'))
self._contents = None | def __init__(self, filename=None, path=None, logger=None):
self._filename = (filename or 'layout.json')
self._path = (path or os.getcwd())
self.log = (logger or logging.getLogger('layout_json'))
self._contents = None<|docstring|>Initialize class properties.<|endoftext|> |
e55b5aa560d11648096748a7d2150e4ef1d80689ace75a6fb868742da5a7d41b | @staticmethod
def _to_bool(value):
'Convert string value to bool.'
bool_value = False
if (str(value).lower() in ['1', 'true']):
bool_value = True
return bool_value | Convert string value to bool. | tcex/app_config_object/layout_json.py | _to_bool | kdeltared/tcex | 18 | python | @staticmethod
def _to_bool(value):
bool_value = False
if (str(value).lower() in ['1', 'true']):
bool_value = True
return bool_value | @staticmethod
def _to_bool(value):
bool_value = False
if (str(value).lower() in ['1', 'true']):
bool_value = True
return bool_value<|docstring|>Convert string value to bool.<|endoftext|> |
30b5a62aafc4fdbdbc564710e872f0bc058b0a1a411c38bb1fae9511746ce671 | @property
def contents(self):
'Return layout.json contents.'
if ((self._contents is None) and self.has_layout):
with open(self.filename) as fh:
self._contents = json.load(fh, object_pairs_hook=OrderedDict)
return self._contents | Return layout.json contents. | tcex/app_config_object/layout_json.py | contents | kdeltared/tcex | 18 | python | @property
def contents(self):
if ((self._contents is None) and self.has_layout):
with open(self.filename) as fh:
self._contents = json.load(fh, object_pairs_hook=OrderedDict)
return self._contents | @property
def contents(self):
if ((self._contents is None) and self.has_layout):
with open(self.filename) as fh:
self._contents = json.load(fh, object_pairs_hook=OrderedDict)
return self._contents<|docstring|>Return layout.json contents.<|endoftext|> |
0ae740744a0d290d4b209bb6cbc1e3d59c47f2b39465079cfe1bca8f64178276 | def create(self, inputs, outputs):
'Create new layout.json file based on inputs and outputs.'
lj = OrderedDict()
lj['inputs'] = []
step = OrderedDict()
step['parameters'] = []
step['sequence'] = 1
step['title'] = 'Action'
lj['inputs'].append(step)
step = OrderedDict()
step['parameters'] = []
step['sequence'] = 2
step['title'] = 'Connection'
lj['inputs'].append(step)
step = OrderedDict()
step['parameters'] = []
step['sequence'] = 3
step['title'] = 'Configure'
lj['inputs'].append(step)
step = OrderedDict()
step['parameters'] = []
step['sequence'] = 4
step['title'] = 'Advanced'
lj['inputs'].append(step)
lj['outputs'] = []
for i in sorted(inputs):
if (i.get('name') == 'tc_action'):
lj['inputs'][0]['parameters'].append({'name': 'tc_action'})
elif (i.get('hidden') is True):
lj['inputs'][2]['parameters'].append({'display': "'hidden' != 'hidden'", 'hidden': 'true', 'name': i.get('name')})
else:
lj['inputs'][2]['parameters'].append({'display': '', 'name': i.get('name')})
for o in sorted(outputs):
lj['outputs'].append({'display': '', 'name': o.get('name')})
self.write(lj) | Create new layout.json file based on inputs and outputs. | tcex/app_config_object/layout_json.py | create | kdeltared/tcex | 18 | python | def create(self, inputs, outputs):
lj = OrderedDict()
lj['inputs'] = []
step = OrderedDict()
step['parameters'] = []
step['sequence'] = 1
step['title'] = 'Action'
lj['inputs'].append(step)
step = OrderedDict()
step['parameters'] = []
step['sequence'] = 2
step['title'] = 'Connection'
lj['inputs'].append(step)
step = OrderedDict()
step['parameters'] = []
step['sequence'] = 3
step['title'] = 'Configure'
lj['inputs'].append(step)
step = OrderedDict()
step['parameters'] = []
step['sequence'] = 4
step['title'] = 'Advanced'
lj['inputs'].append(step)
lj['outputs'] = []
for i in sorted(inputs):
if (i.get('name') == 'tc_action'):
lj['inputs'][0]['parameters'].append({'name': 'tc_action'})
elif (i.get('hidden') is True):
lj['inputs'][2]['parameters'].append({'display': "'hidden' != 'hidden'", 'hidden': 'true', 'name': i.get('name')})
else:
            lj['inputs'][2]['parameters'].append({'display': '', 'name': i.get('name')})
for o in sorted(outputs):
        lj['outputs'].append({'display': '', 'name': o.get('name')})
self.write(lj) | def create(self, inputs, outputs):
lj = OrderedDict()
lj['inputs'] = []
step = OrderedDict()
step['parameters'] = []
step['sequence'] = 1
step['title'] = 'Action'
lj['inputs'].append(step)
step = OrderedDict()
step['parameters'] = []
step['sequence'] = 2
step['title'] = 'Connection'
lj['inputs'].append(step)
step = OrderedDict()
step['parameters'] = []
step['sequence'] = 3
step['title'] = 'Configure'
lj['inputs'].append(step)
step = OrderedDict()
step['parameters'] = []
step['sequence'] = 4
step['title'] = 'Advanced'
lj['inputs'].append(step)
lj['outputs'] = []
for i in sorted(inputs):
if (i.get('name') == 'tc_action'):
lj['inputs'][0]['parameters'].append({'name': 'tc_action'})
elif (i.get('hidden') is True):
lj['inputs'][2]['parameters'].append({'display': "'hidden' != 'hidden'", 'hidden': 'true', 'name': i.get('name')})
else:
            lj['inputs'][2]['parameters'].append({'display': '', 'name': i.get('name')})
for o in sorted(outputs):
        lj['outputs'].append({'display': '', 'name': o.get('name')})
self.write(lj)<|docstring|>Create new layout.json file based on inputs and outputs.<|endoftext|> |
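For orientation, a sketch of the structure the create method above writes to layout.json; the parameter and output names here are invented for illustration.

# Approximate content of layout.json after create() (names are hypothetical):
example_layout = {
    'inputs': [
        {'parameters': [{'name': 'tc_action'}], 'sequence': 1, 'title': 'Action'},
        {'parameters': [], 'sequence': 2, 'title': 'Connection'},
        {'parameters': [{'display': '', 'name': 'api_key'}], 'sequence': 3, 'title': 'Configure'},
        {'parameters': [], 'sequence': 4, 'title': 'Advanced'},
    ],
    'outputs': [{'display': '', 'name': 'example.output'}],
}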
4ae5b50117008d7c5bc62beff3e0287c5bbbd52dacb7775a946a7e55341cacf9 | @property
def filename(self):
'Return the fqpn for the layout.json file.'
return os.path.join(self._path, self._filename) | Return the fqpn for the layout.json file. | tcex/app_config_object/layout_json.py | filename | kdeltared/tcex | 18 | python | @property
def filename(self):
return os.path.join(self._path, self._filename) | @property
def filename(self):
return os.path.join(self._path, self._filename)<|docstring|>Return the fqpn for the layout.json file.<|endoftext|> |
b6f27c913d2b7c803a8d43b2f6d2b329bd96257a73fd3cbff377b075697ee9cd | @property
def has_layout(self):
'Return True if App has layout.json file.'
if os.path.isfile(self.filename):
return True
return False | Return True if App has layout.json file. | tcex/app_config_object/layout_json.py | has_layout | kdeltared/tcex | 18 | python | @property
def has_layout(self):
if os.path.isfile(self.filename):
return True
return False | @property
def has_layout(self):
if os.path.isfile(self.filename):
return True
return False<|docstring|>Return True if App has layout.json file.<|endoftext|> |
918a01ad30f69adaa3c295411e93be4f222a2c23e4c48dbda9b88af772da4b69 | @property
def params_dict(self):
'Return layout.json params in a flattened dict with name param as key.'
parameters = {}
for i in self.inputs:
for p in i.get('parameters', []):
parameters.setdefault(p.get('name'), p)
return parameters | Return layout.json params in a flattened dict with name param as key. | tcex/app_config_object/layout_json.py | params_dict | kdeltared/tcex | 18 | python | @property
def params_dict(self):
parameters = {}
for i in self.inputs:
for p in i.get('parameters', []):
parameters.setdefault(p.get('name'), p)
return parameters | @property
def params_dict(self):
parameters = {}
for i in self.inputs:
for p in i.get('parameters', []):
parameters.setdefault(p.get('name'), p)
return parameters<|docstring|>Return layout.json params in a flattened dict with name param as key.<|endoftext|> |
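A small stand-alone illustration of the flattening that params_dict performs; the step and parameter names are the hypothetical ones used in the earlier sketch, not values from the project.

steps = [
    {'parameters': [{'name': 'tc_action'}]},
    {'parameters': [{'display': '', 'name': 'api_key'}]},
]
flattened = {}
for step in steps:
    for param in step.get('parameters', []):
        flattened.setdefault(param.get('name'), param)
# flattened -> {'tc_action': {'name': 'tc_action'},
#               'api_key': {'display': '', 'name': 'api_key'}}
# setdefault() keeps the first occurrence if a name repeats across steps.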
993e8253e90cf0b8360f1a5fa909c587edc26be18809dba8171d6781b5206b8a | @property
def parameters_names(self):
'Return layout.json params in a flattened dict with name param as key.'
return self.params_dict.keys() | Return layout.json params in a flattened dict with name param as key. | tcex/app_config_object/layout_json.py | parameters_names | kdeltared/tcex | 18 | python | @property
def parameters_names(self):
return self.params_dict.keys() | @property
def parameters_names(self):
return self.params_dict.keys()<|docstring|>Return layout.json params in a flattened dict with name param as key.<|endoftext|> |
ddef864971fb22adea44b9c3abe2202f68bd77b78beaf45dbf3e9addadad0b8e | @property
def outputs_dict(self):
'Return layout.json outputs in a flattened dict with name param as key.'
outputs = {}
for o in self.outputs:
outputs.setdefault(o.get('name'), o)
return outputs | Return layout.json outputs in a flattened dict with name param as key. | tcex/app_config_object/layout_json.py | outputs_dict | kdeltared/tcex | 18 | python | @property
def outputs_dict(self):
outputs = {}
for o in self.outputs:
outputs.setdefault(o.get('name'), o)
return outputs | @property
def outputs_dict(self):
outputs = {}
for o in self.outputs:
outputs.setdefault(o.get('name'), o)
return outputs<|docstring|>Return layout.json outputs in a flattened dict with name param as key.<|endoftext|> |
a3863289e59fa3a4eafec8a8b29b85aa015de2e28bf493753f4a481ff73ba6f8 | def update(self, features=None):
'Update the layouts.json file.'
features = (features or [])
layout_data = self.contents
self.update_sort_outputs(layout_data)
self._contents = layout_data
self.write(layout_data) | Update the layouts.json file. | tcex/app_config_object/layout_json.py | update | kdeltared/tcex | 18 | python | def update(self, features=None):
features = (features or [])
layout_data = self.contents
self.update_sort_outputs(layout_data)
self._contents = layout_data
self.write(layout_data) | def update(self, features=None):
features = (features or [])
layout_data = self.contents
self.update_sort_outputs(layout_data)
self._contents = layout_data
self.write(layout_data)<|docstring|>Update the layouts.json file.<|endoftext|> |
1b0717fe570ce07ead63c6d23148fa77dbbda54961ce94ecb04792b434d33dd6 | @staticmethod
def update_sort_outputs(layout_data):
'Sort output field by name.'
layout_data['outputs'] = sorted(layout_data.get('outputs', []), key=(lambda i: i['name'])) | Sort output field by name. | tcex/app_config_object/layout_json.py | update_sort_outputs | kdeltared/tcex | 18 | python | @staticmethod
def update_sort_outputs(layout_data):
layout_data['outputs'] = sorted(layout_data.get('outputs', []), key=(lambda i: i['name'])) | @staticmethod
def update_sort_outputs(layout_data):
layout_data['outputs'] = sorted(layout_data.get('outputs', []), key=(lambda i: i['name']))<|docstring|>Sort output field by name.<|endoftext|> |
b46b207d9c97de84c4cabfd8d0435f39bd1acde4c05ebeada6f93e4593bda4a4 | def write(self, json_data):
'Write updated profile file.\n\n Args:\n json_data (dict): The profile data.\n '
with open(self.filename, 'w') as fh:
json.dump(json_data, fh, indent=2, sort_keys=False)
fh.write('\n') | Write updated profile file.
Args:
json_data (dict): The profile data. | tcex/app_config_object/layout_json.py | write | kdeltared/tcex | 18 | python | def write(self, json_data):
'Write updated profile file.\n\n Args:\n json_data (dict): The profile data.\n '
with open(self.filename, 'w') as fh:
json.dump(json_data, fh, indent=2, sort_keys=False)
fh.write('\n') | def write(self, json_data):
'Write updated profile file.\n\n Args:\n json_data (dict): The profile data.\n '
with open(self.filename, 'w') as fh:
json.dump(json_data, fh, indent=2, sort_keys=False)
fh.write('\n')<|docstring|>Write updated profile file.
Args:
json_data (dict): The profile data.<|endoftext|> |
c09cac31fffa4850b8173c8aaff5220584ff31e5d6a3bd0686bb11f9bff13a06 | @property
def inputs(self):
'Return property.'
return self.contents.get('inputs', []) | Return property. | tcex/app_config_object/layout_json.py | inputs | kdeltared/tcex | 18 | python | @property
def inputs(self):
return self.contents.get('inputs', []) | @property
def inputs(self):
return self.contents.get('inputs', [])<|docstring|>Return property.<|endoftext|> |
15877e565c6e6ec130af8a0017c411e1d4e9da1d807bc6a424775e4d35c0f872 | @property
def outputs(self):
'Return property.'
return self.contents.get('outputs', []) | Return property. | tcex/app_config_object/layout_json.py | outputs | kdeltared/tcex | 18 | python | @property
def outputs(self):
return self.contents.get('outputs', []) | @property
def outputs(self):
return self.contents.get('outputs', [])<|docstring|>Return property.<|endoftext|> |
8d869b5d1ac74565d091e036c938ff7034090892f58e72aaeb915ba5d9baad8a | def __init__(self, managed_window):
'Constructor'
aui.AuiManager.__init__(self)
self.SetManagedWindow(managed_window) | Constructor | vespa/simulation/aui_subclass.py | __init__ | vespa-mrs/vespa | 0 | python | def __init__(self, managed_window):
aui.AuiManager.__init__(self)
self.SetManagedWindow(managed_window) | def __init__(self, managed_window):
aui.AuiManager.__init__(self)
self.SetManagedWindow(managed_window)<|docstring|>Constructor<|endoftext|> |
73cc18c75d554bc8f24a2be45a7c6a46ba39cfb4023bce67acc82de84e7069bf | def test_generate_vnfd(self):
' Function to verify vnfd generation based on template '
self.maxDiff = None
generated_vnfd = vnfdgen.generate_vnfd(TREX_VNFD_TEMPLATE, NODE_CFG)
self.assertDictEqual(COMPLETE_TREX_VNFD, generated_vnfd) | Function to verify vnfd generation based on template | yardstick/tests/unit/network_services/vnf_generic/test_vnfdgen.py | test_generate_vnfd | beefyamoeba5/yardstick | 28 | python | def test_generate_vnfd(self):
' '
self.maxDiff = None
generated_vnfd = vnfdgen.generate_vnfd(TREX_VNFD_TEMPLATE, NODE_CFG)
self.assertDictEqual(COMPLETE_TREX_VNFD, generated_vnfd) | def test_generate_vnfd(self):
' '
self.maxDiff = None
generated_vnfd = vnfdgen.generate_vnfd(TREX_VNFD_TEMPLATE, NODE_CFG)
self.assertDictEqual(COMPLETE_TREX_VNFD, generated_vnfd)<|docstring|>Function to verify vnfd generation based on template<|endoftext|> |
5a64d237d79ece2468eb3943ac469f3a515de0b53fdfd99027c93a42c24ca443 | def test_generate_tp_no_vars(self):
' Function to verify traffic profile generation without imix '
self.maxDiff = None
generated_tp = vnfdgen.generate_vnfd(TRAFFIC_PROFILE_TPL, {'imix': {}})
self.assertDictEqual(TRAFFIC_PROFILE, generated_tp) | Function to verify traffic profile generation without imix | yardstick/tests/unit/network_services/vnf_generic/test_vnfdgen.py | test_generate_tp_no_vars | beefyamoeba5/yardstick | 28 | python | def test_generate_tp_no_vars(self):
' '
self.maxDiff = None
generated_tp = vnfdgen.generate_vnfd(TRAFFIC_PROFILE_TPL, {'imix': {}})
self.assertDictEqual(TRAFFIC_PROFILE, generated_tp) | def test_generate_tp_no_vars(self):
' '
self.maxDiff = None
generated_tp = vnfdgen.generate_vnfd(TRAFFIC_PROFILE_TPL, {'imix': {}})
self.assertDictEqual(TRAFFIC_PROFILE, generated_tp)<|docstring|>Function to verify traffic profile generation without imix<|endoftext|> |
7513886a3bbe7eddc969f53ba390fea9effa2e1a03afe0295c6ca43102074f1e | def test_generate_tp_single_var(self):
' Function to verify traffic profile generation with imix '
generated_tp = vnfdgen.generate_vnfd(TRAFFIC_PROFILE_TPL, {'imix': {UPLINK: {'imix_small': '20'}}})
self.maxDiff = None
tp2 = dict(TRAFFIC_PROFILE)
tp2[UPLINK][0]['ipv4']['outer_l2']['framesize']['64B'] = '20'
self.assertDictEqual(tp2, generated_tp) | Function to verify traffic profile generation with imix | yardstick/tests/unit/network_services/vnf_generic/test_vnfdgen.py | test_generate_tp_single_var | beefyamoeba5/yardstick | 28 | python | def test_generate_tp_single_var(self):
' '
generated_tp = vnfdgen.generate_vnfd(TRAFFIC_PROFILE_TPL, {'imix': {UPLINK: {'imix_small': '20'}}})
self.maxDiff = None
tp2 = dict(TRAFFIC_PROFILE)
tp2[UPLINK][0]['ipv4']['outer_l2']['framesize']['64B'] = '20'
self.assertDictEqual(tp2, generated_tp) | def test_generate_tp_single_var(self):
' '
generated_tp = vnfdgen.generate_vnfd(TRAFFIC_PROFILE_TPL, {'imix': {UPLINK: {'imix_small': '20'}}})
self.maxDiff = None
tp2 = dict(TRAFFIC_PROFILE)
tp2[UPLINK][0]['ipv4']['outer_l2']['framesize']['64B'] = '20'
self.assertDictEqual(tp2, generated_tp)<|docstring|>Function to verify traffic profile generation with imix<|endoftext|> |
dddc3408e3094d3d01ba5d316c744bb7cc9ad4c396fac9c0b7b0262b435cc93a | def add_resource(self, resource: Resource):
' Count a resource if it passes the filters '
resource = self.filter_resource(resource)
if (resource is None):
return
count = len(resource.instances)
if (count == 0):
return
if (resource.type not in self.table):
self.table[resource.type] = 0
self.table[resource.type] += count
self.total_count += count | Count a resource if it passes the filters | denull_ops_tfstats/stats.py | add_resource | ordenull/denull-ops-tfstats | 0 | python | def add_resource(self, resource: Resource):
' '
resource = self.filter_resource(resource)
if (resource is None):
return
count = len(resource.instances)
if (count == 0):
return
if (resource.type not in self.table):
self.table[resource.type] = 0
self.table[resource.type] += count
self.total_count += count | def add_resource(self, resource: Resource):
' '
resource = self.filter_resource(resource)
if (resource is None):
return
count = len(resource.instances)
if (count == 0):
return
if (resource.type not in self.table):
self.table[resource.type] = 0
self.table[resource.type] += count
self.total_count += count<|docstring|>Count a resource if it passes the filters<|endoftext|> |
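A stand-alone restatement of the tallying above, with made-up resource types and instance counts, to show how the per-type table and the running total relate.

table, total_count = {}, 0
for rtype, instance_count in [('aws_instance', 2), ('aws_s3_bucket', 1), ('aws_instance', 3)]:
    if instance_count == 0:
        continue
    if rtype not in table:
        table[rtype] = 0
    table[rtype] += instance_count
    total_count += instance_count
# table -> {'aws_instance': 5, 'aws_s3_bucket': 1}; total_count -> 6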
901987415cd5d39f54494358f44b8816a4d15c48637e143a20a132b66ff53e65 | def filter_resource(self, resource: Resource):
'\n Filters out resources that are not interesting to us.\n Returns None if it has been excluded.\n '
if (len(self._include_filters) > 0):
matched_include = False
for filtr in self._include_filters:
if (resource.raw[filtr['name']] == filtr['value']):
matched_include = True
break
if (not matched_include):
logging.debug("Excluding: %s because it didn't match any include filter", resource)
return None
for filtr in self._exclude_filters:
if (resource.raw[filtr['name']] == filtr['value']):
logging.debug('Excluding %s: %s=%s', resource, filtr['name'], resource.raw[filtr['name']])
return None
return resource | Filters out resources that are not interesting to us.
Returns None if it has been excluded. | denull_ops_tfstats/stats.py | filter_resource | ordenull/denull-ops-tfstats | 0 | python | def filter_resource(self, resource: Resource):
'\n Filters out resources that are not interesting to us.\n Returns None if it has been excluded.\n '
if (len(self._include_filters) > 0):
matched_include = False
for filtr in self._include_filters:
if (resource.raw[filtr['name']] == filtr['value']):
matched_include = True
break
if (not matched_include):
logging.debug("Excluding: %s because it didn't match any include filter", resource)
return None
for filtr in self._exclude_filters:
if (resource.raw[filtr['name']] == filtr['value']):
logging.debug('Excluding %s: %s=%s', resource, filtr['name'], resource.raw[filtr['name']])
return None
return resource | def filter_resource(self, resource: Resource):
'\n Filters out resources that are not interesting to us.\n Returns None if it has been excluded.\n '
if (len(self._include_filters) > 0):
matched_include = False
for filtr in self._include_filters:
if (resource.raw[filtr['name']] == filtr['value']):
matched_include = True
break
if (not matched_include):
logging.debug("Excluding: %s because it didn't match any include filter", resource)
return None
for filtr in self._exclude_filters:
if (resource.raw[filtr['name']] == filtr['value']):
logging.debug('Excluding %s: %s=%s', resource, filtr['name'], resource.raw[filtr['name']])
return None
return resource<|docstring|>Filters out resources that are not interesting to us.
Returns None if it has been excluded.<|endoftext|> |
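The include/exclude semantics above, restated on plain dicts; the filter entries and the resource attributes are hypothetical.

include_filters = [{'name': 'mode', 'value': 'managed'}]
exclude_filters = [{'name': 'type', 'value': 'null_resource'}]
resource_raw = {'mode': 'managed', 'type': 'aws_instance'}

passes_include = (not include_filters) or any(
    resource_raw[f['name']] == f['value'] for f in include_filters
)
hits_exclude = any(resource_raw[f['name']] == f['value'] for f in exclude_filters)
kept = passes_include and not hits_exclude  # True for this example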
371e5094836613951685997ed54fab0313f20bd33c95158f3aa7a17855e4c48f | def main(project_name=None):
'Main function to start a project using django-safe-project.\n\n Finds the location of the project template in the available django package.\n Creates a temp directory to copy and modify the template.\n Calls `django-admin startproject <project_name>` and supplies it with\n the modified template.\n\n :param project_name: The name of the project to create.\n (If None, uses the first command line argument)\n\n '
if (project_name is None):
project_name = sys.argv[1]
dj_root = os.path.dirname(django.__file__)
source_dir = os.path.join(dj_root, 'conf', 'project_template')
tmp_dir = tempfile.mkdtemp()
dest_dir = os.path.join(tmp_dir, 'project_template')
try:
build_template(source=source_dir, dest=dest_dir)
start_project(project_name, dest_dir)
finally:
shutil.rmtree(tmp_dir) | Main function to start a project using django-safe-project.
Finds the location of the project template in the available django package.
Creates a temp directory to copy and modify the template.
Calls `django-admin startproject <project_name>` and supplies it with
the modified template.
:param project_name: The name of the project to create.
(If None, uses the first command line argument) | startproject.py | main | Ketibansapi/sensitivity-djtest | 0 | python | def main(project_name=None):
'Main function to start a project using django-safe-project.\n\n Finds the location of the project template in the available django package.\n Creates a temp directory to copy and modify the template.\n Calls `django-admin startproject <project_name>` and supplies it with\n the modified template.\n\n :param project_name: The name of the project to create.\n (If None, uses the first command line argument)\n\n '
if (project_name is None):
project_name = sys.argv[1]
dj_root = os.path.dirname(django.__file__)
source_dir = os.path.join(dj_root, 'conf', 'project_template')
tmp_dir = tempfile.mkdtemp()
dest_dir = os.path.join(tmp_dir, 'project_template')
try:
build_template(source=source_dir, dest=dest_dir)
start_project(project_name, dest_dir)
finally:
shutil.rmtree(tmp_dir) | def main(project_name=None):
'Main function to start a project using django-safe-project.\n\n Finds the location of the project template in the available django package.\n Creates a temp directory to copy and modify the template.\n Calls `django-admin startproject <project_name>` and supplies it with\n the modified template.\n\n :param project_name: The name of the project to create.\n (If None, uses the first command line argument)\n\n '
if (project_name is None):
project_name = sys.argv[1]
dj_root = os.path.dirname(django.__file__)
source_dir = os.path.join(dj_root, 'conf', 'project_template')
tmp_dir = tempfile.mkdtemp()
dest_dir = os.path.join(tmp_dir, 'project_template')
try:
build_template(source=source_dir, dest=dest_dir)
start_project(project_name, dest_dir)
finally:
shutil.rmtree(tmp_dir)<|docstring|>Main function to start a project using django-safe-project.
Finds the location of the project template in the available django package.
Creates a temp directory to copy and modify the template.
Calls `django-admin startproject <project_name>` and supplies it with
the modified template.
:param project_name: The name of the project to create.
(If None, uses the first command line argument)<|endoftext|> |
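A usage sketch for main above; the project name is arbitrary and the module is assumed to be importable as startproject.

from startproject import main

# Equivalent to running: python startproject.py mysite
# Copies Django's bundled project template into a temp dir, modifies the copy,
# runs `django-admin startproject mysite` against it, then removes the temp dir.
main(project_name='mysite')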
58269dc4ee6d7d32af5c44626e6ee6a678cb12ba560726d6b2817da41b0a95d4 | def get_errors_to_process(exclude, app_import_names, extend_ignore, file_or_dir, excluded_errors):
'Get lint errors to process.'
lint_errors = _lint.get_lint_output(format=None, qs_or_vs=None, exclude=exclude, app_import_names=app_import_names, extend_ignore=extend_ignore, file_or_dir=file_or_dir).splitlines()
parsed_errors = map(parse, lint_errors)
parsed_errors = list(filter(None, parsed_errors))
lint_errors_to_process = [error for error in parsed_errors if (error.code not in excluded_errors)]
return lint_errors_to_process | Get lint errors to process. | ni_python_styleguide/_utils/lint.py | get_errors_to_process | ni/ni-python-styleguide | 4 | python | def get_errors_to_process(exclude, app_import_names, extend_ignore, file_or_dir, excluded_errors):
lint_errors = _lint.get_lint_output(format=None, qs_or_vs=None, exclude=exclude, app_import_names=app_import_names, extend_ignore=extend_ignore, file_or_dir=file_or_dir).splitlines()
parsed_errors = map(parse, lint_errors)
parsed_errors = list(filter(None, parsed_errors))
lint_errors_to_process = [error for error in parsed_errors if (error.code not in excluded_errors)]
return lint_errors_to_process | def get_errors_to_process(exclude, app_import_names, extend_ignore, file_or_dir, excluded_errors):
lint_errors = _lint.get_lint_output(format=None, qs_or_vs=None, exclude=exclude, app_import_names=app_import_names, extend_ignore=extend_ignore, file_or_dir=file_or_dir).splitlines()
parsed_errors = map(parse, lint_errors)
parsed_errors = list(filter(None, parsed_errors))
lint_errors_to_process = [error for error in parsed_errors if (error.code not in excluded_errors)]
return lint_errors_to_process<|docstring|>Get lint errors to process.<|endoftext|> |
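A small sketch of the code-based filtering that get_errors_to_process applies, reusing the parse helper documented in the next row; the flake8-style lines are adapted from its doctests.

lines = [
    "source/rpmfile/__init__.py:13:1: F401 'functools.wraps' imported but unused",
    "expected_output.py:1:1: D100 Missing docstring in public module",
]
parsed = [e for e in map(parse, lines) if e is not None]
kept = [e for e in parsed if e.code not in {"D100"}]
# kept -> only the F401 entry survives the excluded-codes filter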
332e52d22c34552a9b2806bc150c38c5dc7605784d6b643440c4e5746ca3b13b | def parse(line):
'\n Parse line into :class:`LintError`.\n\n >>> parse(r\'source\\arfile.py:55:16: BLK100 Black would make changes.\')\n LintError(file=\'source\\\\arfile.py\', line=55, column=16, code=\'BLK100\', explanation=\'Black would make changes.\')\n\n >>> parse(r"source\\rpmfile\\__init__.py:13:1: F401 \'functools.wraps\' imported but unused")\n LintError(file=\'source\\\\rpmfile\\\\__init__.py\', line=13, column=1, code=\'F401\', explanation="\'functools.wraps\' imported but unused")\n\n >>> parse(r"expected_output.py:77:6: N802 function name \'method_withBadName_with_bad_params_on_multiple_lines_1\' should be lowercase")\n LintError(file=\'expected_output.py\', line=77, column=6, code=\'N802\', explanation="function name \'method_withBadName_with_bad_params_on_multiple_lines_1\' should be lowercase")\n\n >>> parse(r"./tests/test_cli/acknowledge_existing_errors_test_cases__snapshots/doc_line_tests/expected_output.py:1:1: D100 Missing docstring in public module")\n LintError(file=\'./tests/test_cli/acknowledge_existing_errors_test_cases__snapshots/doc_line_tests/expected_output.py\', line=1, column=1, code=\'D100\', explanation=\'Missing docstring in public module\')\n '
p = Parser()
return p.parse(line) | Parse line into :class:`LintError`.
>>> parse(r'source\arfile.py:55:16: BLK100 Black would make changes.')
LintError(file='source\\arfile.py', line=55, column=16, code='BLK100', explanation='Black would make changes.')
>>> parse(r"source\rpmfile\__init__.py:13:1: F401 'functools.wraps' imported but unused")
LintError(file='source\\rpmfile\\__init__.py', line=13, column=1, code='F401', explanation="'functools.wraps' imported but unused")
>>> parse(r"expected_output.py:77:6: N802 function name 'method_withBadName_with_bad_params_on_multiple_lines_1' should be lowercase")
LintError(file='expected_output.py', line=77, column=6, code='N802', explanation="function name 'method_withBadName_with_bad_params_on_multiple_lines_1' should be lowercase")
>>> parse(r"./tests/test_cli/acknowledge_existing_errors_test_cases__snapshots/doc_line_tests/expected_output.py:1:1: D100 Missing docstring in public module")
LintError(file='./tests/test_cli/acknowledge_existing_errors_test_cases__snapshots/doc_line_tests/expected_output.py', line=1, column=1, code='D100', explanation='Missing docstring in public module') | ni_python_styleguide/_utils/lint.py | parse | ni/ni-python-styleguide | 4 | python | def parse(line):
'\n Parse line into :class:`LintError`.\n\n >>> parse(r\'source\\arfile.py:55:16: BLK100 Black would make changes.\')\n LintError(file=\'source\\\\arfile.py\', line=55, column=16, code=\'BLK100\', explanation=\'Black would make changes.\')\n\n >>> parse(r"source\\rpmfile\\__init__.py:13:1: F401 \'functools.wraps\' imported but unused")\n LintError(file=\'source\\\\rpmfile\\\\__init__.py\', line=13, column=1, code=\'F401\', explanation="\'functools.wraps\' imported but unused")\n\n >>> parse(r"expected_output.py:77:6: N802 function name \'method_withBadName_with_bad_params_on_multiple_lines_1\' should be lowercase")\n LintError(file=\'expected_output.py\', line=77, column=6, code=\'N802\', explanation="function name \'method_withBadName_with_bad_params_on_multiple_lines_1\' should be lowercase")\n\n >>> parse(r"./tests/test_cli/acknowledge_existing_errors_test_cases__snapshots/doc_line_tests/expected_output.py:1:1: D100 Missing docstring in public module")\n LintError(file=\'./tests/test_cli/acknowledge_existing_errors_test_cases__snapshots/doc_line_tests/expected_output.py\', line=1, column=1, code=\'D100\', explanation=\'Missing docstring in public module\')\n '
p = Parser()
return p.parse(line) | def parse(line):
'\n Parse line into :class:`LintError`.\n\n >>> parse(r\'source\\arfile.py:55:16: BLK100 Black would make changes.\')\n LintError(file=\'source\\\\arfile.py\', line=55, column=16, code=\'BLK100\', explanation=\'Black would make changes.\')\n\n >>> parse(r"source\\rpmfile\\__init__.py:13:1: F401 \'functools.wraps\' imported but unused")\n LintError(file=\'source\\\\rpmfile\\\\__init__.py\', line=13, column=1, code=\'F401\', explanation="\'functools.wraps\' imported but unused")\n\n >>> parse(r"expected_output.py:77:6: N802 function name \'method_withBadName_with_bad_params_on_multiple_lines_1\' should be lowercase")\n LintError(file=\'expected_output.py\', line=77, column=6, code=\'N802\', explanation="function name \'method_withBadName_with_bad_params_on_multiple_lines_1\' should be lowercase")\n\n >>> parse(r"./tests/test_cli/acknowledge_existing_errors_test_cases__snapshots/doc_line_tests/expected_output.py:1:1: D100 Missing docstring in public module")\n LintError(file=\'./tests/test_cli/acknowledge_existing_errors_test_cases__snapshots/doc_line_tests/expected_output.py\', line=1, column=1, code=\'D100\', explanation=\'Missing docstring in public module\')\n '
p = Parser()
return p.parse(line)<|docstring|>Parse line into :class:`LintError`.
>>> parse(r'source\arfile.py:55:16: BLK100 Black would make changes.')
LintError(file='source\\arfile.py', line=55, column=16, code='BLK100', explanation='Black would make changes.')
>>> parse(r"source\rpmfile\__init__.py:13:1: F401 'functools.wraps' imported but unused")
LintError(file='source\\rpmfile\\__init__.py', line=13, column=1, code='F401', explanation="'functools.wraps' imported but unused")
>>> parse(r"expected_output.py:77:6: N802 function name 'method_withBadName_with_bad_params_on_multiple_lines_1' should be lowercase")
LintError(file='expected_output.py', line=77, column=6, code='N802', explanation="function name 'method_withBadName_with_bad_params_on_multiple_lines_1' should be lowercase")
>>> parse(r"./tests/test_cli/acknowledge_existing_errors_test_cases__snapshots/doc_line_tests/expected_output.py:1:1: D100 Missing docstring in public module")
LintError(file='./tests/test_cli/acknowledge_existing_errors_test_cases__snapshots/doc_line_tests/expected_output.py', line=1, column=1, code='D100', explanation='Missing docstring in public module')<|endoftext|> |
5343558353df9d65a2639c2850998a22c26cc14a541262ca2338b4961aff60d6 | def parse(self, line):
'Parse `line` and return a :class:`LintError`.\n\n :param line: the line to parse\n :return: lint error as metada object\n :rtype: LintError\n '
data = Parser.__MATCHER.search(line)
logging.debug('parsing line: %s, yielded %s', line, data)
if (not data):
return None
result = Parser._to_lint_error(**data.groupdict())
return result | Parse `line` and return a :class:`LintError`.
:param line: the line to parse
:return: lint error as metada object
:rtype: LintError | ni_python_styleguide/_utils/lint.py | parse | ni/ni-python-styleguide | 4 | python | def parse(self, line):
'Parse `line` and return a :class:`LintError`.\n\n :param line: the line to parse\n :return: lint error as metada object\n :rtype: LintError\n '
data = Parser.__MATCHER.search(line)
logging.debug('parsing line: %s, yielded %s', line, data)
if (not data):
return None
result = Parser._to_lint_error(**data.groupdict())
return result | def parse(self, line):
'Parse `line` and return a :class:`LintError`.\n\n :param line: the line to parse\n :return: lint error as metada object\n :rtype: LintError\n '
data = Parser.__MATCHER.search(line)
logging.debug('parsing line: %s, yielded %s', line, data)
if (not data):
return None
result = Parser._to_lint_error(**data.groupdict())
return result<|docstring|>Parse `line` and return a :class:`LintError`.
:param line: the line to parse
:return: lint error as metada object
:rtype: LintError<|endoftext|> |
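The class-level __MATCHER regex is not included in this row; a pattern with the named groups that parse feeds into Parser._to_lint_error might look like the following (an assumption, not the project's actual pattern).

import re

_matcher = re.compile(
    r"^(?P<file>.+?):(?P<line>\d+):(?P<column>\d+): (?P<code>\w+) (?P<explanation>.*)$"
)
match = _matcher.search("app.py:13:1: F401 'os' imported but unused")
# match.groupdict() -> {'file': 'app.py', 'line': '13', 'column': '1',
#                       'code': 'F401', 'explanation': "'os' imported but unused"}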
4a43228d498d8dc6e5361032ff6ff7ada8247984818844d25e850fc6caecf369 | def get_vpn_services_on_host(self, context, host=None):
'Returns info on the VPN services on the host.'
routers = via_cfg_file.get_active_routers_for_host(context, host)
host_vpn_services = []
for router in routers:
vpn_services = self.get_vpn_services_using(context, router['id'])
for vpn_service in vpn_services:
host_vpn_services.append(self.driver._make_vpnservice_dict(context, vpn_service, router))
return host_vpn_services | Returns info on the VPN services on the host. | neutron/services/vpn/service_drivers/cisco_ipsec.py | get_vpn_services_on_host | CingHu/neutron-ustack | 1 | python | def get_vpn_services_on_host(self, context, host=None):
routers = via_cfg_file.get_active_routers_for_host(context, host)
host_vpn_services = []
for router in routers:
vpn_services = self.get_vpn_services_using(context, router['id'])
for vpn_service in vpn_services:
host_vpn_services.append(self.driver._make_vpnservice_dict(context, vpn_service, router))
return host_vpn_services | def get_vpn_services_on_host(self, context, host=None):
routers = via_cfg_file.get_active_routers_for_host(context, host)
host_vpn_services = []
for router in routers:
vpn_services = self.get_vpn_services_using(context, router['id'])
for vpn_service in vpn_services:
host_vpn_services.append(self.driver._make_vpnservice_dict(context, vpn_service, router))
return host_vpn_services<|docstring|>Returns info on the VPN services on the host.<|endoftext|> |
2b51de4e81c9bc3aa716fcd4f5d0af612c8be4e6c485668ad7cfae6475aab56c | def update_status(self, context, status):
'Update status of all vpnservices.'
plugin = self.driver.service_plugin
plugin.update_status_by_agent(context, status) | Update status of all vpnservices. | neutron/services/vpn/service_drivers/cisco_ipsec.py | update_status | CingHu/neutron-ustack | 1 | python | def update_status(self, context, status):
plugin = self.driver.service_plugin
plugin.update_status_by_agent(context, status) | def update_status(self, context, status):
plugin = self.driver.service_plugin
plugin.update_status_by_agent(context, status)<|docstring|>Update status of all vpnservices.<|endoftext|> |
700e8c34b61b4539f2b58ecf87a8dbfd60930128ebd5ba5992f6f2fde828666d | def _agent_notification(self, context, method, router_id, version=None, **kwargs):
'Notify update for the agent.\n\n Find the host for the router being notified and then\n dispatches a notification for the VPN device driver.\n '
admin_context = ((context.is_admin and context) or context.elevated())
if (not version):
version = self.RPC_API_VERSION
host = via_cfg_file.get_host_for_router(admin_context, router_id)
if (not host):
return
LOG.debug(_('Notify agent at %(topic)s.%(host)s the message %(method)s %(args)s for router %(router)s'), {'topic': self.topic, 'host': host, 'method': method, 'args': kwargs, 'router': router_id})
self.cast(context, self.make_msg(method, **kwargs), version=version, topic=('%s.%s' % (self.topic, host))) | Notify update for the agent.
Find the host for the router being notified and then
dispatches a notification for the VPN device driver. | neutron/services/vpn/service_drivers/cisco_ipsec.py | _agent_notification | CingHu/neutron-ustack | 1 | python | def _agent_notification(self, context, method, router_id, version=None, **kwargs):
'Notify update for the agent.\n\n Find the host for the router being notified and then\n dispatches a notification for the VPN device driver.\n '
admin_context = ((context.is_admin and context) or context.elevated())
if (not version):
version = self.RPC_API_VERSION
host = via_cfg_file.get_host_for_router(admin_context, router_id)
if (not host):
return
LOG.debug(_('Notify agent at %(topic)s.%(host)s the message %(method)s %(args)s for router %(router)s'), {'topic': self.topic, 'host': host, 'method': method, 'args': kwargs, 'router': router_id})
self.cast(context, self.make_msg(method, **kwargs), version=version, topic=('%s.%s' % (self.topic, host))) | def _agent_notification(self, context, method, router_id, version=None, **kwargs):
'Notify update for the agent.\n\n Find the host for the router being notified and then\n dispatches a notification for the VPN device driver.\n '
admin_context = ((context.is_admin and context) or context.elevated())
if (not version):
version = self.RPC_API_VERSION
host = via_cfg_file.get_host_for_router(admin_context, router_id)
if (not host):
return
LOG.debug(_('Notify agent at %(topic)s.%(host)s the message %(method)s %(args)s for router %(router)s'), {'topic': self.topic, 'host': host, 'method': method, 'args': kwargs, 'router': router_id})
self.cast(context, self.make_msg(method, **kwargs), version=version, topic=('%s.%s' % (self.topic, host)))<|docstring|>Notify update for the agent.
Find the host for the router being notified and then
dispatches a notification for the VPN device driver.<|endoftext|> |
8eca05fb7cad9f1b693051cfe0ee259b8f848dcf38ba0949bd5c680083d94ba2 | def get_cisco_connection_mappings(self, conn_id, context):
'Obtain persisted mappings for IDs related to connection.'
(tunnel_id, ike_id, ipsec_id) = csr_id_map.get_tunnel_mapping_for(conn_id, context.session)
return {'site_conn_id': (u'Tunnel%d' % tunnel_id), 'ike_policy_id': (u'%d' % ike_id), 'ipsec_policy_id': (u'%s' % ipsec_id)} | Obtain persisted mappings for IDs related to connection. | neutron/services/vpn/service_drivers/cisco_ipsec.py | get_cisco_connection_mappings | CingHu/neutron-ustack | 1 | python | def get_cisco_connection_mappings(self, conn_id, context):
(tunnel_id, ike_id, ipsec_id) = csr_id_map.get_tunnel_mapping_for(conn_id, context.session)
return {'site_conn_id': (u'Tunnel%d' % tunnel_id), 'ike_policy_id': (u'%d' % ike_id), 'ipsec_policy_id': (u'%s' % ipsec_id)} | def get_cisco_connection_mappings(self, conn_id, context):
(tunnel_id, ike_id, ipsec_id) = csr_id_map.get_tunnel_mapping_for(conn_id, context.session)
return {'site_conn_id': (u'Tunnel%d' % tunnel_id), 'ike_policy_id': (u'%d' % ike_id), 'ipsec_policy_id': (u'%s' % ipsec_id)}<|docstring|>Obtain persisted mappings for IDs related to connection.<|endoftext|> |
94b974865be73e3157a16c3b7d43a04d812a9e7ae6f7704a69e59ee5c03fa63e | def _make_vpnservice_dict(self, context, vpnservice, router_info):
'Collect all service info, including Cisco info for IPSec conn.'
vpnservice_dict = dict(vpnservice)
vpnservice_dict['ipsec_conns'] = []
vpnservice_dict['subnet'] = dict(vpnservice.subnet)
vpnservice_dict['router_info'] = self._get_router_info(router_info)
for ipsec_conn in vpnservice.ipsec_site_connections:
ipsec_conn_dict = dict(ipsec_conn)
ipsec_conn_dict['ike_policy'] = dict(ipsec_conn.ikepolicy)
ipsec_conn_dict['ipsec_policy'] = dict(ipsec_conn.ipsecpolicy)
ipsec_conn_dict['peer_cidrs'] = [peer_cidr.cidr for peer_cidr in ipsec_conn.peer_cidrs]
ipsec_conn_dict['cisco'] = self.get_cisco_connection_mappings(ipsec_conn['id'], context)
vpnservice_dict['ipsec_conns'].append(ipsec_conn_dict)
return vpnservice_dict | Collect all service info, including Cisco info for IPSec conn. | neutron/services/vpn/service_drivers/cisco_ipsec.py | _make_vpnservice_dict | CingHu/neutron-ustack | 1 | python | def _make_vpnservice_dict(self, context, vpnservice, router_info):
vpnservice_dict = dict(vpnservice)
vpnservice_dict['ipsec_conns'] = []
vpnservice_dict['subnet'] = dict(vpnservice.subnet)
vpnservice_dict['router_info'] = self._get_router_info(router_info)
for ipsec_conn in vpnservice.ipsec_site_connections:
ipsec_conn_dict = dict(ipsec_conn)
ipsec_conn_dict['ike_policy'] = dict(ipsec_conn.ikepolicy)
ipsec_conn_dict['ipsec_policy'] = dict(ipsec_conn.ipsecpolicy)
ipsec_conn_dict['peer_cidrs'] = [peer_cidr.cidr for peer_cidr in ipsec_conn.peer_cidrs]
ipsec_conn_dict['cisco'] = self.get_cisco_connection_mappings(ipsec_conn['id'], context)
vpnservice_dict['ipsec_conns'].append(ipsec_conn_dict)
return vpnservice_dict | def _make_vpnservice_dict(self, context, vpnservice, router_info):
vpnservice_dict = dict(vpnservice)
vpnservice_dict['ipsec_conns'] = []
vpnservice_dict['subnet'] = dict(vpnservice.subnet)
vpnservice_dict['router_info'] = self._get_router_info(router_info)
for ipsec_conn in vpnservice.ipsec_site_connections:
ipsec_conn_dict = dict(ipsec_conn)
ipsec_conn_dict['ike_policy'] = dict(ipsec_conn.ikepolicy)
ipsec_conn_dict['ipsec_policy'] = dict(ipsec_conn.ipsecpolicy)
ipsec_conn_dict['peer_cidrs'] = [peer_cidr.cidr for peer_cidr in ipsec_conn.peer_cidrs]
ipsec_conn_dict['cisco'] = self.get_cisco_connection_mappings(ipsec_conn['id'], context)
vpnservice_dict['ipsec_conns'].append(ipsec_conn_dict)
return vpnservice_dict<|docstring|>Collect all service info, including Cisco info for IPSec conn.<|endoftext|> |
4170fee7ea21ec6f49cf9aac81ae856435a69ae9d6fdddcd2641bd4e51d59df7 | @patch('installed_clients.CatalogClient.Catalog.get_module_version', autospec=True)
def test_init_job_stress(self, cc_get_mod_ver):
    '\n testing initializing 3 different jobs in multiple threads.\n '
cc_get_mod_ver.return_value = {'git_commit_hash': '123'}
thread_count = self.thread_count
ori_job_count = Job.objects.count()
runner = self.method_runner
method_1 = 'app_1.a_method'
method_2 = 'app_1.b_method'
job_params_1 = get_sample_job_params(method=method_1)
job_params_1['job_reqs'] = JobRequirements(1, 1, 1, 'njs')
job_params_2 = get_sample_job_params(method=method_2)
job_params_2['job_reqs'] = JobRequirements(1, 1, 1, 'njs')
threads = list()
job_ids = list()
que = queue.Queue()
for index in range(thread_count):
x = threading.Thread(target=que.put(runner.get_runjob()._init_job_rec(self.user_id, job_params_1)))
threads.append(x)
x.start()
y = threading.Thread(target=que.put(runner.get_runjob()._init_job_rec(self.user_id, job_params_2)))
threads.append(y)
y.start()
for (index, thread) in enumerate(threads):
thread.join()
while (not que.empty()):
job_ids.append(que.get())
jobs = self.mongo_util.get_jobs(job_ids=job_ids)
methods = [job.job_input.method for job in jobs]
self.assertEqual(len(methods), (thread_count * 2))
self.assertEqual(methods.count(method_1), thread_count)
self.assertEqual(methods.count(method_2), thread_count)
self.assertEqual(len(set(job_ids)), (thread_count * 2))
self.assertEqual(len(job_ids), len(set(job_ids)))
self.assertEqual(ori_job_count, (Job.objects.count() - (thread_count * 2)))
jobs.delete()
self.assertEqual(ori_job_count, Job.objects.count()) | testing initializing 3 different jobs in multiple theads. | test/tests_for_sdkmr/ee2_load_test.py | test_init_job_stress | bio-boris/execution_engine2 | 1 | python | @patch('installed_clients.CatalogClient.Catalog.get_module_version', autospec=True)
def test_init_job_stress(self, cc_get_mod_ver):
'\n \n '
cc_get_mod_ver.return_value = {'git_commit_hash': '123'}
thread_count = self.thread_count
ori_job_count = Job.objects.count()
runner = self.method_runner
method_1 = 'app_1.a_method'
method_2 = 'app_1.b_method'
job_params_1 = get_sample_job_params(method=method_1)
job_params_1['job_reqs'] = JobRequirements(1, 1, 1, 'njs')
job_params_2 = get_sample_job_params(method=method_2)
job_params_2['job_reqs'] = JobRequirements(1, 1, 1, 'njs')
threads = list()
job_ids = list()
que = queue.Queue()
for index in range(thread_count):
x = threading.Thread(target=que.put(runner.get_runjob()._init_job_rec(self.user_id, job_params_1)))
threads.append(x)
x.start()
y = threading.Thread(target=que.put(runner.get_runjob()._init_job_rec(self.user_id, job_params_2)))
threads.append(y)
y.start()
for (index, thread) in enumerate(threads):
thread.join()
while (not que.empty()):
job_ids.append(que.get())
jobs = self.mongo_util.get_jobs(job_ids=job_ids)
methods = [job.job_input.method for job in jobs]
self.assertEqual(len(methods), (thread_count * 2))
self.assertEqual(methods.count(method_1), thread_count)
self.assertEqual(methods.count(method_2), thread_count)
self.assertEqual(len(set(job_ids)), (thread_count * 2))
self.assertEqual(len(job_ids), len(set(job_ids)))
self.assertEqual(ori_job_count, (Job.objects.count() - (thread_count * 2)))
jobs.delete()
self.assertEqual(ori_job_count, Job.objects.count()) | @patch('installed_clients.CatalogClient.Catalog.get_module_version', autospec=True)
def test_init_job_stress(self, cc_get_mod_ver):
'\n \n '
cc_get_mod_ver.return_value = {'git_commit_hash': '123'}
thread_count = self.thread_count
ori_job_count = Job.objects.count()
runner = self.method_runner
method_1 = 'app_1.a_method'
method_2 = 'app_1.b_method'
job_params_1 = get_sample_job_params(method=method_1)
job_params_1['job_reqs'] = JobRequirements(1, 1, 1, 'njs')
job_params_2 = get_sample_job_params(method=method_2)
job_params_2['job_reqs'] = JobRequirements(1, 1, 1, 'njs')
threads = list()
job_ids = list()
que = queue.Queue()
for index in range(thread_count):
x = threading.Thread(target=que.put(runner.get_runjob()._init_job_rec(self.user_id, job_params_1)))
threads.append(x)
x.start()
y = threading.Thread(target=que.put(runner.get_runjob()._init_job_rec(self.user_id, job_params_2)))
threads.append(y)
y.start()
for (index, thread) in enumerate(threads):
thread.join()
while (not que.empty()):
job_ids.append(que.get())
jobs = self.mongo_util.get_jobs(job_ids=job_ids)
methods = [job.job_input.method for job in jobs]
self.assertEqual(len(methods), (thread_count * 2))
self.assertEqual(methods.count(method_1), thread_count)
self.assertEqual(methods.count(method_2), thread_count)
self.assertEqual(len(set(job_ids)), (thread_count * 2))
self.assertEqual(len(job_ids), len(set(job_ids)))
self.assertEqual(ori_job_count, (Job.objects.count() - (thread_count * 2)))
jobs.delete()
    self.assertEqual(ori_job_count, Job.objects.count())<|docstring|>testing initializing 3 different jobs in multiple threads.<|endoftext|>
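The thread-plus-queue pattern used in the test above, reduced to a self-contained sketch with no EE2 objects. Note that in the row above the expression passed as target= is evaluated eagerly when each Thread is constructed, so the put actually happens in the main thread; the sketch below passes a real callable instead.

import queue
import threading

results = queue.Queue()

def work(value):
    # runs in the worker thread and hands its result back via the queue
    results.put(value * 2)

threads = [threading.Thread(target=work, args=(i,)) for i in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()

collected = []
while not results.empty():
    collected.append(results.get())
# collected holds one result per thread; ordering is not guaranteed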
c4067f5f1971804a2eb8208231de4194c685a2e03bb38cc79966f377c5ef8f28 | def test_update_job_status_stress(self):
'\n testing update jobs into different status in multiple threads\n '
ori_job_count = Job.objects.count()
runner = self.method_runner
job_params = get_sample_job_params()
job_params['job_reqs'] = JobRequirements(1, 1, 1, 'njs')
thread_count = self.thread_count
job_ids_queued = list()
job_ids_running = list()
job_ids_completed = list()
for index in range(thread_count):
job_ids_queued.append(runner.get_runjob()._init_job_rec(self.user_id, job_params))
job_ids_running.append(runner.get_runjob()._init_job_rec(self.user_id, job_params))
job_ids_completed.append(runner.get_runjob()._init_job_rec(self.user_id, job_params))
queued_jobs = self.mongo_util.get_jobs(job_ids=job_ids_queued)
for job in queued_jobs:
job_rec = job.to_mongo().to_dict()
self.assertIsNone(job_rec.get('queued'))
self.assertEqual(job_rec.get('status'), 'created')
running_jobs = self.mongo_util.get_jobs(job_ids=job_ids_running)
for job in running_jobs:
job_rec = job.to_mongo().to_dict()
self.assertIsNone(job_rec.get('running'))
self.assertEqual(job_rec.get('status'), 'created')
finish_jobs = self.mongo_util.get_jobs(job_ids=job_ids_completed)
for job in finish_jobs:
job_rec = job.to_mongo().to_dict()
self.assertIsNone(job_rec.get('finished'))
self.assertEqual(job_rec.get('status'), 'created')
threads = list()
def update_states(index, job_ids_queued, job_ids_running, job_ids_finish):
'\n update jobs status in one thread\n '
runner.get_runjob().update_job_to_queued(job_ids_queued[index], 'scheduler_id')
runner.get_jobs_status().start_job(job_ids_running[index])
runner.get_jobs_status().start_job(job_ids_finish[index])
job_output = {'version': '11', 'result': {'result': 1}, 'id': 'EXAMPLE_KEY'}
runner.finish_job(job_id=job_ids_finish[index], job_output=job_output)
for index in range(thread_count):
x = threading.Thread(target=update_states(index, job_ids_queued, job_ids_running, job_ids_completed))
threads.append(x)
x.start()
for (index, thread) in enumerate(threads):
thread.join()
queued_jobs = self.mongo_util.get_jobs(job_ids=job_ids_queued)
for job in queued_jobs:
job_rec = job.to_mongo().to_dict()
self.assertIsNotNone(job_rec.get('queued'))
self.assertEqual(job_rec.get('status'), 'queued')
running_jobs = self.mongo_util.get_jobs(job_ids=job_ids_running)
for job in running_jobs:
job_rec = job.to_mongo().to_dict()
self.assertIsNotNone(job_rec.get('running'))
self.assertEqual(job_rec.get('status'), 'running')
finish_jobs = self.mongo_util.get_jobs(job_ids=job_ids_completed)
for job in finish_jobs:
job_rec = job.to_mongo().to_dict()
self.assertIsNotNone(job_rec.get('finished'))
self.assertEqual(job_rec.get('status'), 'completed')
jobs = self.mongo_util.get_jobs(job_ids=((job_ids_queued + job_ids_running) + job_ids_completed))
jobs.delete()
self.assertEqual(ori_job_count, Job.objects.count()) | testing update jobs into different status in multiple threads | test/tests_for_sdkmr/ee2_load_test.py | test_update_job_status_stress | bio-boris/execution_engine2 | 1 | python | def test_update_job_status_stress(self):
'\n \n '
ori_job_count = Job.objects.count()
runner = self.method_runner
job_params = get_sample_job_params()
job_params['job_reqs'] = JobRequirements(1, 1, 1, 'njs')
thread_count = self.thread_count
job_ids_queued = list()
job_ids_running = list()
job_ids_completed = list()
for index in range(thread_count):
job_ids_queued.append(runner.get_runjob()._init_job_rec(self.user_id, job_params))
job_ids_running.append(runner.get_runjob()._init_job_rec(self.user_id, job_params))
job_ids_completed.append(runner.get_runjob()._init_job_rec(self.user_id, job_params))
queued_jobs = self.mongo_util.get_jobs(job_ids=job_ids_queued)
for job in queued_jobs:
job_rec = job.to_mongo().to_dict()
self.assertIsNone(job_rec.get('queued'))
self.assertEqual(job_rec.get('status'), 'created')
running_jobs = self.mongo_util.get_jobs(job_ids=job_ids_running)
for job in running_jobs:
job_rec = job.to_mongo().to_dict()
self.assertIsNone(job_rec.get('running'))
self.assertEqual(job_rec.get('status'), 'created')
finish_jobs = self.mongo_util.get_jobs(job_ids=job_ids_completed)
for job in finish_jobs:
job_rec = job.to_mongo().to_dict()
self.assertIsNone(job_rec.get('finished'))
self.assertEqual(job_rec.get('status'), 'created')
threads = list()
def update_states(index, job_ids_queued, job_ids_running, job_ids_finish):
'\n update jobs status in one thread\n '
runner.get_runjob().update_job_to_queued(job_ids_queued[index], 'scheduler_id')
runner.get_jobs_status().start_job(job_ids_running[index])
runner.get_jobs_status().start_job(job_ids_finish[index])
job_output = {'version': '11', 'result': {'result': 1}, 'id': 'EXAMPLE_KEY'}
runner.finish_job(job_id=job_ids_finish[index], job_output=job_output)
for index in range(thread_count):
x = threading.Thread(target=update_states(index, job_ids_queued, job_ids_running, job_ids_completed))
threads.append(x)
x.start()
for (index, thread) in enumerate(threads):
thread.join()
queued_jobs = self.mongo_util.get_jobs(job_ids=job_ids_queued)
for job in queued_jobs:
job_rec = job.to_mongo().to_dict()
self.assertIsNotNone(job_rec.get('queued'))
self.assertEqual(job_rec.get('status'), 'queued')
running_jobs = self.mongo_util.get_jobs(job_ids=job_ids_running)
for job in running_jobs:
job_rec = job.to_mongo().to_dict()
self.assertIsNotNone(job_rec.get('running'))
self.assertEqual(job_rec.get('status'), 'running')
finish_jobs = self.mongo_util.get_jobs(job_ids=job_ids_completed)
for job in finish_jobs:
job_rec = job.to_mongo().to_dict()
self.assertIsNotNone(job_rec.get('finished'))
self.assertEqual(job_rec.get('status'), 'completed')
jobs = self.mongo_util.get_jobs(job_ids=((job_ids_queued + job_ids_running) + job_ids_completed))
jobs.delete()
self.assertEqual(ori_job_count, Job.objects.count()) | def test_update_job_status_stress(self):
'\n \n '
ori_job_count = Job.objects.count()
runner = self.method_runner
job_params = get_sample_job_params()
job_params['job_reqs'] = JobRequirements(1, 1, 1, 'njs')
thread_count = self.thread_count
job_ids_queued = list()
job_ids_running = list()
job_ids_completed = list()
for index in range(thread_count):
job_ids_queued.append(runner.get_runjob()._init_job_rec(self.user_id, job_params))
job_ids_running.append(runner.get_runjob()._init_job_rec(self.user_id, job_params))
job_ids_completed.append(runner.get_runjob()._init_job_rec(self.user_id, job_params))
queued_jobs = self.mongo_util.get_jobs(job_ids=job_ids_queued)
for job in queued_jobs:
job_rec = job.to_mongo().to_dict()
self.assertIsNone(job_rec.get('queued'))
self.assertEqual(job_rec.get('status'), 'created')
running_jobs = self.mongo_util.get_jobs(job_ids=job_ids_running)
for job in running_jobs:
job_rec = job.to_mongo().to_dict()
self.assertIsNone(job_rec.get('running'))
self.assertEqual(job_rec.get('status'), 'created')
finish_jobs = self.mongo_util.get_jobs(job_ids=job_ids_completed)
for job in finish_jobs:
job_rec = job.to_mongo().to_dict()
self.assertIsNone(job_rec.get('finished'))
self.assertEqual(job_rec.get('status'), 'created')
threads = list()
def update_states(index, job_ids_queued, job_ids_running, job_ids_finish):
'\n update jobs status in one thread\n '
runner.get_runjob().update_job_to_queued(job_ids_queued[index], 'scheduler_id')
runner.get_jobs_status().start_job(job_ids_running[index])
runner.get_jobs_status().start_job(job_ids_finish[index])
job_output = {'version': '11', 'result': {'result': 1}, 'id': 'EXAMPLE_KEY'}
runner.finish_job(job_id=job_ids_finish[index], job_output=job_output)
for index in range(thread_count):
x = threading.Thread(target=update_states(index, job_ids_queued, job_ids_running, job_ids_completed))
threads.append(x)
x.start()
for (index, thread) in enumerate(threads):
thread.join()
queued_jobs = self.mongo_util.get_jobs(job_ids=job_ids_queued)
for job in queued_jobs:
job_rec = job.to_mongo().to_dict()
self.assertIsNotNone(job_rec.get('queued'))
self.assertEqual(job_rec.get('status'), 'queued')
running_jobs = self.mongo_util.get_jobs(job_ids=job_ids_running)
for job in running_jobs:
job_rec = job.to_mongo().to_dict()
self.assertIsNotNone(job_rec.get('running'))
self.assertEqual(job_rec.get('status'), 'running')
finish_jobs = self.mongo_util.get_jobs(job_ids=job_ids_completed)
for job in finish_jobs:
job_rec = job.to_mongo().to_dict()
self.assertIsNotNone(job_rec.get('finished'))
self.assertEqual(job_rec.get('status'), 'completed')
jobs = self.mongo_util.get_jobs(job_ids=((job_ids_queued + job_ids_running) + job_ids_completed))
jobs.delete()
self.assertEqual(ori_job_count, Job.objects.count())<|docstring|>testing update jobs into different status in multiple threads<|endoftext|> |
2298e5d7138cd53107a16380057410aa42c5e7c66ac1a4a40c2df42bfcfd04f1 | @patch.object(Condor, 'run_job', return_value=si)
@patch.object(WorkspaceAuth, 'can_write', return_value=True)
@patch('installed_clients.CatalogClient.Catalog.list_client_group_configs', autospec=True)
@patch('installed_clients.CatalogClient.Catalog.get_module_version', autospec=True)
def test_retry_job_stress(self, cc_get_mod_ver, cc_list_cli_configs, workspace, condor):
'\n Not a stress test, more of an impl test\n '
cc_get_mod_ver.return_value = {'git_commit_hash': 'moduleversiongoeshere'}
cc_list_cli_configs.return_value = []
method_1 = 'app1.a_method'
job_params_1 = get_sample_job_params(method=method_1, app_id='app1/a')
del job_params_1['parent_job_id']
job_ids = []
for i in range(10):
job_ids.append(self.impl.run_job(ctx=self.ctx, params=job_params_1)[0])
for job_id in job_ids:
self.impl.update_job_status(ctx=self.ctx, params={'job_id': job_id, 'status': 'error'})
self.impl.retry_job(ctx=self.ctx, params={'job_id': job_id}) | Not a stress test, more of an impl test | test/tests_for_sdkmr/ee2_load_test.py | test_retry_job_stress | bio-boris/execution_engine2 | 1 | python | @patch.object(Condor, 'run_job', return_value=si)
@patch.object(WorkspaceAuth, 'can_write', return_value=True)
@patch('installed_clients.CatalogClient.Catalog.list_client_group_configs', autospec=True)
@patch('installed_clients.CatalogClient.Catalog.get_module_version', autospec=True)
def test_retry_job_stress(self, cc_get_mod_ver, cc_list_cli_configs, workspace, condor):
'\n \n '
cc_get_mod_ver.return_value = {'git_commit_hash': 'moduleversiongoeshere'}
cc_list_cli_configs.return_value = []
method_1 = 'app1.a_method'
job_params_1 = get_sample_job_params(method=method_1, app_id='app1/a')
del job_params_1['parent_job_id']
job_ids = []
for i in range(10):
job_ids.append(self.impl.run_job(ctx=self.ctx, params=job_params_1)[0])
for job_id in job_ids:
self.impl.update_job_status(ctx=self.ctx, params={'job_id': job_id, 'status': 'error'})
self.impl.retry_job(ctx=self.ctx, params={'job_id': job_id}) | @patch.object(Condor, 'run_job', return_value=si)
@patch.object(WorkspaceAuth, 'can_write', return_value=True)
@patch('installed_clients.CatalogClient.Catalog.list_client_group_configs', autospec=True)
@patch('installed_clients.CatalogClient.Catalog.get_module_version', autospec=True)
def test_retry_job_stress(self, cc_get_mod_ver, cc_list_cli_configs, workspace, condor):
'\n \n '
cc_get_mod_ver.return_value = {'git_commit_hash': 'moduleversiongoeshere'}
cc_list_cli_configs.return_value = []
method_1 = 'app1.a_method'
job_params_1 = get_sample_job_params(method=method_1, app_id='app1/a')
del job_params_1['parent_job_id']
job_ids = []
for i in range(10):
job_ids.append(self.impl.run_job(ctx=self.ctx, params=job_params_1)[0])
for job_id in job_ids:
self.impl.update_job_status(ctx=self.ctx, params={'job_id': job_id, 'status': 'error'})
self.impl.retry_job(ctx=self.ctx, params={'job_id': job_id})<|docstring|>Not a stress test, more of an impl test<|endoftext|> |
71ebbfe98523e3ccba43c5e77d9d8c479ca1f08d30f774c159ad3943a7fe921a | @patch.object(Condor, 'run_job', return_value=si)
@patch.object(WorkspaceAuth, 'can_write', return_value=True)
@patch('installed_clients.CatalogClient.Catalog.list_client_group_configs', autospec=True)
@patch('installed_clients.CatalogClient.Catalog.get_module_version', autospec=True)
@patch('installed_clients.CatalogClient.Catalog.log_exec_stats', autospec=True)
def test_run_job_stress(self, cc_log_stats, cc_get_mod_ver, cc_list_cli_configs, workspace, condor):
    '\n testing running 3 different jobs in multiple threads.\n '
cc_get_mod_ver.return_value = {'git_commit_hash': 'moduleversiongoeshere'}
cc_list_cli_configs.return_value = []
thread_count = self.thread_count
ori_job_count = Job.objects.count()
method_1 = 'app1.a_method'
method_2 = 'app2.b_method'
method_3 = 'app3.c_method'
job_params_1 = get_sample_job_params(method=method_1, app_id='app1/a')
job_params_2 = get_sample_job_params(method=method_2, app_id='app2/b')
job_params_3 = get_sample_job_params(method=method_3, app_id='app3/c')
threads = list()
job_ids = list()
que = queue.Queue()
for index in range(thread_count):
x = threading.Thread(target=que.put(self.impl.run_job(ctx=self.ctx, params=job_params_1)))
threads.append(x)
x.start()
y = threading.Thread(target=que.put(self.impl.run_job(ctx=self.ctx, params=job_params_2)))
threads.append(y)
y.start()
z = threading.Thread(target=que.put(self.impl.run_job(ctx=self.ctx, params=job_params_3)))
threads.append(z)
z.start()
for (index, thread) in enumerate(threads):
thread.join()
while (not que.empty()):
job_ids.append(que.get()[0])
jobs = self.mongo_util.get_jobs(job_ids=job_ids)
methods = [job.job_input.method for job in jobs]
self.assertEqual(len(methods), (thread_count * 3))
self.assertEqual(methods.count(method_1), thread_count)
self.assertEqual(methods.count(method_2), thread_count)
self.assertEqual(methods.count(method_3), thread_count)
status = [job.status for job in jobs]
self.assertCountEqual(status, (([Status.queued.value] * thread_count) * 3))
self.assertEqual(len(set(job_ids)), (thread_count * 3))
self.assertEqual(len(job_ids), len(set(job_ids)))
self.assertEqual(ori_job_count, (Job.objects.count() - (thread_count * 3)))
jobs.delete()
self.assertEqual(ori_job_count, Job.objects.count()) | testing running 3 different jobs in multiple threads. | test/tests_for_sdkmr/ee2_load_test.py | test_run_job_stress | bio-boris/execution_engine2 | 1 | python | @patch.object(Condor, 'run_job', return_value=si)
@patch.object(WorkspaceAuth, 'can_write', return_value=True)
@patch('installed_clients.CatalogClient.Catalog.list_client_group_configs', autospec=True)
@patch('installed_clients.CatalogClient.Catalog.get_module_version', autospec=True)
@patch('installed_clients.CatalogClient.Catalog.log_exec_stats', autospec=True)
def test_run_job_stress(self, cc_log_stats, cc_get_mod_ver, cc_list_cli_configs, workspace, condor):
'\n \n '
cc_get_mod_ver.return_value = {'git_commit_hash': 'moduleversiongoeshere'}
cc_list_cli_configs.return_value = []
thread_count = self.thread_count
ori_job_count = Job.objects.count()
method_1 = 'app1.a_method'
method_2 = 'app2.b_method'
method_3 = 'app3.c_method'
job_params_1 = get_sample_job_params(method=method_1, app_id='app1/a')
job_params_2 = get_sample_job_params(method=method_2, app_id='app2/b')
job_params_3 = get_sample_job_params(method=method_3, app_id='app3/c')
threads = list()
job_ids = list()
que = queue.Queue()
for index in range(thread_count):
x = threading.Thread(target=que.put(self.impl.run_job(ctx=self.ctx, params=job_params_1)))
threads.append(x)
x.start()
y = threading.Thread(target=que.put(self.impl.run_job(ctx=self.ctx, params=job_params_2)))
threads.append(y)
y.start()
z = threading.Thread(target=que.put(self.impl.run_job(ctx=self.ctx, params=job_params_3)))
threads.append(z)
z.start()
for (index, thread) in enumerate(threads):
thread.join()
while (not que.empty()):
job_ids.append(que.get()[0])
jobs = self.mongo_util.get_jobs(job_ids=job_ids)
methods = [job.job_input.method for job in jobs]
self.assertEqual(len(methods), (thread_count * 3))
self.assertEqual(methods.count(method_1), thread_count)
self.assertEqual(methods.count(method_2), thread_count)
self.assertEqual(methods.count(method_3), thread_count)
status = [job.status for job in jobs]
self.assertCountEqual(status, (([Status.queued.value] * thread_count) * 3))
self.assertEqual(len(set(job_ids)), (thread_count * 3))
self.assertEqual(len(job_ids), len(set(job_ids)))
self.assertEqual(ori_job_count, (Job.objects.count() - (thread_count * 3)))
jobs.delete()
self.assertEqual(ori_job_count, Job.objects.count()) | @patch.object(Condor, 'run_job', return_value=si)
@patch.object(WorkspaceAuth, 'can_write', return_value=True)
@patch('installed_clients.CatalogClient.Catalog.list_client_group_configs', autospec=True)
@patch('installed_clients.CatalogClient.Catalog.get_module_version', autospec=True)
@patch('installed_clients.CatalogClient.Catalog.log_exec_stats', autospec=True)
def test_run_job_stress(self, cc_log_stats, cc_get_mod_ver, cc_list_cli_configs, workspace, condor):
'\n \n '
cc_get_mod_ver.return_value = {'git_commit_hash': 'moduleversiongoeshere'}
cc_list_cli_configs.return_value = []
thread_count = self.thread_count
ori_job_count = Job.objects.count()
method_1 = 'app1.a_method'
method_2 = 'app2.b_method'
method_3 = 'app3.c_method'
job_params_1 = get_sample_job_params(method=method_1, app_id='app1/a')
job_params_2 = get_sample_job_params(method=method_2, app_id='app2/b')
job_params_3 = get_sample_job_params(method=method_3, app_id='app3/c')
threads = list()
job_ids = list()
que = queue.Queue()
for index in range(thread_count):
x = threading.Thread(target=que.put(self.impl.run_job(ctx=self.ctx, params=job_params_1)))
threads.append(x)
x.start()
y = threading.Thread(target=que.put(self.impl.run_job(ctx=self.ctx, params=job_params_2)))
threads.append(y)
y.start()
z = threading.Thread(target=que.put(self.impl.run_job(ctx=self.ctx, params=job_params_3)))
threads.append(z)
z.start()
for (index, thread) in enumerate(threads):
thread.join()
while (not que.empty()):
job_ids.append(que.get()[0])
jobs = self.mongo_util.get_jobs(job_ids=job_ids)
methods = [job.job_input.method for job in jobs]
self.assertEqual(len(methods), (thread_count * 3))
self.assertEqual(methods.count(method_1), thread_count)
self.assertEqual(methods.count(method_2), thread_count)
self.assertEqual(methods.count(method_3), thread_count)
status = [job.status for job in jobs]
self.assertCountEqual(status, (([Status.queued.value] * thread_count) * 3))
self.assertEqual(len(set(job_ids)), (thread_count * 3))
self.assertEqual(len(job_ids), len(set(job_ids)))
self.assertEqual(ori_job_count, (Job.objects.count() - (thread_count * 3)))
jobs.delete()
self.assertEqual(ori_job_count, Job.objects.count())<|docstring|>testing running 3 different jobs in multiple threads.<|endoftext|>
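A note on the threading pattern in the stress tests above: threading.Thread(target=que.put(self.impl.run_job(...))) evaluates run_job and que.put before the Thread object is created, so each thread is started with target=None and the calls actually run serially in the main thread. A minimal sketch of a genuinely concurrent fan-out is given below; impl, ctx, params and the result queue are stand-ins for the test fixtures, not code from the repository.

import queue
import threading

def fan_out(impl, ctx, params, n):
    # Pass a callable to Thread so run_job executes inside the worker thread
    # instead of being evaluated while the thread list is being built.
    results = queue.Queue()
    workers = [
        threading.Thread(target=lambda: results.put(impl.run_job(ctx=ctx, params=params)))
        for _ in range(n)
    ]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    return [results.get() for _ in range(n)]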
10b5f42ac0e30eb051a5c36defdabeb352ce17e965d043dad12251327dc97337 | def test_update_job_status(self):
'\n testing updating jobs to different statuses in multiple threads\n '
ori_job_count = Job.objects.count()
runner = self.method_runner
job_params = get_sample_job_params()
job_params['job_reqs'] = JobRequirements(1, 1, 1, 'njs')
thread_count = self.thread_count
job_ids_queued = list()
job_ids_running = list()
job_ids_completed = list()
for index in range(thread_count):
job_ids_queued.append(runner.get_runjob()._init_job_rec(self.user_id, job_params))
job_ids_running.append(runner.get_runjob()._init_job_rec(self.user_id, job_params))
job_ids_completed.append(runner.get_runjob()._init_job_rec(self.user_id, job_params))
init_jobs = self.mongo_util.get_jobs(job_ids=((job_ids_queued + job_ids_running) + job_ids_completed))
for job in init_jobs:
self.assertEqual(job.to_mongo().to_dict().get('status'), 'created')
threads = list()
def update_states(index, job_ids_queued, job_ids_running, job_ids_completed):
'\n update jobs status in one thread\n '
self.impl.update_job_status(ctx=self.ctx, params={'job_id': job_ids_queued[index], 'status': 'queued'})
self.impl.update_job_status(ctx=self.ctx, params={'job_id': job_ids_running[index], 'status': 'running'})
self.impl.update_job_status(ctx=self.ctx, params={'job_id': job_ids_completed[index], 'status': 'completed'})
for index in range(thread_count):
x = threading.Thread(target=update_states(index, job_ids_queued, job_ids_running, job_ids_completed))
threads.append(x)
x.start()
for (index, thread) in enumerate(threads):
thread.join()
queued_jobs = self.mongo_util.get_jobs(job_ids=job_ids_queued)
for job in queued_jobs:
self.assertEqual(job.to_mongo().to_dict().get('status'), 'queued')
running_jobs = self.mongo_util.get_jobs(job_ids=job_ids_running)
for job in running_jobs:
self.assertEqual(job.to_mongo().to_dict().get('status'), 'running')
finish_jobs = self.mongo_util.get_jobs(job_ids=job_ids_completed)
for job in finish_jobs:
self.assertEqual(job.to_mongo().to_dict().get('status'), 'completed')
jobs = self.mongo_util.get_jobs(job_ids=((job_ids_queued + job_ids_running) + job_ids_completed))
jobs.delete()
self.assertEqual(ori_job_count, Job.objects.count()) | testing updating jobs to different statuses in multiple threads | test/tests_for_sdkmr/ee2_load_test.py | test_update_job_status | bio-boris/execution_engine2 | 1 | python | def test_update_job_status(self):
'\n \n '
ori_job_count = Job.objects.count()
runner = self.method_runner
job_params = get_sample_job_params()
job_params['job_reqs'] = JobRequirements(1, 1, 1, 'njs')
thread_count = self.thread_count
job_ids_queued = list()
job_ids_running = list()
job_ids_completed = list()
for index in range(thread_count):
job_ids_queued.append(runner.get_runjob()._init_job_rec(self.user_id, job_params))
job_ids_running.append(runner.get_runjob()._init_job_rec(self.user_id, job_params))
job_ids_completed.append(runner.get_runjob()._init_job_rec(self.user_id, job_params))
init_jobs = self.mongo_util.get_jobs(job_ids=((job_ids_queued + job_ids_running) + job_ids_completed))
for job in init_jobs:
self.assertEqual(job.to_mongo().to_dict().get('status'), 'created')
threads = list()
def update_states(index, job_ids_queued, job_ids_running, job_ids_completed):
'\n update jobs status in one thread\n '
self.impl.update_job_status(ctx=self.ctx, params={'job_id': job_ids_queued[index], 'status': 'queued'})
self.impl.update_job_status(ctx=self.ctx, params={'job_id': job_ids_running[index], 'status': 'running'})
self.impl.update_job_status(ctx=self.ctx, params={'job_id': job_ids_completed[index], 'status': 'completed'})
for index in range(thread_count):
x = threading.Thread(target=update_states(index, job_ids_queued, job_ids_running, job_ids_completed))
threads.append(x)
x.start()
for (index, thread) in enumerate(threads):
thread.join()
queued_jobs = self.mongo_util.get_jobs(job_ids=job_ids_queued)
for job in queued_jobs:
self.assertEqual(job.to_mongo().to_dict().get('status'), 'queued')
running_jobs = self.mongo_util.get_jobs(job_ids=job_ids_running)
for job in running_jobs:
self.assertEqual(job.to_mongo().to_dict().get('status'), 'running')
finish_jobs = self.mongo_util.get_jobs(job_ids=job_ids_completed)
for job in finish_jobs:
self.assertEqual(job.to_mongo().to_dict().get('status'), 'completed')
jobs = self.mongo_util.get_jobs(job_ids=((job_ids_queued + job_ids_running) + job_ids_completed))
jobs.delete()
self.assertEqual(ori_job_count, Job.objects.count()) | def test_update_job_status(self):
'\n \n '
ori_job_count = Job.objects.count()
runner = self.method_runner
job_params = get_sample_job_params()
job_params['job_reqs'] = JobRequirements(1, 1, 1, 'njs')
thread_count = self.thread_count
job_ids_queued = list()
job_ids_running = list()
job_ids_completed = list()
for index in range(thread_count):
job_ids_queued.append(runner.get_runjob()._init_job_rec(self.user_id, job_params))
job_ids_running.append(runner.get_runjob()._init_job_rec(self.user_id, job_params))
job_ids_completed.append(runner.get_runjob()._init_job_rec(self.user_id, job_params))
init_jobs = self.mongo_util.get_jobs(job_ids=((job_ids_queued + job_ids_running) + job_ids_completed))
for job in init_jobs:
self.assertEqual(job.to_mongo().to_dict().get('status'), 'created')
threads = list()
def update_states(index, job_ids_queued, job_ids_running, job_ids_completed):
'\n update jobs status in one thread\n '
self.impl.update_job_status(ctx=self.ctx, params={'job_id': job_ids_queued[index], 'status': 'queued'})
self.impl.update_job_status(ctx=self.ctx, params={'job_id': job_ids_running[index], 'status': 'running'})
self.impl.update_job_status(ctx=self.ctx, params={'job_id': job_ids_completed[index], 'status': 'completed'})
for index in range(thread_count):
x = threading.Thread(target=update_states(index, job_ids_queued, job_ids_running, job_ids_completed))
threads.append(x)
x.start()
for (index, thread) in enumerate(threads):
thread.join()
queued_jobs = self.mongo_util.get_jobs(job_ids=job_ids_queued)
for job in queued_jobs:
self.assertEqual(job.to_mongo().to_dict().get('status'), 'queued')
running_jobs = self.mongo_util.get_jobs(job_ids=job_ids_running)
for job in running_jobs:
self.assertEqual(job.to_mongo().to_dict().get('status'), 'running')
finish_jobs = self.mongo_util.get_jobs(job_ids=job_ids_completed)
for job in finish_jobs:
self.assertEqual(job.to_mongo().to_dict().get('status'), 'completed')
jobs = self.mongo_util.get_jobs(job_ids=((job_ids_queued + job_ids_running) + job_ids_completed))
jobs.delete()
self.assertEqual(ori_job_count, Job.objects.count())<|docstring|>testing updating jobs to different statuses in multiple threads<|endoftext|>
253dbb8a953b69b2c0562cf7b605287549ccef7f270a91ea1e8de0031f3c8def | @patch('installed_clients.CatalogClient.Catalog.get_module_version', autospec=True)
def test_check_jobs_stress(self, cc_get_mod_ver):
'\n testing check jobs in multiple threads.\n '
cc_get_mod_ver.return_value = {'git_commit_hash': '123'}
thread_count = self.thread_count
ori_job_count = Job.objects.count()
runner = self.method_runner
method_1 = 'a_method'
method_2 = 'b_method'
job_params_1 = get_sample_job_params(method=method_1)
job_params_1['job_reqs'] = JobRequirements(1, 1, 1, 'njs')
job_params_2 = get_sample_job_params(method=method_2)
job_params_2['job_reqs'] = JobRequirements(1, 1, 1, 'njs')
job_id_1 = runner.get_runjob()._init_job_rec(self.user_id, job_params_1)
job_id_2 = runner.get_runjob()._init_job_rec(self.user_id, job_params_2)
threads = list()
job_status = list()
que = queue.Queue()
for index in range(thread_count):
x = threading.Thread(target=que.put(self.impl.check_jobs(ctx=self.ctx, params={'job_ids': [job_id_1, job_id_2]})))
threads.append(x)
x.start()
for (index, thread) in enumerate(threads):
thread.join()
while (not que.empty()):
job_status.append(que.get())
for job_status in job_status:
job_status = job_status[0]['job_states']
job_ids = [js['job_id'] for js in job_status]
job_methods = [js['job_input']['method'] for js in job_status]
self.assertCountEqual(job_ids, [job_id_1, job_id_2])
self.assertCountEqual(job_methods, [method_1, method_2])
jobs = self.mongo_util.get_jobs(job_ids=[job_id_1, job_id_2])
jobs.delete()
self.assertEqual(ori_job_count, Job.objects.count()) | testing check jobs in multiple threads. | test/tests_for_sdkmr/ee2_load_test.py | test_check_jobs_stress | bio-boris/execution_engine2 | 1 | python | @patch('installed_clients.CatalogClient.Catalog.get_module_version', autospec=True)
def test_check_jobs_stress(self, cc_get_mod_ver):
'\n \n '
cc_get_mod_ver.return_value = {'git_commit_hash': '123'}
thread_count = self.thread_count
ori_job_count = Job.objects.count()
runner = self.method_runner
method_1 = 'a_method'
method_2 = 'b_method'
job_params_1 = get_sample_job_params(method=method_1)
job_params_1['job_reqs'] = JobRequirements(1, 1, 1, 'njs')
job_params_2 = get_sample_job_params(method=method_2)
job_params_2['job_reqs'] = JobRequirements(1, 1, 1, 'njs')
job_id_1 = runner.get_runjob()._init_job_rec(self.user_id, job_params_1)
job_id_2 = runner.get_runjob()._init_job_rec(self.user_id, job_params_2)
threads = list()
job_status = list()
que = queue.Queue()
for index in range(thread_count):
x = threading.Thread(target=que.put(self.impl.check_jobs(ctx=self.ctx, params={'job_ids': [job_id_1, job_id_2]})))
threads.append(x)
x.start()
for (index, thread) in enumerate(threads):
thread.join()
while (not que.empty()):
job_status.append(que.get())
for job_status in job_status:
job_status = job_status[0]['job_states']
job_ids = [js['job_id'] for js in job_status]
job_methods = [js['job_input']['method'] for js in job_status]
self.assertCountEqual(job_ids, [job_id_1, job_id_2])
self.assertCountEqual(job_methods, [method_1, method_2])
jobs = self.mongo_util.get_jobs(job_ids=[job_id_1, job_id_2])
jobs.delete()
self.assertEqual(ori_job_count, Job.objects.count()) | @patch('installed_clients.CatalogClient.Catalog.get_module_version', autospec=True)
def test_check_jobs_stress(self, cc_get_mod_ver):
'\n \n '
cc_get_mod_ver.return_value = {'git_commit_hash': '123'}
thread_count = self.thread_count
ori_job_count = Job.objects.count()
runner = self.method_runner
method_1 = 'a_method'
method_2 = 'b_method'
job_params_1 = get_sample_job_params(method=method_1)
job_params_1['job_reqs'] = JobRequirements(1, 1, 1, 'njs')
job_params_2 = get_sample_job_params(method=method_2)
job_params_2['job_reqs'] = JobRequirements(1, 1, 1, 'njs')
job_id_1 = runner.get_runjob()._init_job_rec(self.user_id, job_params_1)
job_id_2 = runner.get_runjob()._init_job_rec(self.user_id, job_params_2)
threads = list()
job_status = list()
que = queue.Queue()
for index in range(thread_count):
x = threading.Thread(target=que.put(self.impl.check_jobs(ctx=self.ctx, params={'job_ids': [job_id_1, job_id_2]})))
threads.append(x)
x.start()
for (index, thread) in enumerate(threads):
thread.join()
while (not que.empty()):
job_status.append(que.get())
for job_status in job_status:
job_status = job_status[0]['job_states']
job_ids = [js['job_id'] for js in job_status]
job_methods = [js['job_input']['method'] for js in job_status]
self.assertCountEqual(job_ids, [job_id_1, job_id_2])
self.assertCountEqual(job_methods, [method_1, method_2])
jobs = self.mongo_util.get_jobs(job_ids=[job_id_1, job_id_2])
jobs.delete()
self.assertEqual(ori_job_count, Job.objects.count())<|docstring|>testing check jobs in multiple threads.<|endoftext|>
65060cee2b056d93d3384945f76cd49e20f5c1922b994f72a34fac92b9263e80 | def test_check_job_canceled_stress(self):
'\n testing check_job_canceled in multiple threads.\n '
thread_count = self.thread_count
ori_job_count = Job.objects.count()
runner = self.method_runner
job_params = get_sample_job_params()
job_params['job_reqs'] = JobRequirements(1, 1, 1, 'njs')
job_id_running = runner.get_runjob()._init_job_rec(self.user_id, job_params)
job_id_terminated = runner.get_runjob()._init_job_rec(self.user_id, job_params)
job_id_completed = runner.get_runjob()._init_job_rec(self.user_id, job_params)
self.impl.update_job_status(ctx=self.ctx, params={'job_id': job_id_running, 'status': 'running'})
self.impl.update_job_status(ctx=self.ctx, params={'job_id': job_id_terminated, 'status': 'terminated'})
self.impl.update_job_status(ctx=self.ctx, params={'job_id': job_id_completed, 'status': 'completed'})
threads = list()
job_canceled_status = list()
que = queue.Queue()
for index in range(thread_count):
x = threading.Thread(target=que.put(self.impl.check_job_canceled(ctx=self.ctx, params={'job_id': job_id_running})))
threads.append(x)
x.start()
y = threading.Thread(target=que.put(self.impl.check_job_canceled(ctx=self.ctx, params={'job_id': job_id_terminated})))
threads.append(y)
y.start()
z = threading.Thread(target=que.put(self.impl.check_job_canceled(ctx=self.ctx, params={'job_id': job_id_completed})))
threads.append(z)
z.start()
for (index, thread) in enumerate(threads):
thread.join()
while (not que.empty()):
job_canceled_status.append(que.get())
job_ids_returned = [jcs_return[0]['job_id'] for jcs_return in job_canceled_status]
self.assertEqual(len(job_ids_returned), (thread_count * 3))
self.assertEqual(job_ids_returned.count(job_id_running), thread_count)
self.assertEqual(job_ids_returned.count(job_id_terminated), thread_count)
self.assertEqual(job_ids_returned.count(job_id_completed), thread_count)
for job_canceled_status_return in job_canceled_status:
job_canceled_status_return = job_canceled_status_return[0]
if (job_canceled_status_return['job_id'] == job_id_running):
self.assertFalse(job_canceled_status_return['canceled'])
self.assertFalse(job_canceled_status_return['finished'])
if (job_canceled_status_return['job_id'] == job_id_terminated):
self.assertTrue(job_canceled_status_return['canceled'])
self.assertTrue(job_canceled_status_return['finished'])
if (job_canceled_status_return['job_id'] == job_id_completed):
self.assertFalse(job_canceled_status_return['canceled'])
self.assertTrue(job_canceled_status_return['finished'])
jobs = self.mongo_util.get_jobs(job_ids=[job_id_running, job_id_terminated, job_id_completed])
for job in jobs:
job.delete()
self.assertEqual(ori_job_count, Job.objects.count()) | testing check_job_canceled in multiple threads. | test/tests_for_sdkmr/ee2_load_test.py | test_check_job_canceled_stress | bio-boris/execution_engine2 | 1 | python | def test_check_job_canceled_stress(self):
'\n \n '
thread_count = self.thread_count
ori_job_count = Job.objects.count()
runner = self.method_runner
job_params = get_sample_job_params()
job_params['job_reqs'] = JobRequirements(1, 1, 1, 'njs')
job_id_running = runner.get_runjob()._init_job_rec(self.user_id, job_params)
job_id_terminated = runner.get_runjob()._init_job_rec(self.user_id, job_params)
job_id_completed = runner.get_runjob()._init_job_rec(self.user_id, job_params)
self.impl.update_job_status(ctx=self.ctx, params={'job_id': job_id_running, 'status': 'running'})
self.impl.update_job_status(ctx=self.ctx, params={'job_id': job_id_terminated, 'status': 'terminated'})
self.impl.update_job_status(ctx=self.ctx, params={'job_id': job_id_completed, 'status': 'completed'})
threads = list()
job_canceled_status = list()
que = queue.Queue()
for index in range(thread_count):
x = threading.Thread(target=que.put(self.impl.check_job_canceled(ctx=self.ctx, params={'job_id': job_id_running})))
threads.append(x)
x.start()
y = threading.Thread(target=que.put(self.impl.check_job_canceled(ctx=self.ctx, params={'job_id': job_id_terminated})))
threads.append(y)
y.start()
z = threading.Thread(target=que.put(self.impl.check_job_canceled(ctx=self.ctx, params={'job_id': job_id_completed})))
threads.append(z)
z.start()
for (index, thread) in enumerate(threads):
thread.join()
while (not que.empty()):
job_canceled_status.append(que.get())
job_ids_returned = [jcs_return[0]['job_id'] for jcs_return in job_canceled_status]
self.assertEqual(len(job_ids_returned), (thread_count * 3))
self.assertEqual(job_ids_returned.count(job_id_running), thread_count)
self.assertEqual(job_ids_returned.count(job_id_terminated), thread_count)
self.assertEqual(job_ids_returned.count(job_id_completed), thread_count)
for job_canceled_status_return in job_canceled_status:
job_canceled_status_return = job_canceled_status_return[0]
if (job_canceled_status_return['job_id'] == job_id_running):
self.assertFalse(job_canceled_status_return['canceled'])
self.assertFalse(job_canceled_status_return['finished'])
if (job_canceled_status_return['job_id'] == job_id_terminated):
self.assertTrue(job_canceled_status_return['canceled'])
self.assertTrue(job_canceled_status_return['finished'])
if (job_canceled_status_return['job_id'] == job_id_completed):
self.assertFalse(job_canceled_status_return['canceled'])
self.assertTrue(job_canceled_status_return['finished'])
jobs = self.mongo_util.get_jobs(job_ids=[job_id_running, job_id_terminated, job_id_completed])
for job in jobs:
job.delete()
self.assertEqual(ori_job_count, Job.objects.count()) | def test_check_job_canceled_stress(self):
'\n \n '
thread_count = self.thread_count
ori_job_count = Job.objects.count()
runner = self.method_runner
job_params = get_sample_job_params()
job_params['job_reqs'] = JobRequirements(1, 1, 1, 'njs')
job_id_running = runner.get_runjob()._init_job_rec(self.user_id, job_params)
job_id_terminated = runner.get_runjob()._init_job_rec(self.user_id, job_params)
job_id_completed = runner.get_runjob()._init_job_rec(self.user_id, job_params)
self.impl.update_job_status(ctx=self.ctx, params={'job_id': job_id_running, 'status': 'running'})
self.impl.update_job_status(ctx=self.ctx, params={'job_id': job_id_terminated, 'status': 'terminated'})
self.impl.update_job_status(ctx=self.ctx, params={'job_id': job_id_completed, 'status': 'completed'})
threads = list()
job_canceled_status = list()
que = queue.Queue()
for index in range(thread_count):
x = threading.Thread(target=que.put(self.impl.check_job_canceled(ctx=self.ctx, params={'job_id': job_id_running})))
threads.append(x)
x.start()
y = threading.Thread(target=que.put(self.impl.check_job_canceled(ctx=self.ctx, params={'job_id': job_id_terminated})))
threads.append(y)
y.start()
z = threading.Thread(target=que.put(self.impl.check_job_canceled(ctx=self.ctx, params={'job_id': job_id_completed})))
threads.append(z)
z.start()
for (index, thread) in enumerate(threads):
thread.join()
while (not que.empty()):
job_canceled_status.append(que.get())
job_ids_returned = [jcs_return[0]['job_id'] for jcs_return in job_canceled_status]
self.assertEqual(len(job_ids_returned), (thread_count * 3))
self.assertEqual(job_ids_returned.count(job_id_running), thread_count)
self.assertEqual(job_ids_returned.count(job_id_terminated), thread_count)
self.assertEqual(job_ids_returned.count(job_id_completed), thread_count)
for job_canceled_status_return in job_canceled_status:
job_canceled_status_return = job_canceled_status_return[0]
if (job_canceled_status_return['job_id'] == job_id_running):
self.assertFalse(job_canceled_status_return['canceled'])
self.assertFalse(job_canceled_status_return['finished'])
if (job_canceled_status_return['job_id'] == job_id_terminated):
self.assertTrue(job_canceled_status_return['canceled'])
self.assertTrue(job_canceled_status_return['finished'])
if (job_canceled_status_return['job_id'] == job_id_completed):
self.assertFalse(job_canceled_status_return['canceled'])
self.assertTrue(job_canceled_status_return['finished'])
jobs = self.mongo_util.get_jobs(job_ids=[job_id_running, job_id_terminated, job_id_completed])
for job in jobs:
job.delete()
self.assertEqual(ori_job_count, Job.objects.count())<|docstring|>testing check_job_canceled in multiple threads.<|endoftext|>
229a999d502593ad5e9c9a4b65e59140d5451f12e1f5737195c3bcb39c52ab39 | def test_get_job_logs_stress(self):
'\n testing get_job_logs in multiple threads.\n '
thread_count = self.thread_count
ori_job_count = Job.objects.count()
runner = self.method_runner
params = get_sample_job_params()
params['job_reqs'] = JobRequirements(1, 1, 1, 'njs')
job_id = runner.get_runjob()._init_job_rec(self.user_id, params)
ts = time.time()
job_line = [{'line': 'hello ee2', 'is_error': 1, 'ts': ts}]
self.impl.add_job_logs(ctx=self.ctx, params={'job_id': job_id}, lines=job_line)
threads = list()
job_lines = list()
que = queue.Queue()
for index in range(thread_count):
x = threading.Thread(target=que.put(self.impl.get_job_logs(ctx=self.ctx, params={'job_id': job_id})))
threads.append(x)
x.start()
for (index, thread) in enumerate(threads):
thread.join()
while (not que.empty()):
job_lines.append(que.get())
self.assertEqual(len(job_lines), thread_count)
for job_line in job_lines:
job_line = job_line[0]['lines'][0]
self.assertEqual(job_line['line'], 'hello ee2')
self.assertEqual(job_line['linepos'], 0)
self.assertEqual(job_line['is_error'], 1)
self.assertEqual(job_line['ts'], int((ts * 1000)))
jobs = self.mongo_util.get_jobs(job_ids=[job_id])
jobs.delete()
self.assertEqual(ori_job_count, Job.objects.count()) | testing get_job_logs in multiple threads. | test/tests_for_sdkmr/ee2_load_test.py | test_get_job_logs_stress | bio-boris/execution_engine2 | 1 | python | def test_get_job_logs_stress(self):
'\n \n '
thread_count = self.thread_count
ori_job_count = Job.objects.count()
runner = self.method_runner
params = get_sample_job_params()
params['job_reqs'] = JobRequirements(1, 1, 1, 'njs')
job_id = runner.get_runjob()._init_job_rec(self.user_id, params)
ts = time.time()
job_line = [{'line': 'hello ee2', 'is_error': 1, 'ts': ts}]
self.impl.add_job_logs(ctx=self.ctx, params={'job_id': job_id}, lines=job_line)
threads = list()
job_lines = list()
que = queue.Queue()
for index in range(thread_count):
x = threading.Thread(target=que.put(self.impl.get_job_logs(ctx=self.ctx, params={'job_id': job_id})))
threads.append(x)
x.start()
for (index, thread) in enumerate(threads):
thread.join()
while (not que.empty()):
job_lines.append(que.get())
self.assertEqual(len(job_lines), thread_count)
for job_line in job_lines:
job_line = job_line[0]['lines'][0]
self.assertEqual(job_line['line'], 'hello ee2')
self.assertEqual(job_line['linepos'], 0)
self.assertEqual(job_line['is_error'], 1)
self.assertEqual(job_line['ts'], int((ts * 1000)))
jobs = self.mongo_util.get_jobs(job_ids=[job_id])
jobs.delete()
self.assertEqual(ori_job_count, Job.objects.count()) | def test_get_job_logs_stress(self):
'\n \n '
thread_count = self.thread_count
ori_job_count = Job.objects.count()
runner = self.method_runner
params = get_sample_job_params()
params['job_reqs'] = JobRequirements(1, 1, 1, 'njs')
job_id = runner.get_runjob()._init_job_rec(self.user_id, params)
ts = time.time()
job_line = [{'line': 'hello ee2', 'is_error': 1, 'ts': ts}]
self.impl.add_job_logs(ctx=self.ctx, params={'job_id': job_id}, lines=job_line)
threads = list()
job_lines = list()
que = queue.Queue()
for index in range(thread_count):
x = threading.Thread(target=que.put(self.impl.get_job_logs(ctx=self.ctx, params={'job_id': job_id})))
threads.append(x)
x.start()
for (index, thread) in enumerate(threads):
thread.join()
while (not que.empty()):
job_lines.append(que.get())
self.assertEqual(len(job_lines), thread_count)
for job_line in job_lines:
job_line = job_line[0]['lines'][0]
self.assertEqual(job_line['line'], 'hello ee2')
self.assertEqual(job_line['linepos'], 0)
self.assertEqual(job_line['is_error'], 1)
self.assertEqual(job_line['ts'], int((ts * 1000)))
jobs = self.mongo_util.get_jobs(job_ids=[job_id])
jobs.delete()
self.assertEqual(ori_job_count, Job.objects.count())<|docstring|>testing get_job_logs in multiple threads.<|endoftext|>
e473f9bf616bb7ffe3ca4cd2260957ca3209e36713092a5fe96afc572d691e0f | def test_add_job_logs_stress(self):
'\n testing add_job_logs in multiple threads.\n '
thread_count = self.thread_count
ori_job_count = Job.objects.count()
print('original job count is', ori_job_count)
runner = self.method_runner
params = get_sample_job_params()
params['job_reqs'] = JobRequirements(1, 1, 1, 'njs')
job_id = runner.get_runjob()._init_job_rec(self.user_id, params)
ts = time.time()
job_line = [{'line': 'hello ee2', 'is_error': 1, 'ts': ts}]
threads = list()
que = queue.Queue()
print('Number of threads are', thread_count)
for index in range(thread_count):
x = threading.Thread(target=que.put(self.impl.add_job_logs(ctx=self.ctx, params={'job_id': job_id}, lines=job_line)))
threads.append(x)
x.start()
for (index, thread) in enumerate(threads):
thread.join()
job_lines = self.impl.get_job_logs(ctx=self.ctx, params={'job_id': job_id})[0]
self.assertEqual(job_lines['last_line_number'], (thread_count - 1))
lines = job_lines['lines']
self.assertEqual(len(lines), thread_count)
line_pos = list()
for line in lines:
self.assertEqual(line['line'], 'hello ee2')
self.assertEqual(line['is_error'], 1)
self.assertEqual(line['ts'], int((ts * 1000)))
line_pos.append(line['linepos'])
self.assertCountEqual(line_pos, list(range(0, thread_count)))
jobs = self.mongo_util.get_jobs(job_ids=[job_id])
for job in jobs:
job.delete()
self.assertEqual(ori_job_count, Job.objects.count()) | testing add_job_logs in multiple threads. | test/tests_for_sdkmr/ee2_load_test.py | test_add_job_logs_stress | bio-boris/execution_engine2 | 1 | python | def test_add_job_logs_stress(self):
'\n \n '
thread_count = self.thread_count
ori_job_count = Job.objects.count()
print('original job count is', ori_job_count)
runner = self.method_runner
params = get_sample_job_params()
params['job_reqs'] = JobRequirements(1, 1, 1, 'njs')
job_id = runner.get_runjob()._init_job_rec(self.user_id, params)
ts = time.time()
job_line = [{'line': 'hello ee2', 'is_error': 1, 'ts': ts}]
threads = list()
que = queue.Queue()
print('Number of threads are', thread_count)
for index in range(thread_count):
x = threading.Thread(target=que.put(self.impl.add_job_logs(ctx=self.ctx, params={'job_id': job_id}, lines=job_line)))
threads.append(x)
x.start()
for (index, thread) in enumerate(threads):
thread.join()
job_lines = self.impl.get_job_logs(ctx=self.ctx, params={'job_id': job_id})[0]
self.assertEqual(job_lines['last_line_number'], (thread_count - 1))
lines = job_lines['lines']
self.assertEqual(len(lines), thread_count)
line_pos = list()
for line in lines:
self.assertEqual(line['line'], 'hello ee2')
self.assertEqual(line['is_error'], 1)
self.assertEqual(line['ts'], int((ts * 1000)))
line_pos.append(line['linepos'])
self.assertCountEqual(line_pos, list(range(0, thread_count)))
jobs = self.mongo_util.get_jobs(job_ids=[job_id])
for job in jobs:
job.delete()
self.assertEqual(ori_job_count, Job.objects.count()) | def test_add_job_logs_stress(self):
'\n \n '
thread_count = self.thread_count
ori_job_count = Job.objects.count()
print('original job count is', ori_job_count)
runner = self.method_runner
params = get_sample_job_params()
params['job_reqs'] = JobRequirements(1, 1, 1, 'njs')
job_id = runner.get_runjob()._init_job_rec(self.user_id, params)
ts = time.time()
job_line = [{'line': 'hello ee2', 'is_error': 1, 'ts': ts}]
threads = list()
que = queue.Queue()
print('Number of threads are', thread_count)
for index in range(thread_count):
x = threading.Thread(target=que.put(self.impl.add_job_logs(ctx=self.ctx, params={'job_id': job_id}, lines=job_line)))
threads.append(x)
x.start()
for (index, thread) in enumerate(threads):
thread.join()
job_lines = self.impl.get_job_logs(ctx=self.ctx, params={'job_id': job_id})[0]
self.assertEqual(job_lines['last_line_number'], (thread_count - 1))
lines = job_lines['lines']
self.assertEqual(len(lines), thread_count)
line_pos = list()
for line in lines:
self.assertEqual(line['line'], 'hello ee2')
self.assertEqual(line['is_error'], 1)
self.assertEqual(line['ts'], int((ts * 1000)))
line_pos.append(line['linepos'])
self.assertCountEqual(line_pos, list(range(0, thread_count)))
jobs = self.mongo_util.get_jobs(job_ids=[job_id])
for job in jobs:
job.delete()
self.assertEqual(ori_job_count, Job.objects.count())<|docstring|>testing add_job_logs in multiple threads.<|endoftext|>
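Two conventions the log tests above depend on: a timestamp supplied in fractional seconds is stored as an integer number of milliseconds (hence the int(ts * 1000) assertions), and N concurrently appended lines are expected to come back with linepos values 0..N-1 and last_line_number N-1. A plain-Python restatement of those expectations, with thread_count standing in for self.thread_count:

import time

thread_count = 10                                   # stand-in for self.thread_count
expected_positions = list(range(thread_count))      # linepos values asserted above
assert expected_positions[-1] == thread_count - 1   # matches the asserted last_line_number
ts_seconds = time.time()
stored_ts = int(ts_seconds * 1000)                  # seconds -> integer milliseconds, as asserted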
5a60f74715737fef86d9a1d1ed89315a805c17afdb5c350cf299206da363867a | def update_states(index, job_ids_queued, job_ids_running, job_ids_finish):
'\n update jobs status in one thread\n '
runner.get_runjob().update_job_to_queued(job_ids_queued[index], 'scheduler_id')
runner.get_jobs_status().start_job(job_ids_running[index])
runner.get_jobs_status().start_job(job_ids_finish[index])
job_output = {'version': '11', 'result': {'result': 1}, 'id': 'EXAMPLE_KEY'}
runner.finish_job(job_id=job_ids_finish[index], job_output=job_output) | update jobs status in one thread | test/tests_for_sdkmr/ee2_load_test.py | update_states | bio-boris/execution_engine2 | 1 | python | def update_states(index, job_ids_queued, job_ids_running, job_ids_finish):
'\n \n '
runner.get_runjob().update_job_to_queued(job_ids_queued[index], 'scheduler_id')
runner.get_jobs_status().start_job(job_ids_running[index])
runner.get_jobs_status().start_job(job_ids_finish[index])
job_output = {'version': '11', 'result': {'result': 1}, 'id': 'EXAMPLE_KEY'}
runner.finish_job(job_id=job_ids_finish[index], job_output=job_output) | def update_states(index, job_ids_queued, job_ids_running, job_ids_finish):
'\n \n '
runner.get_runjob().update_job_to_queued(job_ids_queued[index], 'scheduler_id')
runner.get_jobs_status().start_job(job_ids_running[index])
runner.get_jobs_status().start_job(job_ids_finish[index])
job_output = {'version': '11', 'result': {'result': 1}, 'id': 'EXAMPLE_KEY'}
runner.finish_job(job_id=job_ids_finish[index], job_output=job_output)<|docstring|>update jobs status in one thread<|endoftext|> |
e5d296863db2821b429a18cd04e22d7a777f284f44ac26fd754308e0ff69d84b | def update_states(index, job_ids_queued, job_ids_running, job_ids_completed):
'\n update jobs status in one thread\n '
self.impl.update_job_status(ctx=self.ctx, params={'job_id': job_ids_queued[index], 'status': 'queued'})
self.impl.update_job_status(ctx=self.ctx, params={'job_id': job_ids_running[index], 'status': 'running'})
self.impl.update_job_status(ctx=self.ctx, params={'job_id': job_ids_completed[index], 'status': 'completed'}) | update jobs status in one thread | test/tests_for_sdkmr/ee2_load_test.py | update_states | bio-boris/execution_engine2 | 1 | python | def update_states(index, job_ids_queued, job_ids_running, job_ids_completed):
'\n \n '
self.impl.update_job_status(ctx=self.ctx, params={'job_id': job_ids_queued[index], 'status': 'queued'})
self.impl.update_job_status(ctx=self.ctx, params={'job_id': job_ids_running[index], 'status': 'running'})
self.impl.update_job_status(ctx=self.ctx, params={'job_id': job_ids_completed[index], 'status': 'completed'}) | def update_states(index, job_ids_queued, job_ids_running, job_ids_completed):
'\n \n '
self.impl.update_job_status(ctx=self.ctx, params={'job_id': job_ids_queued[index], 'status': 'queued'})
self.impl.update_job_status(ctx=self.ctx, params={'job_id': job_ids_running[index], 'status': 'running'})
self.impl.update_job_status(ctx=self.ctx, params={'job_id': job_ids_completed[index], 'status': 'completed'})<|docstring|>update jobs status in one thread<|endoftext|> |
f5a1b74e41274deb8fcf7bab5fe03ae9d5e0aa16a921b595714b48e23e2ab656 | def test_unitcell(vasp_files):
'\n HEAD OF MICROSCOPIC STATIC DIELECTRIC TENSOR (INDEPENDENT PARTICLE, excluding Hartree and local field effects)\n ------------------------------------------------------\n 1.269877 0.000000 -0.000000\n 0.000000 1.269877 0.000000\n 0.000000 0.000000 1.269877\n ------------------------------------------------------\n\n MACROSCOPIC STATIC DIELECTRIC TENSOR (including local field effects in DFT)\n ------------------------------------------------------\n 1.255879 0.000000 -0.000000\n -0.000000 1.255879 0.000000\n -0.000000 0.000000 1.255879\n ------------------------------------------------------\n '
path = (vasp_files / 'unitcell_Ne_solid')
unitcell = make_unitcell_from_vasp(vasprun_band=Vasprun((path / 'vasprun-band.xml')), outcar_band=Outcar((path / 'OUTCAR-band')), outcar_dielectric_clamped=Outcar((path / 'OUTCAR-dielectric')), outcar_dielectric_ionic=Outcar((path / 'OUTCAR-dielectric')))
assert (unitcell.system == 'Ne')
assert (unitcell.vbm == (- 10.3168))
assert (unitcell.cbm == 1.2042)
assert (unitcell.ele_dielectric_const[0][0] == 1.255879)
assert (unitcell.ion_dielectric_const[0][0] == 0.0) | HEAD OF MICROSCOPIC STATIC DIELECTRIC TENSOR (INDEPENDENT PARTICLE, excluding Hartree and local field effects)
------------------------------------------------------
1.269877 0.000000 -0.000000
0.000000 1.269877 0.000000
0.000000 0.000000 1.269877
------------------------------------------------------
MACROSCOPIC STATIC DIELECTRIC TENSOR (including local field effects in DFT)
------------------------------------------------------
1.255879 0.000000 -0.000000
-0.000000 1.255879 0.000000
-0.000000 0.000000 1.255879
------------------------------------------------------ | pydefect/tests/cli/vasp/test_make_unitcell.py | test_unitcell | kumagai-group/pydefect | 20 | python | def test_unitcell(vasp_files):
'\n HEAD OF MICROSCOPIC STATIC DIELECTRIC TENSOR (INDEPENDENT PARTICLE, excluding Hartree and local field effects)\n ------------------------------------------------------\n 1.269877 0.000000 -0.000000\n 0.000000 1.269877 0.000000\n 0.000000 0.000000 1.269877\n ------------------------------------------------------\n\n MACROSCOPIC STATIC DIELECTRIC TENSOR (including local field effects in DFT)\n ------------------------------------------------------\n 1.255879 0.000000 -0.000000\n -0.000000 1.255879 0.000000\n -0.000000 0.000000 1.255879\n ------------------------------------------------------\n '
path = (vasp_files / 'unitcell_Ne_solid')
unitcell = make_unitcell_from_vasp(vasprun_band=Vasprun((path / 'vasprun-band.xml')), outcar_band=Outcar((path / 'OUTCAR-band')), outcar_dielectric_clamped=Outcar((path / 'OUTCAR-dielectric')), outcar_dielectric_ionic=Outcar((path / 'OUTCAR-dielectric')))
assert (unitcell.system == 'Ne')
assert (unitcell.vbm == (- 10.3168))
assert (unitcell.cbm == 1.2042)
assert (unitcell.ele_dielectric_const[0][0] == 1.255879)
assert (unitcell.ion_dielectric_const[0][0] == 0.0) | def test_unitcell(vasp_files):
'\n HEAD OF MICROSCOPIC STATIC DIELECTRIC TENSOR (INDEPENDENT PARTICLE, excluding Hartree and local field effects)\n ------------------------------------------------------\n 1.269877 0.000000 -0.000000\n 0.000000 1.269877 0.000000\n 0.000000 0.000000 1.269877\n ------------------------------------------------------\n\n MACROSCOPIC STATIC DIELECTRIC TENSOR (including local field effects in DFT)\n ------------------------------------------------------\n 1.255879 0.000000 -0.000000\n -0.000000 1.255879 0.000000\n -0.000000 0.000000 1.255879\n ------------------------------------------------------\n '
path = (vasp_files / 'unitcell_Ne_solid')
unitcell = make_unitcell_from_vasp(vasprun_band=Vasprun((path / 'vasprun-band.xml')), outcar_band=Outcar((path / 'OUTCAR-band')), outcar_dielectric_clamped=Outcar((path / 'OUTCAR-dielectric')), outcar_dielectric_ionic=Outcar((path / 'OUTCAR-dielectric')))
assert (unitcell.system == 'Ne')
assert (unitcell.vbm == (- 10.3168))
assert (unitcell.cbm == 1.2042)
assert (unitcell.ele_dielectric_const[0][0] == 1.255879)
assert (unitcell.ion_dielectric_const[0][0] == 0.0)<|docstring|>HEAD OF MICROSCOPIC STATIC DIELECTRIC TENSOR (INDEPENDENT PARTICLE, excluding Hartree and local field effects)
------------------------------------------------------
1.269877 0.000000 -0.000000
0.000000 1.269877 0.000000
0.000000 0.000000 1.269877
------------------------------------------------------
MACROSCOPIC STATIC DIELECTRIC TENSOR (including local field effects in DFT)
------------------------------------------------------
1.255879 0.000000 -0.000000
-0.000000 1.255879 0.000000
-0.000000 0.000000 1.255879
------------------------------------------------------<|endoftext|> |
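For reference, the OUTCAR excerpt above lists two tensors: the independent-particle value (1.269877, excluding Hartree and local field effects) and the macroscopic value including local field effects (1.255879). The assertions in test_unitcell check the latter for the electronic part and zero for the ionic part. A small plain-Python restatement, with the numbers copied from the excerpt rather than produced by pydefect API calls:

independent_particle = 1.269877   # excludes Hartree and local field effects
macroscopic = 1.255879            # includes local field effects in DFT
ionic = 0.0                       # asserted ionic contribution for solid Ne
assert macroscopic < independent_particle
static_total = macroscopic + ionic   # assuming the usual electronic + ionic decomposition
assert static_total == 1.255879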
e3ecf7ccd01ff9de2c0ec8cb6223c6cbb4bab7ca3bcd288af0ee52919c55d437 | def _reduce_glare(input_img, tol=250, avg_mode=False):
'\n Clips images, so that the highest values are lower\n '
if (not avg_mode):
return np.clip(input_img, 0, tol)
avg = np.average(input_img[((input_img.shape[0] // 3):((2 * input_img.shape[0]) // 3), (input_img.shape[1] // 3):((2 * input_img.shape[1]) // 3), 0)])
avg1 = np.average(input_img[((input_img.shape[0] // 3):((2 * input_img.shape[0]) // 3), (input_img.shape[1] // 3):((2 * input_img.shape[1]) // 3), 1)])
avg2 = np.average(input_img[((input_img.shape[0] // 3):((2 * input_img.shape[0]) // 3), (input_img.shape[1] // 3):((2 * input_img.shape[1]) // 3), 2)])
input_img[(input_img[(:, :, 0)] > tol)] = (- avg)
input_img[(input_img[(:, :, 1)] > tol)] = (- avg1)
input_img[(input_img[(:, :, 2)] > tol)] = (- avg2)
return input_img | Clips images, so that the highest values are lower | src/matkirpack/plotload/PlotLoad.py | _reduce_glare | matkir/Master_programs | 0 | python | def _reduce_glare(input_img, tol=250, avg_mode=False):
'\n \n '
if (not avg_mode):
return np.clip(input_img, 0, tol)
avg = np.average(input_img[((input_img.shape[0] // 3):((2 * input_img.shape[0]) // 3), (input_img.shape[1] // 3):((2 * input_img.shape[1]) // 3), 0)])
avg1 = np.average(input_img[((input_img.shape[0] // 3):((2 * input_img.shape[0]) // 3), (input_img.shape[1] // 3):((2 * input_img.shape[1]) // 3), 1)])
avg2 = np.average(input_img[((input_img.shape[0] // 3):((2 * input_img.shape[0]) // 3), (input_img.shape[1] // 3):((2 * input_img.shape[1]) // 3), 2)])
input_img[(input_img[(:, :, 0)] > tol)] = (- avg)
input_img[(input_img[(:, :, 1)] > tol)] = (- avg1)
input_img[(input_img[(:, :, 2)] > tol)] = (- avg2)
return input_img | def _reduce_glare(input_img, tol=250, avg_mode=False):
'\n \n '
if (not avg_mode):
return np.clip(input_img, 0, tol)
avg = np.average(input_img[((input_img.shape[0] // 3):((2 * input_img.shape[0]) // 3), (input_img.shape[1] // 3):((2 * input_img.shape[1]) // 3), 0)])
avg1 = np.average(input_img[((input_img.shape[0] // 3):((2 * input_img.shape[0]) // 3), (input_img.shape[1] // 3):((2 * input_img.shape[1]) // 3), 1)])
avg2 = np.average(input_img[((input_img.shape[0] // 3):((2 * input_img.shape[0]) // 3), (input_img.shape[1] // 3):((2 * input_img.shape[1]) // 3), 2)])
input_img[(input_img[(:, :, 0)] > tol)] = (- avg)
input_img[(input_img[(:, :, 1)] > tol)] = (- avg1)
input_img[(input_img[(:, :, 2)] > tol)] = (- avg2)
return input_img<|docstring|>Clips images, so that the highest values are lower<|endoftext|> |
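A hedged usage sketch for _reduce_glare: with the default avg_mode=False it simply clips every channel at tol, while avg_mode=True overwrites over-bright pixels using centre-crop channel averages (negated, as written). The import path and the synthetic image are assumptions for illustration only.

import numpy as np
from plotload.PlotLoad import _reduce_glare  # import path assumed

img = np.random.randint(0, 256, size=(256, 256, 3)).astype(np.float32)  # synthetic image
clipped = _reduce_glare(img.copy(), tol=250)                  # plain clipping
softened = _reduce_glare(img.copy(), tol=250, avg_mode=True)  # average-based replacement
assert clipped.max() <= 250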
78470ef620dabeb11e28b5878ce15ad1c987f02ee04b73a49dad8113455f78d5 | def _crop_img(input_img, gray, tol=20, erosion=True, total=False):
'\n Removes the black bars around images\n '
if total:
kernel = np.ones((20, 20), np.uint8)
gray = cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel)
mask = (gray > 20)
mask = cv2.morphologyEx(mask.astype(np.float32), cv2.MORPH_OPEN, kernel)
ret_img = input_img[np.ix_(mask.any(1), mask.any(0))]
return _crop_center(ret_img, int((input_img.shape[0] * total)))
if erosion:
kernel = np.ones((5, 5), np.uint8)
gray = cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel)
mask = (gray > tol)
return input_img[np.ix_(mask.any(1), mask.any(0))] | Removes the black bars around images | src/matkirpack/plotload/PlotLoad.py | _crop_img | matkir/Master_programs | 0 | python | def _crop_img(input_img, gray, tol=20, erosion=True, total=False):
'\n \n '
if total:
kernel = np.ones((20, 20), np.uint8)
gray = cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel)
mask = (gray > 20)
mask = cv2.morphologyEx(mask.astype(np.float32), cv2.MORPH_OPEN, kernel)
ret_img = input_img[np.ix_(mask.any(1), mask.any(0))]
return _crop_center(ret_img, int((input_img.shape[0] * total)))
if erosion:
kernel = np.ones((5, 5), np.uint8)
gray = cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel)
mask = (gray > tol)
return input_img[np.ix_(mask.any(1), mask.any(0))] | def _crop_img(input_img, gray, tol=20, erosion=True, total=False):
'\n \n '
if total:
kernel = np.ones((20, 20), np.uint8)
gray = cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel)
mask = (gray > 20)
mask = cv2.morphologyEx(mask.astype(np.float32), cv2.MORPH_OPEN, kernel)
ret_img = input_img[np.ix_(mask.any(1), mask.any(0))]
return _crop_center(ret_img, int((input_img.shape[0] * total)))
if erosion:
kernel = np.ones((5, 5), np.uint8)
gray = cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel)
mask = (gray > tol)
return input_img[np.ix_(mask.any(1), mask.any(0))]<|docstring|>Removes the black bars around images<|endoftext|> |
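A usage sketch for _crop_img, which needs both the colour image and its grayscale counterpart so the thresholded (optionally eroded) mask can drop the black borders; the file path and the OpenCV reads are illustrative assumptions about how callers prepare the inputs.

import cv2
from plotload.PlotLoad import _crop_img  # import path assumed

path = 'some_endoscopy_frame.jpg'  # placeholder path
colour = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
gray = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2GRAY)
cropped = _crop_img(colour, gray, tol=20, erosion=True)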
f657b7c470c4568c196de7f1c028eb8abc6248e27cc5324df09bd3003f435468 | def _find_folder(data_type):
'\n Finds the folder used from the datatype\n :param data_type: path in kvasir\n :return: abs path to folder\n '
if (data_type == None):
folder = os.path.expanduser('~')
folder = (folder + '/Documents/kvasir-dataset-v2/none/')
elif (type(data_type) == str):
if (('.png' in data_type) or ('.jpg' in data_type)):
return data_type
if (data_type[0] == '/'):
return data_type
folder = os.path.expanduser('~')
folder = (((folder + '/Documents/kvasir-dataset-v2/') + data_type) + '/')
if (not os.path.isdir(folder)):
folder = os.path.expanduser('~')
folder = (folder + '/Documents/kvasir-dataset-v2/blanding/')
else:
folder = os.path.expanduser('~')
folder = (folder + '/Documents/kvasir-dataset-v2/blanding/')
return folder | Finds the folder used from the datatype
:param data_type: path in kvasir
:return: abs path to folder | src/matkirpack/plotload/PlotLoad.py | _find_folder | matkir/Master_programs | 0 | python | def _find_folder(data_type):
'\n Finds the folder used from the datatype\n :param data_type: path in kvasir\n :return: abs path to folder\n '
if (data_type == None):
folder = os.path.expanduser('~')
folder = (folder + '/Documents/kvasir-dataset-v2/none/')
elif (type(data_type) == str):
if (('.png' in data_type) or ('.jpg' in data_type)):
return data_type
if (data_type[0] == '/'):
return data_type
folder = os.path.expanduser('~')
folder = (((folder + '/Documents/kvasir-dataset-v2/') + data_type) + '/')
if (not os.path.isdir(folder)):
folder = os.path.expanduser('~')
folder = (folder + '/Documents/kvasir-dataset-v2/blanding/')
else:
folder = os.path.expanduser('~')
folder = (folder + '/Documents/kvasir-dataset-v2/blanding/')
return folder | def _find_folder(data_type):
'\n Finds the folder used from the datatype\n :param data_type: path in kvasir\n :return: abs path to folder\n '
if (data_type == None):
folder = os.path.expanduser('~')
folder = (folder + '/Documents/kvasir-dataset-v2/none/')
elif (type(data_type) == str):
if (('.png' in data_type) or ('.jpg' in data_type)):
return data_type
if (data_type[0] == '/'):
return data_type
folder = os.path.expanduser('~')
folder = (((folder + '/Documents/kvasir-dataset-v2/') + data_type) + '/')
if (not os.path.isdir(folder)):
folder = os.path.expanduser('~')
folder = (folder + '/Documents/kvasir-dataset-v2/blanding/')
else:
folder = os.path.expanduser('~')
folder = (folder + '/Documents/kvasir-dataset-v2/blanding/')
return folder<|docstring|>Finds the folder used from the datatype
:param data_type: path in kvasir
:return: abs path to folder<|endoftext|> |
1de4a9808e242bba4f7332bf2d83d7f3eea4f7b71589bf86f025f70df42e3111 | def load_polyp_data(img_shape, data_type=None, rot=False, crop=True, glare=False):
'\n Loads the polyp data\n '
if ('-l' in sys.argv):
return np.load('train_data.npy')
folder = _find_folder(data_type)
if rot:
data = np.ndarray(shape=((len(os.listdir(folder)) * 4), img_shape[0], img_shape[1], img_shape[2]), dtype=np.int32)
print(f'loading {(len(os.listdir(folder)) * 4)} images')
else:
data = np.ndarray(shape=(len(os.listdir(folder)), img_shape[0], img_shape[1], img_shape[2]), dtype=np.int32)
print(f'loading {len(os.listdir(folder))} images')
i = 0
for img in tqdm(sorted(os.listdir(folder))):
path = os.path.join(folder, img)
save = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
if crop:
gray = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2GRAY)
save = _crop_img(save, gray)
save = cv2.resize(save, (img_shape[1], img_shape[0]))
if rot:
for r in [0, 90, 180, 270]:
M = cv2.getRotationMatrix2D(((img_shape[1] / 2), (img_shape[0] / 2)), r, 1)
dst = cv2.warpAffine(save, M, (img_shape[1], img_shape[0]))
data[i] = dst
i += 1
else:
data[i] = save
i += 1
data = ((data.astype(np.float32) - 127.5) / 127.5)
return data | Loads the polyp data | src/matkirpack/plotload/PlotLoad.py | load_polyp_data | matkir/Master_programs | 0 | python | def load_polyp_data(img_shape, data_type=None, rot=False, crop=True, glare=False):
'\n \n '
if ('-l' in sys.argv):
return np.load('train_data.npy')
folder = _find_folder(data_type)
if rot:
data = np.ndarray(shape=((len(os.listdir(folder)) * 4), img_shape[0], img_shape[1], img_shape[2]), dtype=np.int32)
print(f'loading {(len(os.listdir(folder)) * 4)} images')
else:
data = np.ndarray(shape=(len(os.listdir(folder)), img_shape[0], img_shape[1], img_shape[2]), dtype=np.int32)
print(f'loading {len(os.listdir(folder))} images')
i = 0
for img in tqdm(sorted(os.listdir(folder))):
path = os.path.join(folder, img)
save = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
if crop:
gray = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2GRAY)
save = _crop_img(save, gray)
save = cv2.resize(save, (img_shape[1], img_shape[0]))
if rot:
for r in [0, 90, 180, 270]:
M = cv2.getRotationMatrix2D(((img_shape[1] / 2), (img_shape[0] / 2)), r, 1)
dst = cv2.warpAffine(save, M, (img_shape[1], img_shape[0]))
data[i] = dst
i += 1
else:
data[i] = save
i += 1
data = ((data.astype(np.float32) - 127.5) / 127.5)
return data | def load_polyp_data(img_shape, data_type=None, rot=False, crop=True, glare=False):
'\n \n '
if ('-l' in sys.argv):
return np.load('train_data.npy')
folder = _find_folder(data_type)
if rot:
data = np.ndarray(shape=((len(os.listdir(folder)) * 4), img_shape[0], img_shape[1], img_shape[2]), dtype=np.int32)
print(f'loading {(len(os.listdir(folder)) * 4)} images')
else:
data = np.ndarray(shape=(len(os.listdir(folder)), img_shape[0], img_shape[1], img_shape[2]), dtype=np.int32)
print(f'loading {len(os.listdir(folder))} images')
i = 0
for img in tqdm(sorted(os.listdir(folder))):
path = os.path.join(folder, img)
save = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
if crop:
gray = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2GRAY)
save = _crop_img(save, gray)
save = cv2.resize(save, (img_shape[1], img_shape[0]))
if rot:
for r in [0, 90, 180, 270]:
M = cv2.getRotationMatrix2D(((img_shape[1] / 2), (img_shape[0] / 2)), r, 1)
dst = cv2.warpAffine(save, M, (img_shape[1], img_shape[0]))
data[i] = dst
i += 1
else:
data[i] = save
i += 1
data = ((data.astype(np.float32) - 127.5) / 127.5)
return data<|docstring|>Loads the polyp data<|endoftext|> |
3e0c19af4f047bb9acf15c0f9b43803d64bc016ca0e07218890bc3f943c732bb | def load_polyp_batch(img_shape, batch_size, data_type=None, rot=False, crop=True, glare=False):
'\n Loads the polyp data, in the form of random images from a batch\n '
folder = _find_folder(data_type)
data = np.ndarray(shape=(batch_size, img_shape[0], img_shape[1], img_shape[2]), dtype=np.int32)
i = 0
imgs = np.random.choice(os.listdir(folder), batch_size, replace=True)
for img in imgs:
path = os.path.join(folder, img)
save = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
if crop:
gray = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2GRAY)
save = _crop_img(save, gray)
save = cv2.resize(save, (img_shape[1], img_shape[0]))
if rot:
r = np.random.choice([0, 90, 180, 270], p=[0.7, 0.1, 0.1, 0.1])
else:
r = 0
if (r != 0):
M = cv2.getRotationMatrix2D(((img_shape[1] / 2), (img_shape[0] / 2)), r, 1)
dst = cv2.warpAffine(save, M, (img_shape[1], img_shape[0]))
data[i] = dst
i += 1
else:
data[i] = save
i += 1
data = ((data.astype(np.float32) - 127.5) / 127.5)
np.save('train_data.npy', data)
return data | Loads the polyp data, in the form of random images from a batch | src/matkirpack/plotload/PlotLoad.py | load_polyp_batch | matkir/Master_programs | 0 | python | def load_polyp_batch(img_shape, batch_size, data_type=None, rot=False, crop=True, glare=False):
'\n \n '
folder = _find_folder(data_type)
data = np.ndarray(shape=(batch_size, img_shape[0], img_shape[1], img_shape[2]), dtype=np.int32)
i = 0
imgs = np.random.choice(os.listdir(folder), batch_size, replace=True)
for img in imgs:
path = os.path.join(folder, img)
save = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
if crop:
gray = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2GRAY)
save = _crop_img(save, gray)
save = cv2.resize(save, (img_shape[1], img_shape[0]))
if rot:
r = np.random.choice([0, 90, 180, 270], p=[0.7, 0.1, 0.1, 0.1])
else:
r = 0
if (r != 0):
M = cv2.getRotationMatrix2D(((img_shape[1] / 2), (img_shape[0] / 2)), r, 1)
dst = cv2.warpAffine(save, M, (img_shape[1], img_shape[0]))
data[i] = dst
i += 1
else:
data[i] = save
i += 1
data = ((data.astype(np.float32) - 127.5) / 127.5)
np.save('train_data.npy', data)
return data | def load_polyp_batch(img_shape, batch_size, data_type=None, rot=False, crop=True, glare=False):
'\n \n '
folder = _find_folder(data_type)
data = np.ndarray(shape=(batch_size, img_shape[0], img_shape[1], img_shape[2]), dtype=np.int32)
i = 0
imgs = np.random.choice(os.listdir(folder), batch_size, replace=True)
for img in imgs:
path = os.path.join(folder, img)
save = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
if crop:
gray = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2GRAY)
save = _crop_img(save, gray)
save = cv2.resize(save, (img_shape[1], img_shape[0]))
if rot:
r = np.random.choice([0, 90, 180, 270], p=[0.7, 0.1, 0.1, 0.1])
else:
r = 0
if (r != 0):
M = cv2.getRotationMatrix2D(((img_shape[1] / 2), (img_shape[0] / 2)), r, 1)
dst = cv2.warpAffine(save, M, (img_shape[1], img_shape[0]))
data[i] = dst
i += 1
else:
data[i] = save
i += 1
data = ((data.astype(np.float32) - 127.5) / 127.5)
np.save('train_data.npy', data)
return data<|docstring|>Loads the polyp data, in the form of random images from a batch<|endoftext|>
cb8a574074fdb63d06745f7659f82a6fc27a0022ed0a143189842f2a14faa9d0 | def load_one_img(img_shape, dest=None, crop=False, glare=False, total=0, printable=False, extra_dim=False):
'\n Loads a specific img, or a random one if none is declared\n '
folder = _find_folder(dest)
if ((not ('.png' in folder)) and (not ('.jpg' in folder))):
img = (folder + np.random.choice(os.listdir(folder), 1)[0])
else:
img = folder
save = cv2.cvtColor(cv2.imread(img), cv2.COLOR_BGR2RGB)
gray = cv2.cvtColor(cv2.imread(img), cv2.COLOR_BGR2GRAY)
if crop:
save = _crop_img(save, gray, total=total)
if glare:
save = _reduce_glare(save)
save = cv2.resize(save, (img_shape[1], img_shape[0]))
data = ((save.astype(np.float32) - 127.5) / 127.5)
if printable:
data = ((data * 0.5) + 0.5)
return data
if extra_dim:
data = np.expand_dims(data, axis=0)
return data
return (data, img) | Loads a specific img, or a random one if none is declared | src/matkirpack/plotload/PlotLoad.py | load_one_img | matkir/Master_programs | 0 | python | def load_one_img(img_shape, dest=None, crop=False, glare=False, total=0, printable=False, extra_dim=False):
'\n \n '
folder = _find_folder(dest)
if ((not ('.png' in folder)) and (not ('.jpg' in folder))):
img = (folder + np.random.choice(os.listdir(folder), 1)[0])
else:
img = folder
save = cv2.cvtColor(cv2.imread(img), cv2.COLOR_BGR2RGB)
gray = cv2.cvtColor(cv2.imread(img), cv2.COLOR_BGR2GRAY)
if crop:
save = _crop_img(save, gray, total=total)
if glare:
save = _reduce_glare(save)
save = cv2.resize(save, (img_shape[1], img_shape[0]))
data = ((save.astype(np.float32) - 127.5) / 127.5)
if printable:
data = ((data * 0.5) + 0.5)
return data
if extra_dim:
data = np.expand_dims(data, axis=0)
return data
return (data, img) | def load_one_img(img_shape, dest=None, crop=False, glare=False, total=0, printable=False, extra_dim=False):
'\n \n '
folder = _find_folder(dest)
if ((not ('.png' in folder)) and (not ('.jpg' in folder))):
img = (folder + np.random.choice(os.listdir(folder), 1)[0])
else:
img = folder
save = cv2.cvtColor(cv2.imread(img), cv2.COLOR_BGR2RGB)
gray = cv2.cvtColor(cv2.imread(img), cv2.COLOR_BGR2GRAY)
if crop:
save = _crop_img(save, gray, total=total)
if glare:
save = _reduce_glare(save)
save = cv2.resize(save, (img_shape[1], img_shape[0]))
data = ((save.astype(np.float32) - 127.5) / 127.5)
if printable:
data = ((data * 0.5) + 0.5)
return data
if extra_dim:
data = np.expand_dims(data, axis=0)
return data
return (data, img)<|docstring|>Loads a spessific img, or random if non declared<|endoftext|> |
af7ddd278add0ee84eae013ffa92faf41e2fac603ac263b6bb797b35a7dd33bb | def read_query(self, file: str) -> (str, str):
'\n Reads a SQL file in.\n Note: Relies on SQL script being named the same as table or View it is creating.\n\n :param file: String of the file to read query from.\n :return: Tuple of strings of the table name and SQL query from the file.\n '
(file_name, file_extension) = os.path.splitext(p=file)
if (file_extension == '.sql'):
with open(file=os.path.join(self.script_dir, file), mode='r') as f:
query = f.read()
return (file_name, query)
else:
raise Exception(f'''Passed in a {file_extension} file.
Please pass in a .sql file instead.''') | Reads a SQL file in.
Note: Relies on SQL script being named the same as table or View it is creating.
:param file: String of the file to read query from.
:return: Tuple of strings of the table name and SQL query from the file. | extractor.py | read_query | avisionh/sqlquerygraph | 3 | python | def read_query(self, file: str) -> (str, str):
'\n Reads a SQL file in.\n Note: Relies on SQL script being named the same as table or View it is creating.\n\n :param file: String of the file to read query from.\n :return: Tuple of strings of the table name and SQL query from the file.\n '
(file_name, file_extension) = os.path.splitext(p=file)
if (file_extension == '.sql'):
with open(file=os.path.join(self.script_dir, file), mode='r') as f:
query = f.read()
return (file_name, query)
else:
raise Exception(f'Passed in a {file_extension} file.
Please pass in a .sql file instead.') | def read_query(self, file: str) -> (str, str):
'\n Reads a SQL file in.\n Note: Relies on SQL script being named the same as table or View it is creating.\n\n :param file: String of the file to read query from.\n :return: Tuple of strings of the table name and SQL query from the file.\n '
(file_name, file_extension) = os.path.splitext(p=file)
if (file_extension == '.sql'):
with open(file=os.path.join(self.script_dir, file), mode='r') as f:
query = f.read()
return (file_name, query)
else:
raise Exception(f'Passed in a {file_extension} file.
Please pass in a .sql file instead.')<|docstring|>Reads a SQL file in.
Note: Relies on SQL script being named the same as table or View it is creating.
:param file: String of the file to read query from.
:return: Tuple of strings of the table name and SQL query from the file.<|endoftext|> |
d9b902d4e1e84f7ed2a037dfb2bd2a301d75d3dc043ead0661153df9c68082e7 | @staticmethod
def clean_query(query: str, str_to_remove: Union[(str, list)]=None) -> str:
'\n Cleans a query so it can be parsed.\n\n :param query: String of the query to clean.\n :param str_to_remove: String or list of strings to remove from the query.\n :return: String of the cleaned query to parse.\n '
query = query.replace('\n', ' ')
query = re.sub(pattern='\\s+', repl=' ', string=query)
if (str_to_remove is not None):
for txt in str_to_remove:
query = query.replace(txt, '')
return query | Cleans a query so it can be parsed.
:param query: String of the query to clean.
:param str_to_remove: String or list of strings to remove from the query.
:return: String of the cleaned query to parse. | extractor.py | clean_query | avisionh/sqlquerygraph | 3 | python | @staticmethod
def clean_query(query: str, str_to_remove: Union[(str, list)]=None) -> str:
'\n Cleans a query so it can be parsed.\n\n :param query: String of the query to clean.\n :param str_to_remove: String or list of strings to remove from the query.\n :return: String of the cleaned query to parse.\n '
query = query.replace('\n', ' ')
query = re.sub(pattern='\\s+', repl=' ', string=query)
if (str_to_remove is not None):
for txt in str_to_remove:
query = query.replace(txt, )
return query | @staticmethod
def clean_query(query: str, str_to_remove: Union[(str, list)]=None) -> str:
'\n Cleans a query so it can be parsed.\n\n :param query: String of the query to clean.\n :param str_to_remove: String or list of strings to remove from the query.\n :return: String of the cleaned query to parse.\n '
query = query.replace('\n', ' ')
query = re.sub(pattern='\\s+', repl=' ', string=query)
if (str_to_remove is not None):
for txt in str_to_remove:
query = query.replace(txt, )
return query<|docstring|>Cleans a query so it can be parsed.
:param query: String of the query to clean.
:param str_to_remove: String or list of strings to remove from the query.
:return: String of the cleaned query to parse.<|endoftext|> |
d60406713f0d9239797d08be0504c53a0bdcc555a08af989f85125b457cc6850 | @staticmethod
def parse_query(query: str, print_tree: bool=False) -> dict:
'\n Parse a query into a JSON parse-tree.\n\n :param query: String of the SQL query to parse as a JSON parse-tree.\n :param print_tree: Boolean to print the JSON parse-tree.\n :return: Dictionary of the query as a JSON parse-tree.\n '
query_json = parse(sql=query)
if print_tree:
pprint(object=query_json)
return query_json | Parse a query into a JSON parse-tree.
:param query: String of the SQL query to parse as a JSON parse-tree.
:param print_tree: Boolean to print the JSON parse-tree.
:return: Dictionary of the query as a JSON parse-tree. | extractor.py | parse_query | avisionh/sqlquerygraph | 3 | python | @staticmethod
def parse_query(query: str, print_tree: bool=False) -> dict:
'\n Parse a query into a JSON parse-tree.\n\n :param query: String of the SQL query to parse as a JSON parse-tree.\n :param print_tree: Boolean to print the JSON parse-tree.\n :return: Dictionary of the query as a JSON parse-tree.\n '
query_json = parse(sql=query)
if print_tree:
pprint(object=query_json)
return query_json | @staticmethod
def parse_query(query: str, print_tree: bool=False) -> dict:
'\n Parse a query into a JSON parse-tree.\n\n :param query: String of the SQL query to parse as a JSON parse-tree.\n :param print_tree: Boolean to print the JSON parse-tree.\n :return: Dictionary of the query as a JSON parse-tree.\n '
query_json = parse(sql=query)
if print_tree:
pprint(object=query_json)
return query_json<|docstring|>Parse a query into a JSON parse-tree.
:param query: String of the SQL query to parse as a JSON parse-tree.
:param print_tree: Boolean to print the JSON parse-tree.
:return: Dictionary of the query as a JSON parse-tree.<|endoftext|> |
af3550c2e38cd6d2dd2b75c1a42cd63ed07ef656e140613b5d1d9189583d008b | @staticmethod
def extract_from_json(obj: dict, key: str) -> list:
"\n Recursively fetch values from a nested JSON.\n\n For our purposes, extract where key is 'from' allows extraction of *most* table names after a `FROM` clause.\n - It does not extract the table names when the name is nested in a subquery.\n - Nor does it extract table names in '<TYPE> JOIN` clauses.\n To achieve above two, need to extract where the key is 'value' and compare with actual table names.\n This is because the values returned when key is 'value' are table names, column names etc.\n Reference\n - https://hackersandslackers.com/extract-data-from-complex-json-python/\n :param obj: Dictionary to extract values from.\n :param key: String of the value you want to extract.\n :return: List of values for the key.\n "
arr = []
def extract(obj: Union[(dict, list)], arr: list, key: str) -> list:
'\n Recusively search for values of key in a JSON tree.\n\n :param obj: Dictionary to extract values from.\n :param arr: List to store extracted values to.\n :param key: String of the dictionary key to extract associated value from.\n :return: List of the extracted values.\n '
if isinstance(obj, dict):
for (k, v) in obj.items():
if isinstance(v, (dict, list)):
extract(obj=v, arr=arr, key=key)
elif (k == key):
arr.append(v)
elif isinstance(obj, list):
for item in obj:
extract(obj=item, arr=arr, key=key)
return arr
values = extract(obj=obj, arr=arr, key=key)
return values | Recursively fetch values from a nested JSON.
For our purposes, extract where key is 'from' allows extraction of *most* table names after a `FROM` clause.
- It does not extract the table names when the name is nested in a subquery.
- Nor does it extract table names in '<TYPE> JOIN` clauses.
To achieve above two, need to extract where the key is 'value' and compare with actual table names.
This is because the values returned when key is 'value' are table names, column names etc.
Reference
- https://hackersandslackers.com/extract-data-from-complex-json-python/
:param obj: Dictionary to extract values from.
:param key: String of the value you want to extract.
:return: List of values for the key. | extractor.py | extract_from_json | avisionh/sqlquerygraph | 3 | python | @staticmethod
def extract_from_json(obj: dict, key: str) -> list:
"\n Recursively fetch values from a nested JSON.\n\n For our purposes, extract where key is 'from' allows extraction of *most* table names after a `FROM` clause.\n - It does not extract the table names when the name is nested in a subquery.\n - Nor does it extract table names in '<TYPE> JOIN` clauses.\n To achieve above two, need to extract where the key is 'value' and compare with actual table names.\n This is because the values returned when key is 'value' are table names, column names etc.\n Reference\n - https://hackersandslackers.com/extract-data-from-complex-json-python/\n :param obj: Dictionary to extract values from.\n :param key: String of the value you want to extract.\n :return: List of values for the key.\n "
arr = []
def extract(obj: Union[(dict, list)], arr: list, key: str) -> list:
'\n Recusively search for values of key in a JSON tree.\n\n :param obj: Dictionary to extract values from.\n :param arr: List to store extracted values to.\n :param key: String of the dictionary key to extract associated value from.\n :return: List of the extracted values.\n '
if isinstance(obj, dict):
for (k, v) in obj.items():
if isinstance(v, (dict, list)):
extract(obj=v, arr=arr, key=key)
elif (k == key):
arr.append(v)
elif isinstance(obj, list):
for item in obj:
extract(obj=item, arr=arr, key=key)
return arr
values = extract(obj=obj, arr=arr, key=key)
return values | @staticmethod
def extract_from_json(obj: dict, key: str) -> list:
"\n Recursively fetch values from a nested JSON.\n\n For our purposes, extract where key is 'from' allows extraction of *most* table names after a `FROM` clause.\n - It does not extract the table names when the name is nested in a subquery.\n - Nor does it extract table names in '<TYPE> JOIN` clauses.\n To achieve above two, need to extract where the key is 'value' and compare with actual table names.\n This is because the values returned when key is 'value' are table names, column names etc.\n Reference\n - https://hackersandslackers.com/extract-data-from-complex-json-python/\n :param obj: Dictionary to extract values from.\n :param key: String of the value you want to extract.\n :return: List of values for the key.\n "
arr = []
def extract(obj: Union[(dict, list)], arr: list, key: str) -> list:
'\n Recusively search for values of key in a JSON tree.\n\n :param obj: Dictionary to extract values from.\n :param arr: List to store extracted values to.\n :param key: String of the dictionary key to extract associated value from.\n :return: List of the extracted values.\n '
if isinstance(obj, dict):
for (k, v) in obj.items():
if isinstance(v, (dict, list)):
extract(obj=v, arr=arr, key=key)
elif (k == key):
arr.append(v)
elif isinstance(obj, list):
for item in obj:
extract(obj=item, arr=arr, key=key)
return arr
values = extract(obj=obj, arr=arr, key=key)
return values<|docstring|>Recursively fetch values from a nested JSON.
For our purposes, extract where key is 'from' allows extraction of *most* table names after a `FROM` clause.
- It does not extract the table names when the name is nested in a subquery.
- Nor does it extract table names in '<TYPE> JOIN` clauses.
To achieve above two, need to extract where the key is 'value' and compare with actual table names.
This is because the values returned when key is 'value' are table names, column names etc.
Reference
- https://hackersandslackers.com/extract-data-from-complex-json-python/
:param obj: Dictionary to extract values from.
:param key: String of the value you want to extract.
:return: List of values for the key.<|endoftext|> |
4882bcb7908935c9a5c919843b9e55622eeb389cf46266260feaa127feb2bdb9 | def extract_table_dependencies_from_queries(self, reference_datasets: list, str_to_remove: Union[(str, list)]=None, verbose: bool=False) -> dict:
'\n Extracts the table names and their dependencies from a set of .sql files.\n\n :param reference_datasets: List of datasets/schema of database.\n :param str_to_remove: String or list of strings to remove from the query.\n :param verbose: Boolean to output steps taken and query after cleaning. Useful for debugging.\n :return: Dictionary of tables as keys and their dependent tables as values.\n '
(queries, jsons, dicts) = ({}, {}, {})
reference_datasets = tuple([f'{txt}.' for txt in reference_datasets])
for file_name in tqdm(os.listdir(path=self.script_dir)):
if verbose:
print(f'''Reading query {file_name}...
''')
(file_name, query) = self.read_query(file=file_name)
queries[file_name] = query
if (str_to_remove is not None):
if verbose:
print(f'''Cleaning query {file_name} by removing {str_to_remove}...
''')
queries[file_name] = self.clean_query(query=queries[file_name], str_to_remove=str_to_remove)
if verbose:
print(f'Cleaned query is {queries[file_name]}')
print(f'''Parsing query {file_name}...
''')
jsons[file_name] = self.parse_query(query=queries[file_name], print_tree=verbose)
if verbose:
print(f'''Extracting table names from {file_name}...
''')
table_from = self.extract_from_json(obj=jsons[file_name], key='from')
table_from = [txt for txt in table_from if ('.' in txt)]
table_value = self.extract_from_json(obj=jsons[file_name], key='value')
table_join = [txt for txt in table_value if str(txt).startswith(reference_datasets)]
tables = sorted(list(set((table_from + table_join))))
if verbose:
print(f'''Extracted table names are {tables}...
''')
dicts[f'{self.schema}.{file_name}'] = tables
return dicts | Extracts the table names and their dependencies from a set of .sql files.
:param reference_datasets: List of datasets/schema of database.
:param str_to_remove: String or list of strings to remove from the query.
:param verbose: Boolean to output steps taken and query after cleaning. Useful for debugging.
:return: Dictionary of tables as keys and their dependent tables as values. | extractor.py | extract_table_dependencies_from_queries | avisionh/sqlquerygraph | 3 | python | def extract_table_dependencies_from_queries(self, reference_datasets: list, str_to_remove: Union[(str, list)]=None, verbose: bool=False) -> dict:
'\n Extracts the table names and their dependencies from a set of .sql files.\n\n :param reference_datasets: List of datasets/schema of database.\n :param str_to_remove: String or list of strings to remove from the query.\n :param verbose: Boolean to output steps taken and query after cleaning. Useful for debugging.\n :return: Dictionary of tables as keys and their dependent tables as values.\n '
(queries, jsons, dicts) = ({}, {}, {})
reference_datasets = tuple([f'{txt}.' for txt in reference_datasets])
for file_name in tqdm(os.listdir(path=self.script_dir)):
if verbose:
print(f'Reading query {file_name}...
')
(file_name, query) = self.read_query(file=file_name)
queries[file_name] = query
if (str_to_remove is not None):
if verbose:
print(f'Cleaning query {file_name} by removing {str_to_remove}...
')
queries[file_name] = self.clean_query(query=queries[file_name], str_to_remove=str_to_remove)
if verbose:
print(f'Cleaned query is {queries[file_name]}')
print(f'Parsing query {file_name}...
')
jsons[file_name] = self.parse_query(query=queries[file_name], print_tree=verbose)
if verbose:
print(f'Extracting table names from {file_name}...
')
table_from = self.extract_from_json(obj=jsons[file_name], key='from')
table_from = [txt for txt in table_from if ('.' in txt)]
table_value = self.extract_from_json(obj=jsons[file_name], key='value')
table_join = [txt for txt in table_value if str(txt).startswith(reference_datasets)]
tables = sorted(list(set((table_from + table_join))))
if verbose:
print(f'Extracted table names are {tables}...
')
dicts[f'{self.schema}.{file_name}'] = tables
return dicts | def extract_table_dependencies_from_queries(self, reference_datasets: list, str_to_remove: Union[(str, list)]=None, verbose: bool=False) -> dict:
'\n Extracts the table names and their dependencies from a set of .sql files.\n\n :param reference_datasets: List of datasets/schema of database.\n :param str_to_remove: String or list of strings to remove from the query.\n :param verbose: Boolean to output steps taken and query after cleaning. Useful for debugging.\n :return: Dictionary of tables as keys and their dependent tables as values.\n '
(queries, jsons, dicts) = ({}, {}, {})
reference_datasets = tuple([f'{txt}.' for txt in reference_datasets])
for file_name in tqdm(os.listdir(path=self.script_dir)):
if verbose:
print(f'Reading query {file_name}...
')
(file_name, query) = self.read_query(file=file_name)
queries[file_name] = query
if (str_to_remove is not None):
if verbose:
print(f'Cleaning query {file_name} by removing {str_to_remove}...
')
queries[file_name] = self.clean_query(query=queries[file_name], str_to_remove=str_to_remove)
if verbose:
print(f'Cleaned query is {queries[file_name]}')
print(f'Parsing query {file_name}...
')
jsons[file_name] = self.parse_query(query=queries[file_name], print_tree=verbose)
if verbose:
print(f'Extracting table names from {file_name}...
')
table_from = self.extract_from_json(obj=jsons[file_name], key='from')
table_from = [txt for txt in table_from if ('.' in txt)]
table_value = self.extract_from_json(obj=jsons[file_name], key='value')
table_join = [txt for txt in table_value if str(txt).startswith(reference_datasets)]
tables = sorted(list(set((table_from + table_join))))
if verbose:
print(f'Extracted table names are {tables}...
')
dicts[f'{self.schema}.{file_name}'] = tables
return dicts<|docstring|>Extracts the table names and their dependencies from a set of .sql files.
:param reference_datasets: List of datasets/schema of database.
:param str_to_remove: String or list of strings to remove from the query.
:param verbose: Boolean to output steps taken and query after cleaning. Useful for debugging.
:return: Dictionary of tables as keys and their dependent tables as values.<|endoftext|> |
66390140656a78f4b8e1d5098bbca219eda65b32072a6e9ecdc4a649f4334e11 | def extract(obj: Union[(dict, list)], arr: list, key: str) -> list:
'\n Recusively search for values of key in a JSON tree.\n\n :param obj: Dictionary to extract values from.\n :param arr: List to store extracted values to.\n :param key: String of the dictionary key to extract associated value from.\n :return: List of the extracted values.\n '
if isinstance(obj, dict):
for (k, v) in obj.items():
if isinstance(v, (dict, list)):
extract(obj=v, arr=arr, key=key)
elif (k == key):
arr.append(v)
elif isinstance(obj, list):
for item in obj:
extract(obj=item, arr=arr, key=key)
return arr | Recusively search for values of key in a JSON tree.
:param obj: Dictionary to extract values from.
:param arr: List to store extracted values to.
:param key: String of the dictionary key to extract associated value from.
:return: List of the extracted values. | extractor.py | extract | avisionh/sqlquerygraph | 3 | python | def extract(obj: Union[(dict, list)], arr: list, key: str) -> list:
'\n Recusively search for values of key in a JSON tree.\n\n :param obj: Dictionary to extract values from.\n :param arr: List to store extracted values to.\n :param key: String of the dictionary key to extract associated value from.\n :return: List of the extracted values.\n '
if isinstance(obj, dict):
for (k, v) in obj.items():
if isinstance(v, (dict, list)):
extract(obj=v, arr=arr, key=key)
elif (k == key):
arr.append(v)
elif isinstance(obj, list):
for item in obj:
extract(obj=item, arr=arr, key=key)
return arr | def extract(obj: Union[(dict, list)], arr: list, key: str) -> list:
'\n Recusively search for values of key in a JSON tree.\n\n :param obj: Dictionary to extract values from.\n :param arr: List to store extracted values to.\n :param key: String of the dictionary key to extract associated value from.\n :return: List of the extracted values.\n '
if isinstance(obj, dict):
for (k, v) in obj.items():
if isinstance(v, (dict, list)):
extract(obj=v, arr=arr, key=key)
elif (k == key):
arr.append(v)
elif isinstance(obj, list):
for item in obj:
extract(obj=item, arr=arr, key=key)
return arr<|docstring|>Recusively search for values of key in a JSON tree.
:param obj: Dictionary to extract values from.
:param arr: List to store extracted values to.
:param key: String of the dictionary key to extract associated value from.
:return: List of the extracted values.<|endoftext|> |
b3cb84e0a664d4adce8d50cf8e6e0e9818a0fd0479915c6ca50488011e197d24 | def best_solution(self) -> int:
'Returns the minimum number of stations from all solutions'
return min((solution['Num_Stations'] for solution in self.solutions)) | Returns the minimum number of stations from all solutions | experiment.py | best_solution | janedoesrepo/GRASP-Metaheuristic | 5 | python | def best_solution(self) -> int:
return min((solution['Num_Stations'] for solution in self.solutions)) | def best_solution(self) -> int:
return min((solution['Num_Stations'] for solution in self.solutions))<|docstring|>Returns the minimum number of stations from all solutions<|endoftext|> |
e366195f5ff6883af9a8db0ab4867b0da82fee220873c1c193c449ea1f6dd96a | @staticmethod
def compute_ARD(solution_stations: int, min_stations: int) -> float:
'Compute the average relative deviation of a solution'
ARD = (100 * ((solution_stations - min_stations) / min_stations))
return ARD | Compute the average relative deviation of a solution | experiment.py | compute_ARD | janedoesrepo/GRASP-Metaheuristic | 5 | python | @staticmethod
def compute_ARD(solution_stations: int, min_stations: int) -> float:
ARD = (100 * ((solution_stations - min_stations) / min_stations))
return ARD | @staticmethod
def compute_ARD(solution_stations: int, min_stations: int) -> float:
ARD = (100 * ((solution_stations - min_stations) / min_stations))
return ARD<|docstring|>Compute the average relative deviation of a solution<|endoftext|> |
80d0f195cdaeb804fd2f26ef8ae188633967bff99ba063f3d6753eaa828ff113 | def initialize(self, problem):
'Initialize the planner with some planning problem'
assert (problem.initialized == True), 'Planning problem data structure has not been initialized'
self.curr_problem = problem
self.env = problem.env
self.lattice = problem.lattice
self.cost = problem.cost
self.heuristic = problem.heuristic
self.visualize = problem.visualize
self.start_node = problem.start_n
self.goal_node = problem.goal_n
self.heuristic_weight = problem.params['heuristic_weight']
if self.visualize:
self.env.initialize_plot(self.lattice.node_to_state(self.start_node), self.lattice.node_to_state(self.goal_node))
self.initialized = True | Initialize the planner with some planning problem | planning_python/planners/search_based_planner.py | initialize | daahuang/planning_python | 12 | python | def initialize(self, problem):
assert (problem.initialized == True), 'Planning problem data structure has not been initialized'
self.curr_problem = problem
self.env = problem.env
self.lattice = problem.lattice
self.cost = problem.cost
self.heuristic = problem.heuristic
self.visualize = problem.visualize
self.start_node = problem.start_n
self.goal_node = problem.goal_n
self.heuristic_weight = problem.params['heuristic_weight']
if self.visualize:
self.env.initialize_plot(self.lattice.node_to_state(self.start_node), self.lattice.node_to_state(self.goal_node))
self.initialized = True | def initialize(self, problem):
assert (problem.initialized == True), 'Planning problem data structure has not been initialized'
self.curr_problem = problem
self.env = problem.env
self.lattice = problem.lattice
self.cost = problem.cost
self.heuristic = problem.heuristic
self.visualize = problem.visualize
self.start_node = problem.start_n
self.goal_node = problem.goal_n
self.heuristic_weight = problem.params['heuristic_weight']
if self.visualize:
self.env.initialize_plot(self.lattice.node_to_state(self.start_node), self.lattice.node_to_state(self.goal_node))
self.initialized = True<|docstring|>Initialize the planner with some planning problem<|endoftext|> |
becb722c8e6ba3c8d4eb49c67c70d91105da158869f187497f880f1a1a067b16 | def get_successors(self, node):
'Given a node, query the lattice for successors, collision check the successors and return successor nodes, edges, costs, obstacle\n successors\n\n @param: node - tuple corresponsing to a discrete node \n @return: neighbors - list of tuples where each tuple is a valid neighbor node of input\n costs - costs of associated valid edges\n valid_edges - list of collision free edges(continuous coords) coming out of the input node\n invalid_edges - a list of tuples where each tuple is of following form: (invalid edge, first invalid state along edge)\n '
if self.lattice.edge_precalc_done:
succs = self.lattice.node_to_succs[node]
else:
succs = self.lattice.get_successors(node)
neighbors = []
costs = []
valid_edges = []
invalid_edges = []
for (i, succ) in enumerate(succs):
succ_node = succ[0]
succ_edge = succ[1]
(isvalid, first_coll_state) = self.env.is_edge_valid(succ_edge)
if (not isvalid):
if first_coll_state:
invalid_edges.append((succ_edge, first_coll_state))
continue
neighbors.append(succ_node)
valid_edges.append(succ_edge)
if self.lattice.costs_precalc_done:
costs.append(self.lattice.succ_costs[node][i])
else:
costs.append(self.cost.get_cost(succ_edge))
if self.visualize:
self.visualize_search(valid_edges, invalid_edges)
return (neighbors, costs, valid_edges, invalid_edges) | Given a node, query the lattice for successors, collision check the successors and return successor nodes, edges, costs, obstacle
successors
@param: node - tuple corresponsing to a discrete node
@return: neighbors - list of tuples where each tuple is a valid neighbor node of input
costs - costs of associated valid edges
valid_edges - list of collision free edges(continuous coords) coming out of the input node
invalid_edges - a list of tuples where each tuple is of following form: (invalid edge, first invalid state along edge) | planning_python/planners/search_based_planner.py | get_successors | daahuang/planning_python | 12 | python | def get_successors(self, node):
'Given a node, query the lattice for successors, collision check the successors and return successor nodes, edges, costs, obstacle\n successors\n\n @param: node - tuple corresponsing to a discrete node \n @return: neighbors - list of tuples where each tuple is a valid neighbor node of input\n costs - costs of associated valid edges\n valid_edges - list of collision free edges(continuous coords) coming out of the input node\n invalid_edges - a list of tuples where each tuple is of following form: (invalid edge, first invalid state along edge)\n '
if self.lattice.edge_precalc_done:
succs = self.lattice.node_to_succs[node]
else:
succs = self.lattice.get_successors(node)
neighbors = []
costs = []
valid_edges = []
invalid_edges = []
for (i, succ) in enumerate(succs):
succ_node = succ[0]
succ_edge = succ[1]
(isvalid, first_coll_state) = self.env.is_edge_valid(succ_edge)
if (not isvalid):
if first_coll_state:
invalid_edges.append((succ_edge, first_coll_state))
continue
neighbors.append(succ_node)
valid_edges.append(succ_edge)
if self.lattice.costs_precalc_done:
costs.append(self.lattice.succ_costs[node][i])
else:
costs.append(self.cost.get_cost(succ_edge))
if self.visualize:
self.visualize_search(valid_edges, invalid_edges)
return (neighbors, costs, valid_edges, invalid_edges) | def get_successors(self, node):
'Given a node, query the lattice for successors, collision check the successors and return successor nodes, edges, costs, obstacle\n successors\n\n @param: node - tuple corresponsing to a discrete node \n @return: neighbors - list of tuples where each tuple is a valid neighbor node of input\n costs - costs of associated valid edges\n valid_edges - list of collision free edges(continuous coords) coming out of the input node\n invalid_edges - a list of tuples where each tuple is of following form: (invalid edge, first invalid state along edge)\n '
if self.lattice.edge_precalc_done:
succs = self.lattice.node_to_succs[node]
else:
succs = self.lattice.get_successors(node)
neighbors = []
costs = []
valid_edges = []
invalid_edges = []
for (i, succ) in enumerate(succs):
succ_node = succ[0]
succ_edge = succ[1]
(isvalid, first_coll_state) = self.env.is_edge_valid(succ_edge)
if (not isvalid):
if first_coll_state:
invalid_edges.append((succ_edge, first_coll_state))
continue
neighbors.append(succ_node)
valid_edges.append(succ_edge)
if self.lattice.costs_precalc_done:
costs.append(self.lattice.succ_costs[node][i])
else:
costs.append(self.cost.get_cost(succ_edge))
if self.visualize:
self.visualize_search(valid_edges, invalid_edges)
return (neighbors, costs, valid_edges, invalid_edges)<|docstring|>Given a node, query the lattice for successors, collision check the successors and return successor nodes, edges, costs, obstacle
successors
@param: node - tuple corresponsing to a discrete node
@return: neighbors - list of tuples where each tuple is a valid neighbor node of input
costs - costs of associated valid edges
valid_edges - list of collision free edges(continuous coords) coming out of the input node
invalid_edges - a list of tuples where each tuple is of following form: (invalid edge, first invalid state along edge)<|endoftext|> |
8ed424b81a4759b75c8a2f4ed7180de575a97a55dead66a91bd6112b4aad66b3 | def get_predecessors(self, node):
'Given a node, query the lattice for predecessors, collision check the predecessors and return predecessor nodes, edges, costs, obstacle\n predecessors\n\n @param: node - tuple corresponsing to a discrete node \n @return: neighbors - list of tuples where each tuple is a valid neighbor node of input\n costs - costs of associated valid edges\n valid_edges - list of collision free edges(continuous coords) coming out of the input node\n invalid_edges - a list of tuples where each tuple is of following form: (invalid edge, first invalid state along edge)\n '
if self.lattice.edge_precalc_done:
preds = self.lattice.node_to_preds[node]
else:
preds = self.lattice.get_predecessors(node)
neighbors = []
costs = []
valid_edges = []
invalid_edges = []
for (i, pred) in enumerate(preds):
pred_node = pred[0]
pred_edge = pred[1]
(isvalid, first_coll_state) = self.env.is_edge_valid(pred_edge)
if (not isvalid):
if first_coll_state:
invalid_edges.append((pred_edge, first_coll_state))
continue
neighbors.append(pred_node)
valid_edges.append(pred_edge)
if self.lattice.costs_precalc_done:
costs.append(self.lattice.pred_costs[node][i])
else:
costs.append(self.cost.get_cost(pred_edge))
if self.visualize:
self.visualize_search(valid_edges, invalid_edges)
return (neighbors, costs, valid_edges, invalid_edges) | Given a node, query the lattice for predecessors, collision check the predecessors and return predecessor nodes, edges, costs, obstacle
predecessors
@param: node - tuple corresponsing to a discrete node
@return: neighbors - list of tuples where each tuple is a valid neighbor node of input
costs - costs of associated valid edges
valid_edges - list of collision free edges(continuous coords) coming out of the input node
invalid_edges - a list of tuples where each tuple is of following form: (invalid edge, first invalid state along edge) | planning_python/planners/search_based_planner.py | get_predecessors | daahuang/planning_python | 12 | python | def get_predecessors(self, node):
'Given a node, query the lattice for predecessors, collision check the predecessors and return predecessor nodes, edges, costs, obstacle\n predecessors\n\n @param: node - tuple corresponsing to a discrete node \n @return: neighbors - list of tuples where each tuple is a valid neighbor node of input\n costs - costs of associated valid edges\n valid_edges - list of collision free edges(continuous coords) coming out of the input node\n invalid_edges - a list of tuples where each tuple is of following form: (invalid edge, first invalid state along edge)\n '
if self.lattice.edge_precalc_done:
preds = self.lattice.node_to_preds[node]
else:
preds = self.lattice.get_predecessors(node)
neighbors = []
costs = []
valid_edges = []
invalid_edges = []
for (i, pred) in enumerate(preds):
pred_node = pred[0]
pred_edge = pred[1]
(isvalid, first_coll_state) = self.env.is_edge_valid(pred_edge)
if (not isvalid):
if first_coll_state:
invalid_edges.append((pred_edge, first_coll_state))
continue
neighbors.append(pred_node)
valid_edges.append(pred_edge)
if self.lattice.costs_precalc_done:
costs.append(self.lattice.pred_costs[node][i])
else:
costs.append(self.cost.get_cost(pred_edge))
if self.visualize:
self.visualize_search(valid_edges, invalid_edges)
return (neighbors, costs, valid_edges, invalid_edges) | def get_predecessors(self, node):
'Given a node, query the lattice for predecessors, collision check the predecessors and return predecessor nodes, edges, costs, obstacle\n predecessors\n\n @param: node - tuple corresponsing to a discrete node \n @return: neighbors - list of tuples where each tuple is a valid neighbor node of input\n costs - costs of associated valid edges\n valid_edges - list of collision free edges(continuous coords) coming out of the input node\n invalid_edges - a list of tuples where each tuple is of following form: (invalid edge, first invalid state along edge)\n '
if self.lattice.edge_precalc_done:
preds = self.lattice.node_to_preds[node]
else:
preds = self.lattice.get_predecessors(node)
neighbors = []
costs = []
valid_edges = []
invalid_edges = []
for (i, pred) in enumerate(preds):
pred_node = pred[0]
pred_edge = pred[1]
(isvalid, first_coll_state) = self.env.is_edge_valid(pred_edge)
if (not isvalid):
if first_coll_state:
invalid_edges.append((pred_edge, first_coll_state))
continue
neighbors.append(pred_node)
valid_edges.append(pred_edge)
if self.lattice.costs_precalc_done:
costs.append(self.lattice.pred_costs[node][i])
else:
costs.append(self.cost.get_cost(pred_edge))
if self.visualize:
self.visualize_search(valid_edges, invalid_edges)
return (neighbors, costs, valid_edges, invalid_edges)<|docstring|>Given a node, query the lattice for predecessors, collision check the predecessors and return predecessor nodes, edges, costs, obstacle
predecessors
@param: node - tuple corresponsing to a discrete node
@return: neighbors - list of tuples where each tuple is a valid neighbor node of input
costs - costs of associated valid edges
valid_edges - list of collision free edges(continuous coords) coming out of the input node
invalid_edges - a list of tuples where each tuple is of following form: (invalid edge, first invalid state along edge)<|endoftext|> |
6ac26413f1a3f0f92a10510fb2c8dd914181e635179fe5f93c110f757cd741fb | def plan(self):
'This is the main function which is called for solving a planning problem.\n This function is specific to a planner.\n '
raise NotImplementedError | This is the main function which is called for solving a planning problem.
This function is specific to a planner. | planning_python/planners/search_based_planner.py | plan | daahuang/planning_python | 12 | python | def plan(self):
'This is the main function which is called for solving a planning problem.\n This function is specific to a planner.\n '
raise NotImplementedError | def plan(self):
'This is the main function which is called for solving a planning problem.\n This function is specific to a planner.\n '
raise NotImplementedError<|docstring|>This is the main function which is called for solving a planning problem.
This function is specific to a planner.<|endoftext|> |
e2d966e1dba357292573a8d7432c71ee06dfcd4de09361078b32cdefee450a1f | def reset_problem(self, problem):
'Reset the underlying problem that the planner solves while\n still persisting the information associated with the search tree'
assert (problem.initialized == True), 'Planning problem data structure has not been initialized'
self.curr_problem = problem
self.env = problem.env
self.lattice = problem.lattice
self.cost = problem.cost
self.heuristic = problem.heuristic
self.visualize = problem.visualize
self.start_node = problem.start_n
self.goal_node = problem.goal_n
self.heuristic_weight = problem.params['heuristic_weight']
if self.visualize:
self.env.initialize_plot(self.lattice.node_to_state(self.start_node), self.lattice.node_to_state(self.goal_node))
self.initialized = True | Reset the underlying problem that the planner solves while
still persisting the information associated with the search tree | planning_python/planners/search_based_planner.py | reset_problem | daahuang/planning_python | 12 | python | def reset_problem(self, problem):
'Reset the underlying problem that the planner solves while\n still persisting the information associated with the search tree'
assert (problem.initialized == True), 'Planning problem data structure has not been initialized'
self.curr_problem = problem
self.env = problem.env
self.lattice = problem.lattice
self.cost = problem.cost
self.heuristic = problem.heuristic
self.visualize = problem.visualize
self.start_node = problem.start_n
self.goal_node = problem.goal_n
self.heuristic_weight = problem.params['heuristic_weight']
if self.visualize:
self.env.initialize_plot(self.lattice.node_to_state(self.start_node), self.lattice.node_to_state(self.goal_node))
self.initialized = True | def reset_problem(self, problem):
'Reset the underlying problem that the planner solves while\n still persisting the information associated with the search tree'
assert (problem.initialized == True), 'Planning problem data structure has not been initialized'
self.curr_problem = problem
self.env = problem.env
self.lattice = problem.lattice
self.cost = problem.cost
self.heuristic = problem.heuristic
self.visualize = problem.visualize
self.start_node = problem.start_n
self.goal_node = problem.goal_n
self.heuristic_weight = problem.params['heuristic_weight']
if self.visualize:
self.env.initialize_plot(self.lattice.node_to_state(self.start_node), self.lattice.node_to_state(self.goal_node))
self.initialized = True<|docstring|>Reset the underlying problem that the planner solves while
still persisting the information associated with the search tree<|endoftext|> |
9cb67371ac65a9d6fd500dceb29526e4b8a76664adc491f5e8852094882760d5 | def clear_planner(self):
'When this is called the planner clears information associated with the \n previous tree search'
raise NotImplementedError | When this is called the planner clears information associated with the
previous tree search | planning_python/planners/search_based_planner.py | clear_planner | daahuang/planning_python | 12 | python | def clear_planner(self):
'When this is called the planner clears information associated with the \n previous tree search'
raise NotImplementedError | def clear_planner(self):
'When this is called the planner clears information associated with the \n previous tree search'
raise NotImplementedError<|docstring|>When this is called the planner clears information associated with the
previous tree search<|endoftext|> |
5f266607578e38bd33d291c8c6a9794890d1a4768b9cf62bb36eeca72ad7e937 | def __str__(self) -> str:
' Return just the row for a special form need. '
return str(self.loc_row) | Return just the row for a special form need. | fpiweb/models.py | __str__ | damiencalloway/Food-Pantry-Inventory | 1 | python | def __str__(self) -> str:
' '
return str(self.loc_row) | def __str__(self) -> str:
' '
return str(self.loc_row)<|docstring|>Return just the row for a special form need.<|endoftext|> |
a014ee7b8cac4bd3d2de34b2d68f8ccae083d5fa81f1548ab437456ccd80c7d5 | def __repr__(self) -> str:
' Default way to display a location row record. '
display = f'Row {self.loc_row} ({self.loc_row_descr})'
return display | Default way to display a location row record. | fpiweb/models.py | __repr__ | damiencalloway/Food-Pantry-Inventory | 1 | python | def __repr__(self) -> str:
' '
display = f'Row {self.loc_row} ({self.loc_row_descr})'
return display | def __repr__(self) -> str:
' '
display = f'Row {self.loc_row} ({self.loc_row_descr})'
return display<|docstring|>Default way to display a location row record.<|endoftext|> |
e4a0bb45887930fbfa995959a27b4107da23f4c21c40896d4afa69409cbb0c98 | def __str__(self) -> str:
' Return just the bin for a special form need. '
return str(self.loc_bin) | Return just the bin for a special form need. | fpiweb/models.py | __str__ | damiencalloway/Food-Pantry-Inventory | 1 | python | def __str__(self) -> str:
' '
return str(self.loc_bin) | def __str__(self) -> str:
' '
return str(self.loc_bin)<|docstring|>Return just the bin for a special form need.<|endoftext|> |
cc33bd9c760f6e37bd3974c03afbfc098678a7933d55ac57b08879841f7d530f | def __repr__(self) -> str:
' Default way to display a location bin record. '
display = f'Bin {self.loc_bin} ({self.loc_bin_descr})'
return display | Default way to display a location bin record. | fpiweb/models.py | __repr__ | damiencalloway/Food-Pantry-Inventory | 1 | python | def __repr__(self) -> str:
' '
display = f'Bin {self.loc_bin} ({self.loc_bin_descr})'
return display | def __repr__(self) -> str:
' '
display = f'Bin {self.loc_bin} ({self.loc_bin_descr})'
return display<|docstring|>Default way to display a location bin record.<|endoftext|> |
0ccedb0ac7673998e3243e32044658f0385ab5c77d54935037b9d3dbd88596c7 | def __str__(self) -> str:
' Return just the tier for a special form need. '
return str(self.loc_tier) | Return just the tier for a special form need. | fpiweb/models.py | __str__ | damiencalloway/Food-Pantry-Inventory | 1 | python | def __str__(self) -> str:
' '
return str(self.loc_tier) | def __str__(self) -> str:
' '
return str(self.loc_tier)<|docstring|>Return just the tier for a special form need.<|endoftext|> |
924df978a20dcb006d6274660fbcaa67a143898e3c037a7ed6d712ff79cf417c | def __repr__(self) -> str:
' Default way to display a location tier record. '
display = f'Tier {self.loc_tier} ({self.loc_tier_descr})'
return display | Default way to display a location tier record. | fpiweb/models.py | __repr__ | damiencalloway/Food-Pantry-Inventory | 1 | python | def __repr__(self) -> str:
' '
display = f'Tier {self.loc_tier} ({self.loc_tier_descr})'
return display | def __repr__(self) -> str:
' '
display = f'Tier {self.loc_tier} ({self.loc_tier_descr})'
return display<|docstring|>Default way to display a location tier record.<|endoftext|> |
781e21acbd7c3cf19a629364e716fccfd1efa1d6bdc2ffe3c98bc2238375a6ba | def __str__(self) -> str:
' Default way to display a location record. '
display = f'Location {self.loc_code} - {self.loc_descr}'
if self.loc_in_warehouse:
display += f' ({self.loc_row}/{self.loc_bin}/{self.loc_tier})'
return display | Default way to display a location record. | fpiweb/models.py | __str__ | damiencalloway/Food-Pantry-Inventory | 1 | python | def __str__(self) -> str:
' '
display = f'Location {self.loc_code} - {self.loc_descr}'
if self.loc_in_warehouse:
display += f' ({self.loc_row}/{self.loc_bin}/{self.loc_tier})'
return display | def __str__(self) -> str:
' '
display = f'Location {self.loc_code} - {self.loc_descr}'
if self.loc_in_warehouse:
display += f' ({self.loc_row}/{self.loc_bin}/{self.loc_tier})'
return display<|docstring|>Default way to display a location record.<|endoftext|> |
1829ce8e8566230852514d4933a41da7349c6868d3872abf2d8df1657859c51a | def __str__(self):
' Default way to display this box type record. '
display = f'{self.box_type_code} - {self.box_type_descr} ({self.box_type_qty})'
return display | Default way to display this box type record. | fpiweb/models.py | __str__ | damiencalloway/Food-Pantry-Inventory | 1 | python | def __str__(self):
' '
display = f'{self.box_type_code} - {self.box_type_descr} ({self.box_type_qty})'
return display | def __str__(self):
' '
display = f'{self.box_type_code} - {self.box_type_descr} ({self.box_type_qty})'
return display<|docstring|>Default way to display this box type record.<|endoftext|> |
f6b46f6dfd1297c1ae6cc1196699d87fd5303a45030b8a02bee81e655de5acb0 | def __str__(self):
' Default way to display this product category record. '
display = f'{self.prod_cat_name}'
if self.prod_cat_descr:
display += f' - {self.prod_cat_descr[:50]}'
return display | Default way to display this product category record. | fpiweb/models.py | __str__ | damiencalloway/Food-Pantry-Inventory | 1 | python | def __str__(self):
' '
display = f'{self.prod_cat_name}'
if self.prod_cat_descr:
display += f' - {self.prod_cat_descr[:50]}'
return display | def __str__(self):
' '
display = f'{self.prod_cat_name}'
if self.prod_cat_descr:
display += f' - {self.prod_cat_descr[:50]}'
return display<|docstring|>Default way to display this product category record.<|endoftext|> |
1802501330705f5bcc097b88944308ee6832c2aa1989a8a10bab036a35f918a4 | def __str__(self):
' Default way to display this product record. '
display = f'{self.prod_name} ({self.prod_cat})'
return display | Default way to display this product record. | fpiweb/models.py | __str__ | damiencalloway/Food-Pantry-Inventory | 1 | python | def __str__(self):
' '
display = f'{self.prod_name} ({self.prod_cat})'
return display | def __str__(self):
' '
display = f'{self.prod_name} ({self.prod_cat})'
return display<|docstring|>Default way to display this product record.<|endoftext|> |
8131faf7ead1532a81130c0542272455daf9e2ee84376662b85f6dc591dcfbc3 | def __str__(self):
' Default way to display this box record. '
if (self.exp_month_start or self.exp_month_end):
display = f'{self.box_number} ({self.box_type}) {self.loc_row}/{self.loc_bin}/{self.loc_tier} {self.product} {self.quantity}{self.exp_year} ({self.exp_month_start}-{self.exp_month_end}){self.date_filled}'
else:
display = f'{self.box_number} ({self.box_type}) {self.loc_row}/{self.loc_bin}/{self.loc_tier} {self.product} {self.quantity}{self.exp_year} {self.date_filled}'
return display | Default way to display this box record. | fpiweb/models.py | __str__ | damiencalloway/Food-Pantry-Inventory | 1 | python | def __str__(self):
' '
if (self.exp_month_start or self.exp_month_end):
display = f'{self.box_number} ({self.box_type}) {self.loc_row}/{self.loc_bin}/{self.loc_tier} {self.product} {self.quantity}{self.exp_year} ({self.exp_month_start}-{self.exp_month_end}){self.date_filled}'
else:
display = f'{self.box_number} ({self.box_type}) {self.loc_row}/{self.loc_bin}/{self.loc_tier} {self.product} {self.quantity}{self.exp_year} {self.date_filled}'
return display | def __str__(self):
' '
if (self.exp_month_start or self.exp_month_end):
display = f'{self.box_number} ({self.box_type}) {self.loc_row}/{self.loc_bin}/{self.loc_tier} {self.product} {self.quantity}{self.exp_year} ({self.exp_month_start}-{self.exp_month_end}){self.date_filled}'
else:
display = f'{self.box_number} ({self.box_type}) {self.loc_row}/{self.loc_bin}/{self.loc_tier} {self.product} {self.quantity}{self.exp_year} {self.date_filled}'
return display<|docstring|>Default way to display this box record.<|endoftext|> |