Dataset schema (column, dtype, observed values):

    body_hash               string    length 64 (fixed)
    body                    string    length 23 to 109k
    docstring               string    length 1 to 57k
    path                    string    length 4 to 198
    name                    string    length 1 to 115
    repository_name         string    length 7 to 111
    repository_stars        float64   0 to 191k
    lang                    string    1 class ("python")
    body_without_docstring  string    length 14 to 108k
    unified                 string    length 45 to 133k

Sample rows (fields listed in schema order):
body_hash: 87414c955a0530474f58392fced7a41c0416a87516d3ac5f01ee84e24b7db3c3
body:

def is_subnet_of(a, b):
    """
    Check if network-b is subnet of network-a
    """
    if a.network_address != b.network_address:
        return False
    return a.prefixlen >= b.prefixlen

docstring: Check if network-b is subnet of network-a
path: openr/py/openr/utils/ipnetwork.py | name: is_subnet_of | repository: arshanh/openr (1 star) | lang: python
body_without_docstring: the body with the docstring text blanked.
unified: body_without_docstring followed by the docstring wrapped in <|docstring|> ... <|endoftext|>
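A quick behavioral check of the row above (editor's sketch, not a dataset row; only the standard-library ipaddress module is assumed):

import ipaddress

def is_subnet_of(a, b):
    # As in the row above: True only when both networks share a network
    # address and a's prefix is at least as long as b's.
    if a.network_address != b.network_address:
        return False
    return a.prefixlen >= b.prefixlen

outer = ipaddress.ip_network('10.0.0.0/8')
inner = ipaddress.ip_network('10.0.0.0/24')
shifted = ipaddress.ip_network('10.0.1.0/24')

print(is_subnet_of(inner, outer))    # True: same base address, /24 within /8
print(is_subnet_of(shifted, outer))  # False: base addresses differ, although
                                     # the stdlib's shifted.subnet_of(outer) is True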
body_hash: c35b496293c98963861033ece8d7d8657fdf568aaef6bae5f7528de8c33808a7
body:

def contain_any_prefix(prefix, ip_networks):
    """
    Utility function to check if prefix contain any of the prefixes/ips

    :returns: True if prefix contains any of the ip_networks else False
    """
    if ip_networks is None:
        return True
    prefix = ipaddress.ip_network(prefix)
    return any(is_subnet_of(prefix, net) for net in ip_networks)

docstring: Utility function to check if prefix contain any of the prefixes/ips :returns: True if prefix contains any of the ip_networks else False
path: openr/py/openr/utils/ipnetwork.py | name: contain_any_prefix | repository: arshanh/openr (1 star) | lang: python
body_without_docstring: identical to the body (the docstring was not stripped in this row).
unified: body_without_docstring followed by the docstring wrapped in <|docstring|> ... <|endoftext|>
body_hash: e65db7e41adfbde8496c561802848424bb4fd30efda7691b5a4a2531ebd2629a
body:

def _bbox2pymesh(element):
    """
    Convert the bounding box of <element> into a pymesh Mesh in world coordinates.

    Inputs:
        element (ProjectObject)

    Return:
        pymesh.Mesh - Mesh representation of the oriented bounding box in
            world coordinates.
    """
    vertices = element.pose * element.bounds.corners()
    (i1, i2, i3, i4, i5, i6, i7, i8) = range(8)
    faces = [[i1, i2, i3], [i1, i3, i4], [i4, i3, i8], [i4, i8, i5],
             [i5, i8, i7], [i5, i7, i6], [i6, i7, i2], [i6, i2, i1],
             [i1, i4, i5], [i1, i5, i6], [i2, i7, i8], [i2, i8, i3]]
    return pymesh.form_mesh(vertices.T, np.array(faces))

docstring: Convert the bounding box of <element> into a pymesh Mesh in world coordinates. Inputs: element (ProjectObject) Return: pymesh.Mesh - Mesh representation of the oriented bounding box in world coordinates.
path: sumo/metrics/bb_evaluator.py | name: _bbox2pymesh | repository: RishabhJain2018/sumo-challenge (70 stars) | lang: python
body_without_docstring: identical to the body (the docstring was not stripped in this row).
unified: body_without_docstring followed by the docstring wrapped in <|docstring|> ... <|endoftext|>
body_hash: c8cf6fcd29f08dfb9ed63408a22d9fbff3384dc0e094ad5fc2742665160b22fb
body:

def __init__(self, submission, ground_truth, settings=None):
    """
    Constructor. Computes similarity between all elements in the
    submission and ground_truth and also computes
    data association caches.

    Inputs:
        submission (ProjectScene) - Submitted scene to be evaluated
        ground_truth (ProjectScene) - The ground truth scene
        settings (dict) - configuration for the evaluator. See
            Evaluator.py for recognized keys and values.
    """
    for e in submission.elements.values():
        posed_corners = e.pose.transform_all_from(e.bounds.corners())
        e.posed_bbox = ComputeBbox().from_point_cloud(posed_corners)
    for e in ground_truth.elements.values():
        posed_corners = e.pose.transform_all_from(e.bounds.corners())
        e.posed_bbox = ComputeBbox().from_point_cloud(posed_corners)
    super(BBEvaluator, self).__init__(submission, ground_truth, settings)

docstring: Constructor. Computes similarity between all elements in the submission and ground_truth and also computes data association caches. Inputs: submission (ProjectScene) - Submitted scene to be evaluated ground_truth (ProjectScene) - The ground truth scene settings (dict) - configuration for the evaluator. See Evaluator.py for recognized keys and values.
path: sumo/metrics/bb_evaluator.py | name: __init__ | repository: RishabhJain2018/sumo-challenge (70 stars) | lang: python
body_without_docstring: identical to the body (the docstring was not stripped in this row).
unified: body_without_docstring followed by the docstring wrapped in <|docstring|> ... <|endoftext|>
body_hash: aed25298d7adb0b05f70fcefef5ab2726de1b584ac487950401e45b24b0d9c51
body:

def evaluate_all(self):
    """
    Computes all metrics for the submission

    Return:
        metrics (dict) - Keys/values are:
            "shape_score" : float
            "rotation_error" : float
            "translation_error" : float
            "semantics_score" : float
            "perceptual_score" : float
    """
    metrics = {}
    metrics['shape_score'] = self.shape_score()
    (rotation_error, translation_error) = self.pose_error()
    metrics['rotation_error'] = rotation_error
    metrics['translation_error'] = translation_error
    metrics['semantics_score'] = self.semantics_score()
    metrics['perceptual_score'] = self.perceptual_score()
    return metrics

docstring: Computes all metrics for the submission Return: metrics (dict) - Keys/values are: "shape_score" : float "rotation_error" : float "translation_error" : float "semantics_score" : float "perceptual_score" : float
path: sumo/metrics/bb_evaluator.py | name: evaluate_all | repository: RishabhJain2018/sumo-challenge (70 stars) | lang: python
body_without_docstring: identical to the body (the docstring was not stripped in this row).
unified: body_without_docstring followed by the docstring wrapped in <|docstring|> ... <|endoftext|>
body_hash: 737caee2c3d1843c9606a141aa9f5d69c142aa85b74b120cf1748c0303e550b7
body:

def _shape_similarity(self, element1, element2):
    """
    Similarity function that compares the bounding boxes of
    <element1> and <element2>

    Inputs:
        element1 (ProjectObject)
        element2 (ProjectObject)

    Return:
        float - bounding box IoU (Equation 1 in SUMO white paper)
    """
    bbox1 = element1.posed_bbox
    bbox2 = element2.posed_bbox
    for axis in range(3):
        if (bbox1.min_corner[axis] > bbox2.max_corner[axis]) or \
           (bbox2.min_corner[axis] > bbox1.max_corner[axis]):
            return 0
    box1 = _bbox2pymesh(element1)
    box2 = _bbox2pymesh(element2)
    inter = pymesh.boolean(box1, box2, operation='intersection')
    (ivert, ifaces, _) = remove_duplicated_vertices_raw(inter.vertices, inter.faces)
    inter_mesh = pymesh.form_mesh(ivert, ifaces)
    intersection = abs(inter_mesh.volume)
    union = abs(box1.volume) + abs(box2.volume) - intersection
    return intersection / union

docstring: Similarity function that compares the bounding boxes of <element1> and <element2> Inputs: element1 (ProjectObject) element2 (ProjectObject) Return: float - bounding box IoU (Equation 1 in SUMO white paper)
path: sumo/metrics/bb_evaluator.py | name: _shape_similarity | repository: RishabhJain2018/sumo-challenge (70 stars) | lang: python
body_without_docstring: identical to the body (the docstring was not stripped in this row).
unified: body_without_docstring followed by the docstring wrapped in <|docstring|> ... <|endoftext|>
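The early-exit overlap test above is the axis-aligned part of the computation. As an editor's sketch (not the dataset's pymesh-based oriented-box boolean), the same idea as a plain axis-aligned 3D IoU in numpy:

import numpy as np

def aabb_iou(min1, max1, min2, max2):
    # Intersection extents per axis; any non-positive extent means no overlap.
    inter_dims = np.minimum(max1, max2) - np.maximum(min1, min2)
    if np.any(inter_dims <= 0):
        return 0.0
    inter = np.prod(inter_dims)
    vol1 = np.prod(max1 - min1)
    vol2 = np.prod(max2 - min2)
    return inter / (vol1 + vol2 - inter)

# Unit cube vs. a unit cube shifted by 0.5 on every axis: IoU = 0.125 / 1.875
print(aabb_iou(np.zeros(3), np.ones(3), np.full(3, 0.5), np.full(3, 1.5)))  # ~0.0667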
body_hash: 57768f56ee63c34723896cd80c44fd2f3109dcc1de401a23b112efb015229599
body:

@apply_defaults
def __init__(self, redshift_conn_id='', operator_mode=LoadOperatorMode.Table, query='', table='', *args, **kwargs):
    'LoadFactOperator constructor. Defines the parameters required for the operator.'
    super(LoadFactOperator, self).__init__(*args, **kwargs)
    self.redshift_conn_id = redshift_conn_id
    self.operator_mode = operator_mode
    self.query = query
    self.table = table

docstring: LoadFactOperator constructor. Defines the parameters required for the operator.
path: 05-data-pipelines/plugins/operators/load_fact.py | name: __init__ | repository: Ceridan/data-engineering-projects (0 stars) | lang: python
body_without_docstring: the body with the docstring removed; the stripping also dropped the empty-string defaults (redshift_conn_id=, query=, table=), so the stored field is not valid Python.
unified: body_without_docstring followed by the docstring wrapped in <|docstring|> ... <|endoftext|>
body_hash: 62b51fde2a8752216a8379448df8a418a3c2f9a2ffb5add0f1da256193cf404b
body:

def execute(self, context):
    'Load fact table from staging.'
    try:
        self.log.info('Initializing Postgres hook (for Redshift)')
        redshift = PostgresHook(postgres_conn_id=self.redshift_conn_id)
        if self.operator_mode == LoadOperatorMode.Table:
            sql = table_to_query_map[self.table]
            self.log.info(f'Loading data from staging to table "{self.table}"')
        else:
            sql = self.query
            self.log.info(f'Loading data using custom query: {self.query}')
        redshift.run(sql)
        self.log.info('Load operation successfully completed')
    except psycopg2.Error as e:
        self.log.error(f'Error occurred during LOAD operation: {e}')
        raise

docstring: Load fact table from staging.
path: 05-data-pipelines/plugins/operators/load_fact.py | name: execute | repository: Ceridan/data-engineering-projects (0 stars) | lang: python
body_without_docstring: the body with the docstring removed.
unified: body_without_docstring followed by the docstring wrapped in <|docstring|> ... <|endoftext|>
body_hash: af2f6e8779e96224534835b84646b32eee558ddbfdfc4fed0e0d8ed45923a2b5
body:

def smooth_l1(x1, x2, sigma):
    'Smooth L1 loss'
    sigma2 = sigma ** 2
    diff = x1 - x2
    abs_diff = diff.abs()
    mask = (abs_diff.detach() < (1.0 / sigma2)).float()
    return mask * (sigma2 / 2.0) * diff ** 2 + (1 - mask) * (abs_diff - 0.5 / sigma2)

docstring: Smooth L1 loss
path: seamseg/modules/losses.py | name: smooth_l1 | repository: urasakikeisuke/seamseg (282 stars) | lang: python
body_without_docstring: the body with the docstring removed.
unified: body_without_docstring followed by the docstring wrapped in <|docstring|> ... <|endoftext|>
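A small check of the piecewise definition above (editor's sketch; the .abs()/.detach()/.float() calls imply PyTorch tensors):

import torch

def smooth_l1(x1, x2, sigma):
    sigma2 = sigma ** 2
    diff = x1 - x2
    abs_diff = diff.abs()
    # Quadratic branch below the 1/sigma^2 threshold, linear branch above it.
    mask = (abs_diff.detach() < (1.0 / sigma2)).float()
    return mask * (sigma2 / 2.0) * diff ** 2 + (1 - mask) * (abs_diff - 0.5 / sigma2)

x1 = torch.tensor([0.1, 2.0])
x2 = torch.zeros(2)
print(smooth_l1(x1, x2, sigma=1.0))  # tensor([0.0050, 1.5000]): 0.5*0.1^2 and 2.0-0.5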
body_hash: 3cafbea574ffa07ef690963164968898f6e470fff4e311a76647a7195431cb7b
body:

@abc.abstractmethod
def write(self, filename: str, file: FileRep):
    """
    Given
        filename of generated file
        file representation
    Write/Store generated file
    """

docstring: Given filename of generated file file representation Write/Store generated file
path: tools/gen/dump.py | name: write | repository: mingkaic/tenncor (1 star) | lang: python
body_without_docstring: identical to the body (the docstring was not stripped in this row).
unified: body_without_docstring followed by the docstring wrapped in <|docstring|> ... <|endoftext|>
body_hash: 5a7aa1bc3dfb0a28b72cf026e7945cc81108814e5fae9d850f7f989ef8b963a6
body:

def test_empty_keyword(self):
    " Returns 4 individuals with the name 'label' "
    authors = [a for a in scholarly.search_keyword('')]
    self.assertEqual(len(authors), 4)

docstring: Returns 4 individuals with the name 'label'
path: scholarly/test.py | name: test_empty_keyword | repository: nobrowning/scholarly (5 stars) | lang: python
body_without_docstring: the body with the docstring text blanked; the stripping also dropped the '' argument, leaving search_keyword().
unified: body_without_docstring followed by the docstring wrapped in <|docstring|> ... <|endoftext|>
body_hash: 92214e15a827927bf5bf5fc028410cedc6522c50dfcb9e032524d9b7d7a376e0
body:

def test_multiple_authors(self):
    " As of March 14, 2019 there are 34 'Zucker's "
    authors = [a.name for a in scholarly.search_author('Zucker')]
    self.assertEqual(len(authors), 58)
    self.assertIn(u'Steven W Zucker', authors)

docstring: As of March 14, 2019 there are 34 'Zucker's
path: scholarly/test.py | name: test_multiple_authors | repository: nobrowning/scholarly (5 stars) | lang: python
body_without_docstring: the body with the docstring text blanked.
unified: body_without_docstring followed by the docstring wrapped in <|docstring|> ... <|endoftext|>
body_hash: 40d0dc7eec7b4fdfd2c67ca0ce183a6a2bed9ce218e51a550d7ffc1e21884abf
body:

def test_multiple_publications(self):
    ' As of March 14, 2019 there are 28 pubs that fit the search term'
    pubs = [p.bib['title'] for p in scholarly.search_pubs_query('"naive physics" stability "3d shape"')]
    self.assertEqual(len(pubs), 28)
    self.assertIn(u'Visual perception of the physical stability of asymmetric three-dimensional objects', pubs)

docstring: As of March 14, 2019 there are 28 pubs that fit the search term
path: scholarly/test.py | name: test_multiple_publications | repository: nobrowning/scholarly (5 stars) | lang: python
body_without_docstring: the body with the docstring text blanked.
unified: body_without_docstring followed by the docstring wrapped in <|docstring|> ... <|endoftext|>
body_hash: c5544f3cf9618f07e678e398df744977a495b8d49c590b9c7d7bbb136c9fa5fc
body:

def np_wrap_to_pi(angles):
    'Wrap angles between [-pi, pi]. Angles right at -pi or pi may flip.'
    return (angles + np.pi) % (2 * np.pi) - np.pi

docstring: Wrap angles between [-pi, pi]. Angles right at -pi or pi may flip.
path: src/monopsr/core/orientation_encoder.py | name: np_wrap_to_pi | repository: minghanz/monopsr (104 stars) | lang: python
body_without_docstring: the body with the docstring removed.
unified: body_without_docstring followed by the docstring wrapped in <|docstring|> ... <|endoftext|>
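A quick check of the wrapping formula above (editor's sketch, plain numpy):

import numpy as np

def np_wrap_to_pi(angles):
    # Shift by pi, wrap into [0, 2*pi), shift back: result lands in [-pi, pi).
    return (angles + np.pi) % (2 * np.pi) - np.pi

print(np_wrap_to_pi(np.array([0.0, 3 * np.pi / 2, -3 * np.pi / 2, 2 * np.pi])))
# approximately [0, -pi/2, pi/2, 0]: 270 degrees maps to -90, -270 to 90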
body_hash: 3a7ede972682cb6d54304442d5f50de190c4207aa8f129546321f9f6c225ca99
body:

def np_orientation_to_angle_bin(orientation, num_bins, overlap):
    """Converts an orientation into an angle bin and residual.
    Example for 8 bins:
        321
        4 0
        567
    Bin centres start at an angle of 0.0.

    Args:
        orientation: orientation angle in radians
        num_bins: number of angle bins
        overlap: amount of overlap for the bins in radians

    Returns:
        angle_bin: bin index
        residual: residual angle from the bin centre
        one_hot_valid_bins: one hot encoding of the valid bins
    """
    two_pi = 2 * np.pi
    orientation_wrapped = orientation % two_pi
    angle_per_bin = two_pi / num_bins
    shifted_angle = (orientation_wrapped + angle_per_bin / 2) % two_pi
    best_angle_bin = int(shifted_angle / angle_per_bin)
    best_residual = shifted_angle - (best_angle_bin * angle_per_bin + angle_per_bin / 2)
    bin_centres = np.asarray([angle_per_bin * bin_idx for bin_idx in range(num_bins)])
    residuals = np.arctan2(np.sin(orientation_wrapped - bin_centres),
                           np.cos(orientation_wrapped - bin_centres))
    valid_bins = [best_angle_bin]
    if overlap != 0.0:
        bin_centre = best_angle_bin * angle_per_bin
        upper_bound = bin_centre + 0.5 * angle_per_bin
        lower_bound = bin_centre - 0.5 * angle_per_bin
        actual_angle = best_angle_bin * angle_per_bin + best_residual
        upper_bound_dist = np.abs(upper_bound - actual_angle)
        lower_bound_dist = np.abs(lower_bound - actual_angle)
        if upper_bound_dist < overlap:
            new_valid_bin = best_angle_bin + 1
            if new_valid_bin == num_bins:
                new_valid_bin = 0
            valid_bins.append(new_valid_bin)
        elif lower_bound_dist < overlap:
            new_valid_bin = best_angle_bin - 1
            if new_valid_bin < 0:
                new_valid_bin = num_bins - 1
            valid_bins.append(new_valid_bin)
    one_hot_valid_bins = np.zeros(num_bins)
    one_hot_valid_bins[np.asarray(valid_bins)] = 1
    return (best_angle_bin, residuals, one_hot_valid_bins)

docstring: the body's docstring, whitespace-collapsed.
path: src/monopsr/core/orientation_encoder.py | name: np_orientation_to_angle_bin | repository: minghanz/monopsr (104 stars) | lang: python
body_without_docstring: identical to the body (the docstring was not stripped in this row).
unified: body_without_docstring followed by the docstring wrapped in <|docstring|> ... <|endoftext|>
body_hash: 8a466a90004ad2a80004016c3c8930a7f4372c7d6fe45a84ad2872c2bf3efcfc
body:

def np_angle_bin_to_orientation(angle_bin, residual, num_bins):
    """Converts an angle bin and residual into an orientation between [-pi, pi]

    Args:
        angle_bin: bin index
        residual: residual angle from bin centre
        num_bins: number of angle bins

    Returns:
        angle: orientation angle in radians
    """
    two_pi = 2 * np.pi
    angle_per_bin = two_pi / num_bins
    angle_center = angle_bin * angle_per_bin
    angle = angle_center + residual
    if angle < -np.pi:
        angle = angle + two_pi
    if angle > np.pi:
        angle = angle - two_pi
    return angle

docstring: Converts an angle bin and residual into an orientation between [-pi, pi] Args: angle_bin: bin index residual: residual angle from bin centre num_bins: number of angle bins Returns: angle: orientation angle in radians
path: src/monopsr/core/orientation_encoder.py | name: np_angle_bin_to_orientation | repository: minghanz/monopsr (104 stars) | lang: python
body_without_docstring: identical to the body (the docstring was not stripped in this row).
unified: body_without_docstring followed by the docstring wrapped in <|docstring|> ... <|endoftext|>
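An encode/decode round-trip using the two row functions above (editor's sketch; assumes both functions are in scope):

# np_orientation_to_angle_bin returns the residual from *every* bin centre;
# decoding uses the residual at the chosen bin.
num_bins = 8
orientation = 2.5  # radians
angle_bin, residuals, one_hot = np_orientation_to_angle_bin(orientation, num_bins, overlap=0.0)
recovered = np_angle_bin_to_orientation(angle_bin, residuals[angle_bin], num_bins)
print(angle_bin, recovered)  # 3, 2.5 (bin centre 3*pi/4 ~ 2.356, residual ~ 0.144)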
body_hash: 2cc8dfaac921edb2fbcec22da756d2f6f31b8122a30e94fdb37af2fd41114d8f
body:

def tf_orientation_to_angle_vector(orientations_tensor):
    """Converts orientation angles into angle unit vector representation.
    e.g. 45 -> [0.717, 0.717], 90 -> [0, 1]

    Args:
        orientations_tensor: A tensor of shape (N,) of orientation angles

    Returns:
        A tensor of shape (N, 2) of angle unit vectors in the format [x, y]
    """
    x = tf.cos(orientations_tensor)
    y = tf.sin(orientations_tensor)
    return tf.stack([x, y], axis=1)

docstring: Converts orientation angles into angle unit vector representation. e.g. 45 -> [0.717, 0.717], 90 -> [0, 1] Args: orientations_tensor: A tensor of shape (N,) of orientation angles Returns: A tensor of shape (N, 2) of angle unit vectors in the format [x, y]
path: src/monopsr/core/orientation_encoder.py | name: tf_orientation_to_angle_vector | repository: minghanz/monopsr (104 stars) | lang: python
body_without_docstring: identical to the body (the docstring was not stripped in this row).
unified: body_without_docstring followed by the docstring wrapped in <|docstring|> ... <|endoftext|>
body_hash: 5daeffe47893dfa7cbbacc59b9ca5fc71ed8442ac8d17963341564df7dfc9544
body:

def tf_angle_vector_to_orientation(angle_vectors_tensor):
    """Converts angle unit vectors into orientation angle representation.
    e.g. [0.717, 0.717] -> 45, [0, 1] -> 90

    Args:
        angle_vectors_tensor: a tensor of shape (N, 2) of angle unit vectors
            in the format [x, y]

    Returns:
        A tensor of shape (N,) of orientation angles
    """
    x = angle_vectors_tensor[:, 0]
    y = angle_vectors_tensor[:, 1]
    return tf.atan2(y, x)

docstring: Converts angle unit vectors into orientation angle representation. e.g. [0.717, 0.717] -> 45, [0, 1] -> 90 Args: angle_vectors_tensor: a tensor of shape (N, 2) of angle unit vectors in the format [x, y] Returns: A tensor of shape (N,) of orientation angles
path: src/monopsr/core/orientation_encoder.py | name: tf_angle_vector_to_orientation | repository: minghanz/monopsr (104 stars) | lang: python
body_without_docstring: identical to the body (the docstring was not stripped in this row).
unified: body_without_docstring followed by the docstring wrapped in <|docstring|> ... <|endoftext|>
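A round-trip check of the two TensorFlow encoders above (editor's sketch; assumes TensorFlow 2 eager execution):

import numpy as np
import tensorflow as tf

angles = tf.constant([0.0, np.pi / 4, np.pi / 2, -np.pi / 2])
# Same computation as tf_orientation_to_angle_vector:
vectors = tf.stack([tf.cos(angles), tf.sin(angles)], axis=1)
# Same computation as tf_angle_vector_to_orientation:
recovered = tf.atan2(vectors[:, 1], vectors[:, 0])
print(recovered.numpy())  # [ 0.      0.7854  1.5708 -1.5708]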
body_hash: 4154d8420b243df05c6a980a25a71a805808097f7bbb60527a2b4c50d1d333a8
body:

def __init__(self, range=(0, 180), rgb=True):
    """
    Args:
        range(list or tuple): range from which the applied hue offset is selected
            (maximum range can be [-90,90] for both uint8 and float32)
        rgb (bool): whether input is RGB or BGR.
    """
    super(Hue, self).__init__()
    rgb = bool(rgb)
    self._init(locals())

docstring: Args: range(list or tuple): range from which the applied hue offset is selected (maximum range can be [-90,90] for both uint8 and float32) rgb (bool): whether input is RGB or BGR.
path: tensorpack/dataflow/imgaug/imgproc.py | name: __init__ | repository: gopalakrishna-r/tensorpack (4,404 stars) | lang: python
body_without_docstring: identical to the body (the docstring was not stripped in this row).
unified: body_without_docstring followed by the docstring wrapped in <|docstring|> ... <|endoftext|>
body_hash: 7fab59e0aaaf17514a873f887502e12f71d10322eedd49afc500d837e1f340d5
body:

def __init__(self, delta, clip=True):
    """
    Args:
        delta (float): Randomly add a value within [-delta,delta]
        clip (bool): clip results to [0,255] even when data type is not uint8.
    """
    super(Brightness, self).__init__()
    assert delta > 0
    self._init(locals())

docstring: Args: delta (float): Randomly add a value within [-delta,delta] clip (bool): clip results to [0,255] even when data type is not uint8.
path: tensorpack/dataflow/imgaug/imgproc.py | name: __init__ | repository: gopalakrishna-r/tensorpack (4,404 stars) | lang: python
body_without_docstring: identical to the body (the docstring was not stripped in this row).
unified: body_without_docstring followed by the docstring wrapped in <|docstring|> ... <|endoftext|>
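The constructor above only stores its parameters; the augmentation itself is an additive brightness offset. A minimal numpy sketch of that apply step (editor's reconstruction, not tensorpack's internal code):

import numpy as np

rng = np.random.default_rng(0)
img = rng.uniform(0, 255, size=(2, 2, 3)).astype('float32')
delta = rng.uniform(-63.0, 63.0)     # the augmentor would draw from [-delta, delta]
out = np.clip(img + delta, 0, 255)   # clip=True keeps results in [0, 255]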
body_hash: 8cc3e7345efffb65aa018c15d81652fdfa00fe07ba5528285fc6515030373c03
body:

def __init__(self, range, clip=True):
    """
    Args:
        range (tuple): Randomly scale the image by a factor in (range[0], range[1])
        clip (bool): clip results to [0,255] even when data type is not uint8.
    """
    super(BrightnessScale, self).__init__()
    self._init(locals())

docstring: Args: range (tuple): Randomly scale the image by a factor in (range[0], range[1]) clip (bool): clip results to [0,255] even when data type is not uint8.
path: tensorpack/dataflow/imgaug/imgproc.py | name: __init__ | repository: gopalakrishna-r/tensorpack (4,404 stars) | lang: python
body_without_docstring: identical to the body (the docstring was not stripped in this row).
unified: body_without_docstring followed by the docstring wrapped in <|docstring|> ... <|endoftext|>
body_hash: 12c20ba092ebf5f418dd65509bb283321efc8fc579ebab567aeb5ab26928179e
body:

def __init__(self, factor_range, rgb=None, clip=True):
    """
    Args:
        factor_range (list or tuple): an interval to randomly sample the `contrast_factor`.
        rgb (bool or None): if None, use the mean per-channel.
        clip (bool): clip to [0, 255] even when data type is not uint8.
    """
    super(Contrast, self).__init__()
    self._init(locals())

docstring: Args: factor_range (list or tuple): an interval to randomly sample the `contrast_factor`. rgb (bool or None): if None, use the mean per-channel. clip (bool): clip to [0, 255] even when data type is not uint8.
path: tensorpack/dataflow/imgaug/imgproc.py | name: __init__ | repository: gopalakrishna-r/tensorpack (4,404 stars) | lang: python
body_without_docstring: identical to the body (the docstring was not stripped in this row).
unified: body_without_docstring followed by the docstring wrapped in <|docstring|> ... <|endoftext|>
body_hash: dfc7736cfc0acf2e59a734170977c65269083be59fa24a57c0a879de6167f63c
body:

def __init__(self, all_channel=True):
    """
    Args:
        all_channel (bool): if True, normalize all channels together. else separately.
    """
    self._init(locals())

docstring: Args: all_channel (bool): if True, normalize all channels together. else separately.
path: tensorpack/dataflow/imgaug/imgproc.py | name: __init__ | repository: gopalakrishna-r/tensorpack (4,404 stars) | lang: python
body_without_docstring: identical to the body (the docstring was not stripped in this row).
unified: body_without_docstring followed by the docstring wrapped in <|docstring|> ... <|endoftext|>
body_hash: 825c0b7c2475b5959133d8cebc0dec3288cdd761cac6b0cb6ddf50e668434504
body:

def __init__(self, size_range=(0, 3), sigma_range=(0, 0), symmetric=True, max_size=None):
    """
    Args:
        size_range (tuple[int]): Gaussian window size would be 2 * size +
            1, where size is randomly sampled from this [low, high) range.
        sigma_range (tuple[float]): min,max of the sigma value. 0 means
            opencv's default.
        symmetric (bool): whether to use the same size & sigma for x and y.
        max_size (int): deprecated
    """
    super(GaussianBlur, self).__init__()
    if not isinstance(size_range, (list, tuple)):
        size_range = (0, size_range)
    assert isinstance(sigma_range, (list, tuple)), sigma_range
    if max_size is not None:
        log_deprecated('GaussianBlur(max_size=)', 'Use size_range= instead!', '2020-09-01')
        size_range = (0, max_size)
    self._init(locals())

docstring: Args: size_range (tuple[int]): Gaussian window size would be 2 * size + 1, where size is randomly sampled from this [low, high) range. sigma_range (tuple[float]): min,max of the sigma value. 0 means opencv's default. symmetric (bool): whether to use the same size & sigma for x and y. max_size (int): deprecated
path: tensorpack/dataflow/imgaug/imgproc.py | name: __init__ | repository: gopalakrishna-r/tensorpack (4,404 stars) | lang: python
body_without_docstring: identical to the body (the docstring was not stripped in this row).
unified: body_without_docstring followed by the docstring wrapped in <|docstring|> ... <|endoftext|>
body_hash: 2fb90bbac9c2ddb530f52ad6c53b15dd23d87dce20bf599c08ba8c7a0af3bc58
body:

def __init__(self, range=(-0.5, 0.5)):
    """
    Args:
        range(list or tuple): gamma range
    """
    super(Gamma, self).__init__()
    self._init(locals())

docstring: Args: range(list or tuple): gamma range
path: tensorpack/dataflow/imgaug/imgproc.py | name: __init__ | repository: gopalakrishna-r/tensorpack (4,404 stars) | lang: python
body_without_docstring: identical to the body (the docstring was not stripped in this row).
unified: body_without_docstring followed by the docstring wrapped in <|docstring|> ... <|endoftext|>
body_hash: 57292982346341d3118a5d85a3151d41f4292efb6e90f2d5a7abacfa3bdcdb05
body:

def __init__(self, min=0, max=255):
    """
    Args:
        min, max: the clip range
    """
    self._init(locals())

docstring: Args: min, max: the clip range
path: tensorpack/dataflow/imgaug/imgproc.py | name: __init__ | repository: gopalakrishna-r/tensorpack (4,404 stars) | lang: python
body_without_docstring: identical to the body (the docstring was not stripped in this row).
unified: body_without_docstring followed by the docstring wrapped in <|docstring|> ... <|endoftext|>
body_hash: 81759df06421e2bb311c91e588c8d05a93843dcafc213629245479b777537ec2
body:

def __init__(self, alpha=0.4, rgb=True, clip=True):
    """
    Args:
        alpha(float): maximum saturation change.
        rgb (bool): whether input is RGB or BGR.
        clip (bool): clip results to [0,255] even when data type is not uint8.
    """
    super().__init__()
    rgb = bool(rgb)
    assert alpha < 1
    self._init(locals())

docstring: Args: alpha(float): maximum saturation change. rgb (bool): whether input is RGB or BGR. clip (bool): clip results to [0,255] even when data type is not uint8.
path: tensorpack/dataflow/imgaug/imgproc.py | name: __init__ | repository: gopalakrishna-r/tensorpack (4,404 stars) | lang: python
body_without_docstring: identical to the body (the docstring was not stripped in this row).
unified: body_without_docstring followed by the docstring wrapped in <|docstring|> ... <|endoftext|>
body_hash: c53e9464bbc86854e0da61e077e54b0e1d77ef2ee11ec437b5845abda0ee103a
body:

def __init__(self, std, eigval, eigvec, clip=True):
    """
    Args:
        std (float): maximum standard deviation
        eigval: a vector of (3,). The eigenvalues of 3 channels.
        eigvec: a 3x3 matrix. Each column is one eigen vector.
        clip (bool): clip results to [0,255] even when data type is not uint8.
    """
    super(Lighting, self).__init__()
    eigval = np.asarray(eigval, dtype='float32')
    eigvec = np.asarray(eigvec, dtype='float32')
    assert eigval.shape == (3,)
    assert eigvec.shape == (3, 3)
    self._init(locals())

docstring: Args: std (float): maximum standard deviation eigval: a vector of (3,). The eigenvalues of 3 channels. eigvec: a 3x3 matrix. Each column is one eigen vector. clip (bool): clip results to [0,255] even when data type is not uint8.
path: tensorpack/dataflow/imgaug/imgproc.py | name: __init__ | repository: gopalakrishna-r/tensorpack (4,404 stars) | lang: python
body_without_docstring: identical to the body (the docstring was not stripped in this row).
unified: body_without_docstring followed by the docstring wrapped in <|docstring|> ... <|endoftext|>
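The constructor above only validates shapes; the augmentation it configures is AlexNet-style PCA lighting jitter. An editor's numpy sketch of that step, under stated assumptions: the eigenvalue/eigenvector constants below are the commonly quoted ImageNet statistics (not taken from the row), and the offset scale assumes images in [0, 1]:

import numpy as np

rng = np.random.default_rng(0)
std = 0.1
# Assumed ImageNet PCA statistics; substitute your own channel statistics.
eigval = np.array([0.2175, 0.0188, 0.0045], dtype='float32')
eigvec = np.array([[-0.5675,  0.7192,  0.4009],
                   [-0.5808, -0.0045, -0.8140],
                   [-0.5836, -0.6948,  0.4203]], dtype='float32')  # columns are eigenvectors

img = rng.uniform(0, 1, size=(4, 4, 3)).astype('float32')
alpha = rng.normal(0.0, std, size=3).astype('float32')  # one draw per image
rgb_offset = eigvec @ (eigval * alpha)                   # constant shift per channel
out = np.clip(img + rgb_offset, 0, 1)                    # clip=True analogue for [0, 1] images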
body_hash: 0df937370edd0ecbe4b56466297548812267193541360797129eedb11ff4f4f9
body:

def __init__(self, min=0, max=255, all_channel=True):
    """
    Args:
        max (float): The new maximum value
        min (float): The new minimum value
        all_channel (bool): if True, normalize all channels together. else separately.
    """
    self._init(locals())

docstring: Args: max (float): The new maximum value min (float): The new minimum value all_channel (bool): if True, normalize all channels together. else separately.
path: tensorpack/dataflow/imgaug/imgproc.py | name: __init__ | repository: gopalakrishna-r/tensorpack (4,404 stars) | lang: python
body_without_docstring: identical to the body (the docstring was not stripped in this row).
unified: body_without_docstring followed by the docstring wrapped in <|docstring|> ... <|endoftext|>
body_hash: bd54a39cd50ba56140002b4a23a12297be9f294193bc04417f9b98e717759e26
body:

@click.group()
@click.option('-l', '--log-level', type=LogLevel(), default=logging.WARNING)
def cmd(log_level):
    """Classification tools for Spack Errors

    Provides tools to get job log traces and classify them based on a taxonomy
    of errors. Logs are grepped for strings and matching error class columns are
    set to true in a resulting CSV. Multiple classes may match a job log, so
    errors may be 'deconflicted' based on a priority list.

    Error CSVs must be in the proper format. These can be exported from the
    following Metabase Analytic:

    https://metabase.spack.io/question/16-job-errors-api-ink

    Example Usage:

    # Download the Error CSV from Metabase. You must specify
    # how far back you would like to look for errors (e.g. "7 DAYS")

    # Assume you have downloaded the CSV to 20211227-7days.csv
    $> error-classification.py -l INFO get-logs -t [GITLAB_API_TOKEN] 20211227-7days.csv

    # Note: logs are downloaded into error_logs/ directory by default
    $> error-classification.py -l INFO classify 20211227-7days.csv

    # Note: Annotated CSV is saved to 20211227-7days_annotated.csv
    $> error-classification.py stats 20211227-7days_annotated.csv
    ...
    """
    logging.basicConfig(level=log_level)

docstring: the body's docstring, whitespace-collapsed.
path: scripts/error-classification.py | name: cmd | repository: spack/testing-sandbox (0 stars) | lang: python
body_without_docstring: identical to the body (the docstring was not stripped in this row).
unified: body_without_docstring followed by the docstring wrapped in <|docstring|> ... <|endoftext|>
body_hash: fd9c6e5a69a4c2d3851aeaa520c56a5c9a5d374634dfb85ef0215baf89fe7c80
body:

@cmd.command()
@click.option('-o', '--output', default='error_logs', type=click.Path(file_okay=False), help='Output directory for error logs.')
@click.option('-t', '--token', required=True, default=(lambda: os.environ.get('API_TOKEN')), help='Spack GitLab API Token (or API_TOKEN environment variable)')
@click.option('-c', '--cache', default='error_log', help='Requests cache file name')
@click.argument('error_csv', type=ErrorLogCSVType(mode='r'))
def get_logs(error_csv, output, token, cache):
    'Scrape Logs from Gitlab into a local directory.'
    os.makedirs(output, exist_ok=True)
    scraper = JobLogScraper(token, session_name=cache, out_dir=output)
    scraper.process_csv(error_csv)

docstring: Scrape Logs from Gitlab into a local directory.
path: scripts/error-classification.py | name: get_logs | repository: spack/testing-sandbox (0 stars) | lang: python
body_without_docstring: the body with the docstring text blanked.
unified: body_without_docstring followed by the docstring wrapped in <|docstring|> ... <|endoftext|>
9666558ee2f32cdc5bebd2b1212560cab0274341c9cba4355c761b6ef745b128
@cmd.command() @click.option('-i', '--input-dir', default='error_logs', type=click.Path(exists=True, file_okay=False), help='Directory containing job logs') @click.option('--deconflict/--no-deconflict', default=True, help='Boolean to deconflict the classified errors') @click.option('-o', '--output', default=None, help='Save annotated CSV to this file name (default [ERROR_CSV]_annotated.csv)') @click.argument('error_csv', type=ErrorLogCSVType(mode='r')) def classify(error_csv, input_dir, deconflict, output): 'Classify errors in the CSV based on the taxonomy.\n\n ' if (output is None): path = Path(error_csv.file_name) output = os.path.join(path.parents[0], f'{path.stem}_annotated.csv') classifier = ErrorClassifier(error_csv.file_name, log_dir=input_dir) classifier.classify() logging.info(f'Error overlap:{os.linesep}{classifier.correlations()}') if deconflict: classifier.deconflict() logging.info(f'Post-deconflict error overlap:{os.linesep}{classifier.correlations()}') logging.info(f'Saving to {output}') classifier.df.to_csv(output)
Classify errors in the CSV based on the taxonomy.
scripts/error-classification.py
classify
spack/testing-sandbox
0
python
@cmd.command() @click.option('-i', '--input-dir', default='error_logs', type=click.Path(exists=True, file_okay=False), help='Directory containing job logs') @click.option('--deconflict/--no-deconflict', default=True, help='Boolean to deconflict the classified errors') @click.option('-o', '--output', default=None, help='Save annotated CSV to this file name (default [ERROR_CSV]_annotated.csv)') @click.argument('error_csv', type=ErrorLogCSVType(mode='r')) def classify(error_csv, input_dir, deconflict, output): '\n\n ' if (output is None): path = Path(error_csv.file_name) output = os.path.join(path.parents[0], f'{path.stem}_annotated.csv') classifier = ErrorClassifier(error_csv.file_name, log_dir=input_dir) classifier.classify() logging.info(f'Error overlap:{os.linesep}{classifier.correlations()}') if deconflict: classifier.deconflict() logging.info(f'Post-deconflict error overlap:{os.linesep}{classifier.correlations()}') logging.info(f'Saving to {output}') classifier.df.to_csv(output)
@cmd.command() @click.option('-i', '--input-dir', default='error_logs', type=click.Path(exists=True, file_okay=False), help='Directory containing job logs') @click.option('--deconflict/--no-deconflict', default=True, help='Boolean to deconflict the classified errors') @click.option('-o', '--output', default=None, help='Save annotated CSV to this file name (default [ERROR_CSV]_annotated.csv)') @click.argument('error_csv', type=ErrorLogCSVType(mode='r')) def classify(error_csv, input_dir, deconflict, output): '\n\n ' if (output is None): path = Path(error_csv.file_name) output = os.path.join(path.parents[0], f'{path.stem}_annotated.csv') classifier = ErrorClassifier(error_csv.file_name, log_dir=input_dir) classifier.classify() logging.info(f'Error overlap:{os.linesep}{classifier.correlations()}') if deconflict: classifier.deconflict() logging.info(f'Post-deconflict error overlap:{os.linesep}{classifier.correlations()}') logging.info(f'Saving to {output}') classifier.df.to_csv(output)<|docstring|>Classify errors in the CSV based on the taxonomy.<|endoftext|>
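The default annotated-CSV name used by classify above is derived from the input path. A minimal sketch of that derivation (the input file name is illustrative):

from pathlib import Path
import os

path = Path('20211227-7days.csv')                        # example input CSV
output = os.path.join(path.parents[0], f'{path.stem}_annotated.csv')
# -> '20211227-7days_annotated.csv', written next to the input file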
f74c358e8912a4b9dca0a760ee5269192667ec5e44d2e7ce12dd6c777f6f66dc
@cmd.command() @click.option('-i', '--input-dir', default='error_logs', type=click.Path(exists=True, file_okay=False), help='Directory containing job logs') @click.argument('error_csv', type=ErrorLogCSVType(mode='r')) @click.argument('error_class') def random_log(error_csv, error_class, input_dir): 'Print a random log from the given error_class.\n\n ' classifier = ErrorClassifier(error_csv.file_name, log_dir=input_dir) try: (idx, path) = classifier.random_log(error_class) except RuntimeError as e: logging.error(str(e)) sys.exit(1) with open(path, 'r') as fh: click.echo(fh.read()) logging.info(f'Finished printing {path}') logging.info(f"See: {classifier.df.loc[idx]['job_link']}")
Print a random log from the given error_class.
scripts/error-classification.py
random_log
spack/testing-sandbox
0
python
@cmd.command() @click.option('-i', '--input-dir', default='error_logs', type=click.Path(exists=True, file_okay=False), help='Directory containing job logs') @click.argument('error_csv', type=ErrorLogCSVType(mode='r')) @click.argument('error_class') def random_log(error_csv, error_class, input_dir): '\n\n ' classifier = ErrorClassifier(error_csv.file_name, log_dir=input_dir) try: (idx, path) = classifier.random_log(error_class) except RuntimeError as e: logging.error(str(e)) sys.exit(1) with open(path, 'r') as fh: click.echo(fh.read()) logging.info(f'Finished printing {path}') logging.info(f"See: {classifier.df.loc[idx]['job_link']}")
@cmd.command() @click.option('-i', '--input-dir', default='error_logs', type=click.Path(exists=True, file_okay=False), help='Directory containing job logs') @click.argument('error_csv', type=ErrorLogCSVType(mode='r')) @click.argument('error_class') def random_log(error_csv, error_class, input_dir): '\n\n ' classifier = ErrorClassifier(error_csv.file_name, log_dir=input_dir) try: (idx, path) = classifier.random_log(error_class) except RuntimeError as e: logging.error(str(e)) sys.exit(1) with open(path, 'r') as fh: click.echo(fh.read()) logging.info(f'Finished printing {path}') logging.info(f"See: {classifier.df.loc[idx]['job_link']}")<|docstring|>Print a random log from the given error_class.<|endoftext|>
412fefc6019546fef5adcf08f553daf1de9c8dbf59ee1fb5721dfb63c8210609
@cmd.command() @click.argument('error_csv', type=ErrorLogCSVType(mode='r')) def overlap(error_csv): 'Print correlation statistics from an annotated Error CSV.\n\n ' classifier = ErrorClassifier(error_csv.file_name, log_dir=None) try: click.echo(classifier.correlations()) except RuntimeError as e: logging.error(str(e)) sys.exit(1)
Print correlation statistics from an annotated Error CSV.
scripts/error-classification.py
overlap
spack/testing-sandbox
0
python
@cmd.command() @click.argument('error_csv', type=ErrorLogCSVType(mode='r')) def overlap(error_csv): '\n\n ' classifier = ErrorClassifier(error_csv.file_name, log_dir=None) try: click.echo(classifier.correlations()) except RuntimeError as e: logging.error(str(e)) sys.exit(1)
@cmd.command() @click.argument('error_csv', type=ErrorLogCSVType(mode='r')) def overlap(error_csv): '\n\n ' classifier = ErrorClassifier(error_csv.file_name, log_dir=None) try: click.echo(classifier.correlations()) except RuntimeError as e: logging.error(str(e)) sys.exit(1)<|docstring|>Print correlation statistics from an annotated Error CSV.<|endoftext|>
ddabf5421a427a798b016a41e166224cb009b0709e45a1d54e1db5accf642c38
@cmd.command() @click.option('-o', '--output', default=None, help='Save annotated CSV to this file name (default [ERROR_CSV] - destructive!)') @click.argument('error_csv', type=ErrorLogCSVType(mode='r')) def deconflict(error_csv, output): 'Deconflict an annotated error CSV.\n\n ' if (output is None): output = error_csv.file_name classifier = ErrorClassifier(error_csv.file_name, log_dir=None) try: classifier.deconflict() except RuntimeError as e: logging.error(str(e)) sys.exit(1) logging.info(f'Saving to {output}') classifier.df.to_csv(output)
Deconflict an annotated error CSV.
scripts/error-classification.py
deconflict
spack/testing-sandbox
0
python
@cmd.command() @click.option('-o', '--output', default=None, help='Save annotated CSV to this file name (default [ERROR_CSV] - destructive!)') @click.argument('error_csv', type=ErrorLogCSVType(mode='r')) def deconflict(error_csv, output): '\n\n ' if (output is None): output = error_csv.file_name classifier = ErrorClassifier(error_csv.file_name, log_dir=None) try: classifier.deconflict() except RuntimeError as e: logging.error(str(e)) sys.exit(1) logging.info(f'Saving to {output}') classifier.df.to_csv(output)
@cmd.command() @click.option('-o', '--output', default=None, help='Save annotated CSV to this file name (default [ERROR_CSV] - destructive!)') @click.argument('error_csv', type=ErrorLogCSVType(mode='r')) def deconflict(error_csv, output): '\n\n ' if (output is None): output = error_csv.file_name classifier = ErrorClassifier(error_csv.file_name, log_dir=None) try: classifier.deconflict() except RuntimeError as e: logging.error(str(e)) sys.exit(1) logging.info(f'Saving to {output}') classifier.df.to_csv(output)<|docstring|>Deconflict an annotated error CSV.<|endoftext|>
56201a288b157e77829c58a9f7adb9ed916fac30c7d37288f104080592fe7b9e
@cmd.command() @click.argument('error_csv', type=ErrorLogCSVType(mode='r')) def stats(error_csv): 'Print error counts and percentages.\n\n ' classifier = ErrorClassifier(error_csv.file_name, log_dir=None) try: click.echo(classifier.stats()) except RuntimeError as e: logging.error(str(e)) sys.exit(1)
Print error counts and percentages.
scripts/error-classification.py
stats
spack/testing-sandbox
0
python
@cmd.command() @click.argument('error_csv', type=ErrorLogCSVType(mode='r')) def stats(error_csv): '\n\n ' classifier = ErrorClassifier(error_csv.file_name, log_dir=None) try: click.echo(classifier.stats()) except RuntimeError as e: logging.error(str(e)) sys.exit(1)
@cmd.command() @click.argument('error_csv', type=ErrorLogCSVType(mode='r')) def stats(error_csv): '\n\n ' classifier = ErrorClassifier(error_csv.file_name, log_dir=None) try: click.echo(classifier.stats()) except RuntimeError as e: logging.error(str(e)) sys.exit(1)<|docstring|>Print error counts and percentages.<|endoftext|>
8729b7bf85d9d23399985e3c6e60ab40255b0fc6ebb2e78dbaaf314c4487540d
def _verify_df(self): "Verify we have pulled logs for the Dataframe.\n\n        Checks to make sure the files in self.log_dir are consistent with the\n        job ids in the CSV file this Dataframe represents.\n\n        " if (self.log_dir is not None): log_files = set([int(Path(s).stem) for s in glob.glob(f'{self.log_dir}/*.log')]) idx = set(self.df.index) def _log_file(id): return f'  {self.log_dir}/{id}.log' if (log_files - idx): raise RuntimeError(f'Log files present which are not in CSV: {os.linesep}{os.linesep.join([_log_file(s) for s in (log_files - idx)])}') if (idx - log_files): raise RuntimeError(f'Errors in CSV without job logs (the following are missing): {os.linesep}{os.linesep.join([_log_file(s) for s in (idx - log_files)])}{os.linesep}Try running "get-logs" on {self.csv_path}')
Verify we have pulled logs for the Dataframe.

Checks to make sure the files in self.log_dir are consistent with the
job ids in the CSV file this Dataframe represents.
scripts/error-classification.py
_verify_df
spack/testing-sandbox
0
python
def _verify_df(self): "Verify we have pulled logs for the Dataframe.\n\n        Checks to make sure the files in self.log_dir are consistent with the\n        job ids in the CSV file this Dataframe represents.\n\n        " if (self.log_dir is not None): log_files = set([int(Path(s).stem) for s in glob.glob(f'{self.log_dir}/*.log')]) idx = set(self.df.index) def _log_file(id): return f'  {self.log_dir}/{id}.log' if (log_files - idx): raise RuntimeError(f'Log files present which are not in CSV: {os.linesep}{os.linesep.join([_log_file(s) for s in (log_files - idx)])}') if (idx - log_files): raise RuntimeError(f'Errors in CSV without job logs (the following are missing): {os.linesep}{os.linesep.join([_log_file(s) for s in (idx - log_files)])}{os.linesep}Try running "get-logs" on {self.csv_path}')
def _verify_df(self): "Verify we have pulled logs for the Dataframe.\n\n        Checks to make sure the files in self.log_dir are consistent with the\n        job ids in the CSV file this Dataframe represents.\n\n        " if (self.log_dir is not None): log_files = set([int(Path(s).stem) for s in glob.glob(f'{self.log_dir}/*.log')]) idx = set(self.df.index) def _log_file(id): return f'  {self.log_dir}/{id}.log' if (log_files - idx): raise RuntimeError(f'Log files present which are not in CSV: {os.linesep}{os.linesep.join([_log_file(s) for s in (log_files - idx)])}') if (idx - log_files): raise RuntimeError(f'Errors in CSV without job logs (the following are missing): {os.linesep}{os.linesep.join([_log_file(s) for s in (idx - log_files)])}{os.linesep}Try running "get-logs" on {self.csv_path}')<|docstring|>Verify we have pulled logs for the Dataframe.

Checks to make sure the files in self.log_dir are consistent with the
job ids in the CSV file this Dataframe represents.<|endoftext|>
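The consistency check in _verify_df reduces to two set differences between the ids recovered from log file names and the ids in the CSV. A toy illustration (all ids hypothetical):

log_files = {101, 102}        # ids parsed from <log_dir>/<id>.log file names
idx = {101, 102, 103}         # ids present in the error CSV
extra = log_files - idx       # set(): log files the CSV never mentions
missing = idx - log_files     # {103}: CSV errors whose log was never pulled
assert not extra and missing == {103}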
f1af9ac444ec64905747643c16b7f9fbe8caf6ed4e7cb1f79eaadee8d13148d5
def _kind(self, r): "Classifies the runner type.\n\n        Used to generate the 'kind' column for the CSV.\n\n        " if pd.isnull(r): return 'None' elif r.startswith('uo'): return 'UO' else: return 'AWS'
Classifies the runner type.

Used to generate the 'kind' column for the CSV.
scripts/error-classification.py
_kind
spack/testing-sandbox
0
python
def _kind(self, r): "Classifies the runner type.\n\n        Used to generate the 'kind' column for the CSV.\n\n        " if pd.isnull(r): return 'None' elif r.startswith('uo'): return 'UO' else: return 'AWS'
def _kind(self, r): "Classifies the runner type.\n\n        Used to generate the 'kind' column for the CSV.\n\n        " if pd.isnull(r): return 'None' elif r.startswith('uo'): return 'UO' else: return 'AWS'<|docstring|>Classifies the runner type.

Used to generate the 'kind' column for the CSV.<|endoftext|>
017e47b799ce26e2ddc53bbd64aa18b1915c8da2479d2b0a818638e72c76e63a
def _grep_for_ids(self, match_string): 'Subprocess out to grep. Return job ids that match match_string.' _match_group = '1' output = subprocess.getoutput(f'grep -l "{match_string}" {self.log_dir}/*.log | sed -e "s|^.*/\(.*\).log|\{_match_group}|"') return ([int(s) for s in output.split('\n')] if output else [])
Subprocess out to grep. Return job ids that match match_string.
scripts/error-classification.py
_grep_for_ids
spack/testing-sandbox
0
python
def _grep_for_ids(self, match_string): _match_group = '1' output = subprocess.getoutput(f'grep -l "{match_string}" {self.log_dir}/*.log | sed -e "s|^.*/\(.*\).log|\{_match_group}|"') return ([int(s) for s in output.split('\n')] if output else [])
def _grep_for_ids(self, match_string): _match_group = '1' output = subprocess.getoutput(f'grep -l "{match_string}" {self.log_dir}/*.log | sed -e "s|^.*/\(.*\).log|\{_match_group}|"') return ([int(s) for s in output.split('\n')] if output else [])<|docstring|>Subprocess out to grep. Return job ids that match match_string.<|endoftext|>
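The grep/sed pipeline in _grep_for_ids lists the files containing the pattern and rewrites each path to its bare job id. A pure-Python sketch of the same idea (note the difference: grep treats the pattern as a regular expression, while this sketch does plain substring matching):

from pathlib import Path

def grep_for_ids(log_dir, match_string):
    # Collect job ids whose <id>.log file contains match_string.
    ids = []
    for path in Path(log_dir).glob('*.log'):
        if match_string in path.read_text(errors='ignore'):
            ids.append(int(path.stem))
    return ids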
78cb318d2461df8bea5cf1882e539c0a148e121d44bf07f196c30be9d600dc98
def _other_errors(self, df): "Classify all ids that do not have at least one other error as\n        'other_errors'\n\n        " target_columns = list((set(self.error_columns) - set(['other_errors']))) return df[target_columns].apply((lambda row: (not any(list(row)))), axis=1)
Classify all ids that do not have at least one other error as
'other_errors'
scripts/error-classification.py
_other_errors
spack/testing-sandbox
0
python
def _other_errors(self, df): "Classify all ids that do not have at least one other error as\n        'other_errors'\n\n        " target_columns = list((set(self.error_columns) - set(['other_errors']))) return df[target_columns].apply((lambda row: (not any(list(row)))), axis=1)
def _other_errors(self, df): "Classify all ids that do not have at least one other error as\n        'other_errors'\n\n        " target_columns = list((set(self.error_columns) - set(['other_errors']))) return df[target_columns].apply((lambda row: (not any(list(row)))), axis=1)<|docstring|>Classify all ids that do not have at least one other error as 'other_errors'<|endoftext|>
88d359c7d0ccc1786c0a5e12d97a4e3352e270a8612d77d56216a42ad59f8438
def is_annotated(self): 'Return True if Dataframe has columns from taxonomy.\n\n ' if (set(self.taxonomy.keys()) <= set(self.df.columns)): return True return False
Return True if Dataframe has columns from taxonomy.
scripts/error-classification.py
is_annotated
spack/testing-sandbox
0
python
def is_annotated(self): '\n\n ' if (set(self.taxonomy.keys()) <= set(self.df.columns)): return True return False
def is_annotated(self): '\n\n ' if (set(self.taxonomy.keys()) <= set(self.df.columns)): return True return False<|docstring|>Return True if Dataframe has columns from taxonomy.<|endoftext|>
2cb1e18a2b4743c894b474188c1e093b6b81cc88c6a38926ed237067019932b7
def is_deconflicted(self): 'Return True if error columns have been deconflicted.\n\n ' return (not (self.df[self.error_columns].apply((lambda r: len([_ for _ in r if _])), axis=1) > 1).any())
Return True if error columns have been deconflicted.
scripts/error-classification.py
is_deconflicted
spack/testing-sandbox
0
python
def is_deconflicted(self): '\n\n ' return (not (self.df[self.error_columns].apply((lambda r: len([_ for _ in r if _])), axis=1) > 1).any())
def is_deconflicted(self): '\n\n ' return (not (self.df[self.error_columns].apply((lambda r: len([_ for _ in r if _])), axis=1) > 1).any())<|docstring|>Return True if error columns have been deconflicted.<|endoftext|>
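The row-wise apply in is_deconflicted counts set error flags per row. An equivalent vectorized form (sketch; df and error_columns stand in for the instance attributes):

# True iff no row has more than one error class set.
deconflicted = (df[error_columns].sum(axis=1) <= 1).all()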
9eeaf9161c741c08319d71c771da9767ce7ed6186fdd7546f11562861b1b36d3
def init_dataframe(self, csv_path, log_dir): "Initialize the Dataframe.\n\n        Verifies logs exist for each job id, converts created_at to datetime and\n        sets the 'kind' column for each type of runner.\n\n        " self.log_dir = log_dir self.csv_path = csv_path self.df = pd.read_csv(csv_path, index_col='id', infer_datetime_format=True) self._verify_df() self.df['created_at'] = pd.to_datetime(self.df['created_at']) self.df['kind'] = self.df['runner'].apply(self._kind)
Initialize the Dataframe.

Verifies logs exist for each job id, converts created_at to datetime and
sets the 'kind' column for each type of runner.
scripts/error-classification.py
init_dataframe
spack/testing-sandbox
0
python
def init_dataframe(self, csv_path, log_dir): "Initialize the Dataframe.\n\n        Verifies logs exist for each job id, converts created_at to datetime and\n        sets the 'kind' column for each type of runner.\n\n        " self.log_dir = log_dir self.csv_path = csv_path self.df = pd.read_csv(csv_path, index_col='id', infer_datetime_format=True) self._verify_df() self.df['created_at'] = pd.to_datetime(self.df['created_at']) self.df['kind'] = self.df['runner'].apply(self._kind)
def init_dataframe(self, csv_path, log_dir): "Initialize the Dataframe.\n\n        Verifies logs exist for each job id, converts created_at to datetime and\n        sets the 'kind' column for each type of runner.\n\n        " self.log_dir = log_dir self.csv_path = csv_path self.df = pd.read_csv(csv_path, index_col='id', infer_datetime_format=True) self._verify_df() self.df['created_at'] = pd.to_datetime(self.df['created_at']) self.df['kind'] = self.df['runner'].apply(self._kind)<|docstring|>Initialize the Dataframe.

Verifies logs exist for each job id, converts created_at to datetime and
sets the 'kind' column for each type of runner.<|endoftext|>
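Stripped of the class plumbing, the initialization above boils down to a few pandas calls (sketch; the file name is illustrative, and the 'id', 'created_at' and 'runner' columns follow the Metabase export described earlier):

import pandas as pd

df = pd.read_csv('20211227-7days.csv', index_col='id')
df['created_at'] = pd.to_datetime(df['created_at'])
# 'None' for jobs with no runner, 'UO' for runner names starting with
# 'uo', 'AWS' for everything else -- mirroring _kind above.
df['kind'] = df['runner'].apply(
    lambda r: 'None' if pd.isnull(r) else ('UO' if r.startswith('uo') else 'AWS'))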
5c4b4426cf6047c36fbc812cbfe2dad27d25a83a8ebe83f4d320fa0ea06a670d
def classify(self): 'Classify all the errors based on job logs.\n\n ' for (col, expr) in self.taxonomy.items(): if callable(expr): self.df[col] = expr(self.df) else: if isinstance(expr, str): expr = [expr] self.df[col] = False for s in expr: ids = self._grep_for_ids(s) if bool(ids): self.df.at[(ids, col)] = True try: counts = self.df[col].value_counts().loc[True] except KeyError: counts = 0 logging.info(f'Processed {col} ({counts})')
Classify all the errors based on job logs.
scripts/error-classification.py
classify
spack/testing-sandbox
0
python
def classify(self): '\n\n ' for (col, expr) in self.taxonomy.items(): if callable(expr): self.df[col] = expr(self.df) else: if isinstance(expr, str): expr = [expr] self.df[col] = False for s in expr: ids = self._grep_for_ids(s) if bool(ids): self.df.at[(ids, col)] = True try: counts = self.df[col].value_counts().loc[True] except KeyError: counts = 0 logging.info(f'Processed {col} ({counts})')
def classify(self): '\n\n ' for (col, expr) in self.taxonomy.items(): if callable(expr): self.df[col] = expr(self.df) else: if isinstance(expr, str): expr = [expr] self.df[col] = False for s in expr: ids = self._grep_for_ids(s) if bool(ids): self.df.at[(ids, col)] = True try: counts = self.df[col].value_counts().loc[True] except KeyError: counts = 0 logging.info(f'Processed {col} ({counts})')<|docstring|>Classify all the errors based on job logs.<|endoftext|>
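The classify loop supports three kinds of taxonomy values: a single grep string, a list of strings, and a callable applied to the DataFrame. A hypothetical taxonomy showing all three (class names and patterns are made up for illustration):

taxonomy = {
    'oom_killed': 'Killed',                                 # one grep string
    'network_errors': ['Connection refused',                # any of several strings
                       'Could not resolve host'],
    'other_errors': lambda df: ~df[['oom_killed',           # callable on the DataFrame
                                    'network_errors']].any(axis=1),
}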
9f33e0ca84aec6d2b6e9170e87bde03b1782f4ef69033a1c16d5b8ea92cfdf67
def correlations(self): 'Return a dataframe with statistics on correlations between error classes.\n\n ' if (not self.is_annotated()): raise RuntimeError('Dataframe does not contain error annotations!') def _overlap(columns): for (a, b) in itertools.combinations(columns, 2): numerator = len(self.df[((self.df[a] == True) & (self.df[b] == True))]) denominator = len(self.df[((self.df[a] == True) | (self.df[b] == True))]) if ((a != b) and (numerator > 0)): (yield (a, b, numerator, denominator, round(((numerator / float(denominator)) * 100), 2))) o = pd.DataFrame(list(_overlap(self.error_columns)), columns=['A', 'B', 'overlap', 'total', 'percent']) o.set_index(['A', 'B'], inplace=True) o.sort_values('percent', ascending=False, inplace=True) return o
Return a dataframe with statistics on correlations between error classes.
scripts/error-classification.py
correlations
spack/testing-sandbox
0
python
def correlations(self): '\n\n ' if (not self.is_annotated()): raise RuntimeError('Dataframe does not contain error annotations!') def _overlap(columns): for (a, b) in itertools.combinations(columns, 2): numerator = len(self.df[((self.df[a] == True) & (self.df[b] == True))]) denominator = len(self.df[((self.df[a] == True) | (self.df[b] == True))]) if ((a != b) and (numerator > 0)): (yield (a, b, numerator, denominator, round(((numerator / float(denominator)) * 100), 2))) o = pd.DataFrame(list(_overlap(self.error_columns)), columns=['A', 'B', 'overlap', 'total', 'percent']) o.set_index(['A', 'B'], inplace=True) o.sort_values('percent', ascending=False, inplace=True) return o
def correlations(self): '\n\n ' if (not self.is_annotated()): raise RuntimeError('Dataframe does not contain error annotations!') def _overlap(columns): for (a, b) in itertools.combinations(columns, 2): numerator = len(self.df[((self.df[a] == True) & (self.df[b] == True))]) denominator = len(self.df[((self.df[a] == True) | (self.df[b] == True))]) if ((a != b) and (numerator > 0)): (yield (a, b, numerator, denominator, round(((numerator / float(denominator)) * 100), 2))) o = pd.DataFrame(list(_overlap(self.error_columns)), columns=['A', 'B', 'overlap', 'total', 'percent']) o.set_index(['A', 'B'], inplace=True) o.sort_values('percent', ascending=False, inplace=True) return o<|docstring|>Return a dataframe with statistics on correlations between error classes.<|endoftext|>
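For each pair of error classes, correlations reports |A AND B| / |A OR B| * 100, i.e. the Jaccard index of the matched rows expressed as a percentage. For two boolean columns:

import pandas as pd

a = pd.Series([True, True, False, True])
b = pd.Series([True, False, False, True])
percent = round(((a & b).sum() / float((a | b).sum())) * 100, 2)  # 66.67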
81125c30f5c2227a2bb81f169abc2a23b75370574d96b4cfbe0249fe6da6942a
def deconflict(self): 'Deconflicts error classes based on deconflict_order.\n\n ' if (not self.is_annotated()): raise RuntimeError('Dataframe does not contain error annotations!') def _deconflict(A): 'Prefer errors in Column A' target = list((set(self.error_columns) - set([A]))) if self.df[A].any(): self.df.loc[(self.df[A], target)] = False for column in self.deconflict_order: _deconflict(column)
Deconflicts error classes based on deconflict_order.
scripts/error-classification.py
deconflict
spack/testing-sandbox
0
python
def deconflict(self): '\n\n ' if (not self.is_annotated()): raise RuntimeError('Dataframe does not contain error annotations!') def _deconflict(A): 'Prefer errors in Column A' target = list((set(self.error_columns) - set([A]))) if self.df[A].any(): self.df.loc[(self.df[A], target)] = False for column in self.deconflict_order: _deconflict(column)
def deconflict(self): '\n\n ' if (not self.is_annotated()): raise RuntimeError('Dataframe does not contain error annotations!') def _deconflict(A): 'Prefer errors in Column A' target = list((set(self.error_columns) - set([A]))) if self.df[A].any(): self.df.loc[(self.df[A], target)] = False for column in self.deconflict_order: _deconflict(column)<|docstring|>Deconflicts error classes based on deconflict_order.<|endoftext|>
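deconflict walks deconflict_order and, for each class, clears every other error column on the rows where that class matched, so classes earlier in the order win conflicts. A toy run with two hypothetical classes:

import pandas as pd

df = pd.DataFrame({'oom_killed': [True, False], 'other_errors': [True, True]})
error_columns = ['oom_killed', 'other_errors']
for col in ['oom_killed']:                   # deconflict_order, highest priority first
    target = [c for c in error_columns if c != col]
    df.loc[df[col], target] = False          # rows matching col keep only col
# Row 0 now reports only oom_killed; row 1 still reports other_errors.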
a0141944502e742698438fff0ad848fd2c5fdf88c9d64ddf222bfc8a787acfa0
def random_log(self, error_class): 'Return the path to a random log file in the given error_class.\n\n ' if (not self.is_annotated()): raise RuntimeError('Dataframe does not contain error annotations!') if (error_class not in ErrorClassifier().error_columns): raise RuntimeError(f""""{error_class}" not one of: {os.linesep}{os.linesep.join([(' ' + s) for s in ErrorClassifier().error_columns])}""") idx = random.choice(self.df[self.df[error_class]].index) return (idx, f'{self.log_dir}/{idx}.log')
Return the path to a random log file in the given error_class.
scripts/error-classification.py
random_log
spack/testing-sandbox
0
python
def random_log(self, error_class): '\n\n ' if (not self.is_annotated()): raise RuntimeError('Dataframe does not contain error annotations!') if (error_class not in ErrorClassifier().error_columns): raise RuntimeError(f""""{error_class}" not one of: {os.linesep}{os.linesep.join([('  ' + s) for s in ErrorClassifier().error_columns])}""") idx = random.choice(self.df[self.df[error_class]].index) return (idx, f'{self.log_dir}/{idx}.log')
def random_log(self, error_class): '\n\n ' if (not self.is_annotated()): raise RuntimeError('Dataframe does not contain error annotations!') if (error_class not in ErrorClassifier().error_columns): raise RuntimeError(f""""{error_class}" not one of: {os.linesep}{os.linesep.join([('  ' + s) for s in ErrorClassifier().error_columns])}""") idx = random.choice(self.df[self.df[error_class]].index) return (idx, f'{self.log_dir}/{idx}.log')<|docstring|>Return the path to a random log file in the given error_class.<|endoftext|>
339580a32c956ec718ece611d7285e1541887aa01a98efdbea4929cb3c775c82
def _deconflict(A): 'Prefer errors in Column A' target = list((set(self.error_columns) - set([A]))) if self.df[A].any(): self.df.loc[(self.df[A], target)] = False
Prefer errors in Column A
scripts/error-classification.py
_deconflict
spack/testing-sandbox
0
python
def _deconflict(A): target = list((set(self.error_columns) - set([A]))) if self.df[A].any(): self.df.loc[(self.df[A], target)] = False
def _deconflict(A): target = list((set(self.error_columns) - set([A]))) if self.df[A].any(): self.df.loc[(self.df[A], target)] = False<|docstring|>Prefer errors in Column A<|endoftext|>
b41a18839196280923a130e4bdd078d91de7560d2543d24d508436bf445394af
def get_blade_slot(self): '\n Return blade slot\n dmidecode output is:\n ` Location In Chassis: Slot 03`\n ' if self.is_blade(): return self.baseboard[0].get('Location In Chassis').strip() return None
Return blade slot dmidecode output is: ` Location In Chassis: Slot 03`
netbox_agent/vendors/dell.py
get_blade_slot
markd69/netbox-agent
84
python
def get_blade_slot(self): '\n Return blade slot\n dmidecode output is:\n ` Location In Chassis: Slot 03`\n ' if self.is_blade(): return self.baseboard[0].get('Location In Chassis').strip() return None
def get_blade_slot(self): '\n Return blade slot\n dmidecode output is:\n ` Location In Chassis: Slot 03`\n ' if self.is_blade(): return self.baseboard[0].get('Location In Chassis').strip() return None<|docstring|>Return blade slot dmidecode output is: ` Location In Chassis: Slot 03`<|endoftext|>
1c751cac5f982618a3f9fc18a941e868e9d9af04681159323a2e6be8f198d1e5
def get_power_consumption(self): '\n Parse omreport output like this\n\n Amperage\n PS1 Current 1 : 1.8 A\n PS2 Current 2 : 1.4 A\n ' value = [] if (not is_tool('omreport')): logging.error('omreport does not seem to be installed, please debug') return value data = subprocess.getoutput('omreport chassis pwrmonitoring') amperage = False for line in data.splitlines(): if line.startswith('Amperage'): amperage = True continue if amperage: if line.startswith('PS'): amp_value = line.split(':')[1].split()[0] value.append(amp_value) else: break return value
Parse omreport output like this Amperage PS1 Current 1 : 1.8 A PS2 Current 2 : 1.4 A
netbox_agent/vendors/dell.py
get_power_consumption
markd69/netbox-agent
84
python
def get_power_consumption(self): '\n Parse omreport output like this\n\n Amperage\n PS1 Current 1 : 1.8 A\n PS2 Current 2 : 1.4 A\n ' value = [] if (not is_tool('omreport')): logging.error('omreport does not seem to be installed, please debug') return value data = subprocess.getoutput('omreport chassis pwrmonitoring') amperage = False for line in data.splitlines(): if line.startswith('Amperage'): amperage = True continue if amperage: if line.startswith('PS'): amp_value = line.split(':')[1].split()[0] value.append(amp_value) else: break return value
def get_power_consumption(self): '\n Parse omreport output like this\n\n Amperage\n PS1 Current 1 : 1.8 A\n PS2 Current 2 : 1.4 A\n ' value = [] if (not is_tool('omreport')): logging.error('omreport does not seem to be installed, please debug') return value data = subprocess.getoutput('omreport chassis pwrmonitoring') amperage = False for line in data.splitlines(): if line.startswith('Amperage'): amperage = True continue if amperage: if line.startswith('PS'): amp_value = line.split(':')[1].split()[0] value.append(amp_value) else: break return value<|docstring|>Parse omreport output like this Amperage PS1 Current 1 : 1.8 A PS2 Current 2 : 1.4 A<|endoftext|>
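The omreport parser keys on the 'Amperage' header and consumes 'PS' lines until the block ends. Fed the sample output quoted in the docstring, the same state machine yields the amperage strings:

sample = ('Amperage\n'
          'PS1 Current 1 : 1.8 A\n'
          'PS2 Current 2 : 1.4 A\n')
values, in_amperage = [], False
for line in sample.splitlines():
    if line.startswith('Amperage'):
        in_amperage = True
        continue
    if in_amperage:
        if line.startswith('PS'):
            values.append(line.split(':')[1].split()[0])
        else:
            break
print(values)  # ['1.8', '1.4']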
2d456bb03c7c00740ebed87e9f7e352577a7cb725d07753e6fbcdf14e57c9a67
def get_expansion_product(self): '\n        Get the extension slot that is on an even slot number\n        next to the compute slot that is on an odd slot number\n        ' raise NotImplementedError
Get the extension slot that is on an even slot number
next to the compute slot that is on an odd slot number
netbox_agent/vendors/dell.py
get_expansion_product
markd69/netbox-agent
84
python
def get_expansion_product(self): '\n        Get the extension slot that is on an even slot number\n        next to the compute slot that is on an odd slot number\n        ' raise NotImplementedError
def get_expansion_product(self): '\n        Get the extension slot that is on an even slot number\n        next to the compute slot that is on an odd slot number\n        ' raise NotImplementedError<|docstring|>Get the extension slot that is on an even slot number next to the compute slot that is on an odd slot number<|endoftext|>
ebddd6c1a89bc85b023799080920e64e85bc3260c6ca6239a4f960031c4b06d9
def is_expansion_slot(self, server): '\n        Return True if it's an extension slot\n        ' raise NotImplementedError
Return True if it's an extension slot
netbox_agent/vendors/dell.py
is_expansion_slot
markd69/netbox-agent
84
python
def is_expansion_slot(self, server): '\n \n ' raise NotImplementedError
def is_expansion_slot(self, server): '\n        \n        ' raise NotImplementedError<|docstring|>Return True if it's an extension slot<|endoftext|>
6a1344e8092e23f6396c80b35ac3e45500750dca455e64093b8034c2b9ab4eea
def get_blade_expansion_slot(self): '\n        Expansion slots are always the compute bay number + 1\n        ' raise NotImplementedError
Expansion slots are always the compute bay number + 1
netbox_agent/vendors/dell.py
get_blade_expansion_slot
markd69/netbox-agent
84
python
def get_blade_expansion_slot(self): '\n \n ' raise NotImplementedError
def get_blade_expansion_slot(self): '\n        \n        ' raise NotImplementedError<|docstring|>Expansion slots are always the compute bay number + 1<|endoftext|>
7ba0bc8bd47a9b247abcb6a33abc86d3481ea43ff6b6c011e335b60ee39a81cb
def own_expansion_slot(self): '\n        Say whether the device can host an extension card based\n        on the product name\n        ' pass
Say whether the device can host an extension card based
on the product name
netbox_agent/vendors/dell.py
own_expansion_slot
markd69/netbox-agent
84
python
def own_expansion_slot(self): '\n        Say whether the device can host an extension card based\n        on the product name\n        ' pass
def own_expansion_slot(self): '\n        Say whether the device can host an extension card based\n        on the product name\n        ' pass<|docstring|>Say whether the device can host an extension card based on the product name<|endoftext|>
8977c3d33fa44afe2fc88d4f5f28ec830d05092caf3e7cdd69eb9db969a3e9ab
def _get_information(self): 'Returns information about the given league.' return requests.get(API_URLS['league_classic'].format(self.league_id)).json()
Returns information about the given league.
fpl/models/classic_league.py
_get_information
emre/fpl
0
python
def _get_information(self): return requests.get(API_URLS['league_classic'].format(self.league_id)).json()
def _get_information(self): return requests.get(API_URLS['league_classic'].format(self.league_id)).json()<|docstring|>Returns information about the given league.<|endoftext|>
12619527e7e1b3477e405d5e0bd41457555a44e6bd716e351af752a38da5c5b0
def get_standings(self): 'Returns league standings for all teams in the league.' standings = [] for page in itertools.count(start=1): url = '{}?ls-page={}'.format(API_URLS['league_classic'].format(self.league_id), page) page_results = requests.get(url).json()['standings']['results'] if page_results: standings.extend(page_results) else: self.standings = standings break
Returns league standings for all teams in the league.
fpl/models/classic_league.py
get_standings
emre/fpl
0
python
def get_standings(self): standings = [] for page in itertools.count(start=1): url = '{}?ls-page={}'.format(API_URLS['league_classic'].format(self.league_id), page) page_results = requests.get(url).json()['standings']['results'] if page_results: standings.extend(page_results) else: self.standings = standings break
def get_standings(self): standings = [] for page in itertools.count(start=1): url = '{}?ls-page={}'.format(API_URLS['league_classic'].format(self.league_id), page) page_results = requests.get(url).json()['standings']['results'] if page_results: standings.extend(page_results) else: self.standings = standings break<|docstring|>Returns league standings for all teams in the league.<|endoftext|>
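get_standings pages through the API with itertools.count until a page comes back empty. A self-contained sketch of the idiom (base_url stands in for API_URLS['league_classic'].format(self.league_id)):

import itertools
import requests

def fetch_all_pages(base_url):
    standings = []
    for page in itertools.count(start=1):
        url = '{}?ls-page={}'.format(base_url, page)
        results = requests.get(url).json()['standings']['results']
        if not results:
            break                    # an empty page marks the end
        standings.extend(results)
    return standings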
d9ac5dedb2b3b53007f8f1a54320ba4239ff23e9a37e35c5e4b51fb348170264
def except_error_db_add(testcase, object, Error): '\n\tFails the provided testcase if an error of type Error is NOT excepted when attempting to add the provided object to\n\tthe database and then committing.\n\t' try: db.session.add(object) db.session.commit() except Error: db.session.rollback() except Exception: testcase.fail('An unexpected exception was raised, rather than the expected {}.'.format(Error.__name__)) else: testcase.fail('Expected an {} to be raised, however it was not.'.format(Error.__name__))
Fails the provided testcase if an error of type Error is NOT excepted when attempting to add the provided object to the database and then committing.
server/dbentitytests.py
except_error_db_add
TDDD96-Kandidatgrupp-1-2020/Visual1ze
6
python
def except_error_db_add(testcase, object, Error): '\n\tFails the provided testcase if an error of type Error is NOT excepted when attempting to add the provided object to\n\tthe database and then committing.\n\t' try: db.session.add(object) db.session.commit() except Error: db.session.rollback() except Exception: testcase.fail('An unexpected exception was raised, rather than the expected {}.'.format(Error.__name__)) else: testcase.fail('Expected an {} to be raised, however it was not.'.format(Error.__name__))
def except_error_db_add(testcase, object, Error): '\n\tFails the provided testcase if an error of type Error is NOT excepted when attempting to add the provided object to\n\tthe database and then committing.\n\t' try: db.session.add(object) db.session.commit() except Error: db.session.rollback() except Exception: testcase.fail('An unexpected exception was raised, rather than the expected {}.'.format(Error.__name__)) else: testcase.fail('Expected an {} to be raised, however it was not.'.format(Error.__name__))<|docstring|>Fails the provided testcase if an error of type Error is NOT excepted when attempting to add the provided object to the database and then committing.<|endoftext|>
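except_error_db_add is a hand-rolled cousin of unittest's assertRaises that also rolls back the session and distinguishes wrong exception types with tailored messages. A rough equivalent with the standard context manager (sketch; obj is a placeholder):

with testcase.assertRaises(IntegrityError):
    db.session.add(obj)
    db.session.commit()
db.session.rollback()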
5983a3229e59f4abe710f070400a42c794c3184bde2de113971e9b7d0f98cf4d
def except_error_db_commit(testcase, Error): '\n\tFails the provided testcase if an error of type Error is NOT excepted when attempting to make a commit to the\n\tdatabase.\n\t' try: db.session.commit() except Error: db.session.rollback() except Exception: testcase.fail('An unexpected exception was raised, rather than the expected {}.'.format(Error.__name__)) else: testcase.fail('Expected an {} to be raised, however it was not.'.format(Error.__name__))
Fails the provided testcase if an error of type Error is NOT excepted when attempting to make a commit to the database.
server/dbentitytests.py
except_error_db_commit
TDDD96-Kandidatgrupp-1-2020/Visual1ze
6
python
def except_error_db_commit(testcase, Error): '\n\tFails the provided testcase if an error of type Error is NOT excepted when attempting to make a commit to the\n\tdatabase.\n\t' try: db.session.commit() except Error: db.session.rollback() except Exception: testcase.fail('An unexpected exception was raised, rather than the expected {}.'.format(Error.__name__)) else: testcase.fail('Expected an {} to be raised, however it was not.'.format(Error.__name__))
def except_error_db_commit(testcase, Error): '\n\tFails the provided testcase if an error of type Error is NOT excepted when attempting to make a commit to the\n\tdatabase.\n\t' try: db.session.commit() except Error: db.session.rollback() except Exception: testcase.fail('An unexpected exception was raised, rather than the expected {}.'.format(Error.__name__)) else: testcase.fail('Expected an {} to be raised, however it was not.'.format(Error.__name__))<|docstring|>Fails the provided testcase if an error of type Error is NOT excepted when attempting to make a commit to the database.<|endoftext|>
c3ada9e94469e761ac1cb6811080d4ae36f2498db9fc7be74c40313f9ceeda8b
def setUp(self): 'Rebuild all tables before test.' db.create_all()
Rebuild all tables before test.
server/dbentitytests.py
setUp
TDDD96-Kandidatgrupp-1-2020/Visual1ze
6
python
def setUp(self): db.create_all()
def setUp(self): db.create_all()<|docstring|>Rebuild all tables before test.<|endoftext|>
3f4df6c7b67a8a667dce1ce8dc5fb766955351719974e62af8d03678c6268544
def tearDown(self): 'Drop all tables after test.' db.session.rollback() db.drop_all()
Drop all tables after test.
server/dbentitytests.py
tearDown
TDDD96-Kandidatgrupp-1-2020/Visual1ze
6
python
def tearDown(self): db.session.rollback() db.drop_all()
def tearDown(self): db.session.rollback() db.drop_all()<|docstring|>Drop all tables after test.<|endoftext|>
8ee2f8279e8f83e8f9fb5d2305c39bcdb07b36375ec3bbb79f3477b89172dab5
def test_create_reader(self): 'Tests that a reader can be created as expected.' reader = Reader(email='[email protected]', password='abc123', name='Peter', surname='Parker') db.session.add(reader) db.session.commit()
Tests that a reader can be created as expected.
server/dbentitytests.py
test_create_reader
TDDD96-Kandidatgrupp-1-2020/Visual1ze
6
python
def test_create_reader(self): reader = Reader(email='[email protected]', password='abc123', name='Peter', surname='Parker') db.session.add(reader) db.session.commit()
def test_create_reader(self): reader = Reader(email='[email protected]', password='abc123', name='Peter', surname='Parker') db.session.add(reader) db.session.commit()<|docstring|>Tests that a reader can be created as expected.<|endoftext|>
42edd52cfc41e228bd0cd507d4e332b369a89af759c409aa5ed5ab1dee3cef19
def test_datatypes_reader(self): 'Tests that non-string datatype constraints of attributes in Reader are enforced by the database.' reader = Reader(email='[email protected]', password='abc123', name='Peter', surname='Parker') reader.id = MAGIC_STRING except_error_db_add(self, reader, IntegrityError) reader = Reader(email='[email protected]', password='abc123', name='Peter', surname='Parker') db.session.add(reader) reader.id = MAGIC_STRING except_error_db_commit(self, IntegrityError) reader = Reader(email='[email protected]', password='abc123', name='Peter', surname='Parker') reader.card_id = MAGIC_STRING except_error_db_add(self, reader, IntegrityError) reader = Reader(email='[email protected]', password='abc123', name='Peter', surname='Parker') db.session.add(reader) reader.card_id = MAGIC_STRING except_error_db_commit(self, IntegrityError)
Tests that non-string datatype constraints of attributes in Reader are enforced by the database.
server/dbentitytests.py
test_datatypes_reader
TDDD96-Kandidatgrupp-1-2020/Visual1ze
6
python
def test_datatypes_reader(self): reader = Reader(email='[email protected]', password='abc123', name='Peter', surname='Parker') reader.id = MAGIC_STRING except_error_db_add(self, reader, IntegrityError) reader = Reader(email='[email protected]', password='abc123', name='Peter', surname='Parker') db.session.add(reader) reader.id = MAGIC_STRING except_error_db_commit(self, IntegrityError) reader = Reader(email='[email protected]', password='abc123', name='Peter', surname='Parker') reader.card_id = MAGIC_STRING except_error_db_add(self, reader, IntegrityError) reader = Reader(email='[email protected]', password='abc123', name='Peter', surname='Parker') db.session.add(reader) reader.card_id = MAGIC_STRING except_error_db_commit(self, IntegrityError)
def test_datatypes_reader(self): reader = Reader(email='[email protected]', password='abc123', name='Peter', surname='Parker') reader.id = MAGIC_STRING except_error_db_add(self, reader, IntegrityError) reader = Reader(email='[email protected]', password='abc123', name='Peter', surname='Parker') db.session.add(reader) reader.id = MAGIC_STRING except_error_db_commit(self, IntegrityError) reader = Reader(email='[email protected]', password='abc123', name='Peter', surname='Parker') reader.card_id = MAGIC_STRING except_error_db_add(self, reader, IntegrityError) reader = Reader(email='[email protected]', password='abc123', name='Peter', surname='Parker') db.session.add(reader) reader.card_id = MAGIC_STRING except_error_db_commit(self, IntegrityError)<|docstring|>Tests that non-string datatype constraints of attributes in Reader are enforced by the database.<|endoftext|>
2c2fa1ffabc4852919e0d24b72b5a64d725a2255cc879d5b74e2067e4b74ecc1
def test_nullable_false_reader(self): 'Tests that attributes in Reader with nullable=False cannot be None.' except_error_db_add(self, Reader(email=None, password='abc123', name='Peter', surname='Parker'), IntegrityError) except_error_db_add(self, Reader(email='[email protected]', password='abc123', name=None, surname='Parker'), IntegrityError) except_error_db_add(self, Reader(email='[email protected]', password='abc123', name='Peter', surname=None), IntegrityError) try: Reader(email='[email protected]', password=None, name='Peter', surname='Parker') except ValueError: pass except Exception: self.fail('Unexpected exception raised') else: self.fail('ValueError not raised')
Tests that attributes in Reader with nullable=False cannot be None.
server/dbentitytests.py
test_nullable_false_reader
TDDD96-Kandidatgrupp-1-2020/Visual1ze
6
python
def test_nullable_false_reader(self): except_error_db_add(self, Reader(email=None, password='abc123', name='Peter', surname='Parker'), IntegrityError) except_error_db_add(self, Reader(email='[email protected]', password='abc123', name=None, surname='Parker'), IntegrityError) except_error_db_add(self, Reader(email='[email protected]', password='abc123', name='Peter', surname=None), IntegrityError) try: Reader(email='[email protected]', password=None, name='Peter', surname='Parker') except ValueError: pass except Exception: self.fail('Unexpected exception raised') else: self.fail('ValueError not raised')
def test_nullable_false_reader(self): except_error_db_add(self, Reader(email=None, password='abc123', name='Peter', surname='Parker'), IntegrityError) except_error_db_add(self, Reader(email='[email protected]', password='abc123', name=None, surname='Parker'), IntegrityError) except_error_db_add(self, Reader(email='[email protected]', password='abc123', name='Peter', surname=None), IntegrityError) try: Reader(email='[email protected]', password=None, name='Peter', surname='Parker') except ValueError: pass except Exception: self.fail('Unexpected exception raised') else: self.fail('ValueError not raised')<|docstring|>Tests that attributes in Reader with nullable=False cannot be None.<|endoftext|>
9b212e831daaf5fb3a2cdb956176fdffa404e7d81bdef872e4a76bb45912a932
def test_nullable_reader(self): 'Tests that attributes in Reader with nullable=True actually can be None.' reader = Reader(email='[email protected]', password='abc123', name='Peter', surname='Parker') db.session.add(reader) db.session.commit() reader.card_id = MAGIC_INTEGER db.session.commit() reader.card_id = None db.session.commit()
Tests that attributes in Reader with nullable=True actually can be None.
server/dbentitytests.py
test_nullable_reader
TDDD96-Kandidatgrupp-1-2020/Visual1ze
6
python
def test_nullable_reader(self): reader = Reader(email='[email protected]', password='abc123', name='Peter', surname='Parker') db.session.add(reader) db.session.commit() reader.card_id = MAGIC_INTEGER db.session.commit() reader.card_id = None db.session.commit()
def test_nullable_reader(self): reader = Reader(email='[email protected]', password='abc123', name='Peter', surname='Parker') db.session.add(reader) db.session.commit() reader.card_id = MAGIC_INTEGER db.session.commit() reader.card_id = None db.session.commit()<|docstring|>Tests that attributes in Reader with nullable=True actually can be None.<|endoftext|>
c63426d7d01a829924a1141302b21c4be9364bd3ef29ca75600d81e3cce951a6
def test_uniqueness_reader(self): 'Tests that uniqueness constraints of attributes in Reader are enforced by the database.' reader1 = Reader(email='[email protected]', password='abc123', name='Peter', surname='Parker') reader2 = Reader(email='[email protected]', password='def456', name='Mary Jane', surname='Watson') db.session.add(reader1) db.session.commit() except_error_db_add(self, reader2, IntegrityError) reader1.card_id = MAGIC_INTEGER reader2.email = '[email protected]' reader2.card_id = MAGIC_INTEGER except_error_db_add(self, reader2, IntegrityError)
Tests that uniqueness constraints of attributes in Reader are enforced by the database.
server/dbentitytests.py
test_uniqueness_reader
TDDD96-Kandidatgrupp-1-2020/Visual1ze
6
python
def test_uniqueness_reader(self): reader1 = Reader(email='[email protected]', password='abc123', name='Peter', surname='Parker') reader2 = Reader(email='[email protected]', password='def456', name='Mary Jane', surname='Watson') db.session.add(reader1) db.session.commit() except_error_db_add(self, reader2, IntegrityError) reader1.card_id = MAGIC_INTEGER reader2.email = '[email protected]' reader2.card_id = MAGIC_INTEGER except_error_db_add(self, reader2, IntegrityError)
def test_uniqueness_reader(self): reader1 = Reader(email='[email protected]', password='abc123', name='Peter', surname='Parker') reader2 = Reader(email='[email protected]', password='def456', name='Mary Jane', surname='Watson') db.session.add(reader1) db.session.commit() except_error_db_add(self, reader2, IntegrityError) reader1.card_id = MAGIC_INTEGER reader2.email = '[email protected]' reader2.card_id = MAGIC_INTEGER except_error_db_add(self, reader2, IntegrityError)<|docstring|>Tests that uniqueness constraints of attributes in Reader are enforced by the database.<|endoftext|>
6db63f792f3a0a234da56c92a21289b35d2d8f3348d052b24df97914d9b01e55
def test_password_hashed_reader(self): 'Tests that the value of the password attribute in Reader is hashed.' plaintext_pw = 'abc123' reader = Reader(email='[email protected]', password=plaintext_pw, name='Peter', surname='Parker') db.session.add(reader) db.session.commit() queried_pw = Reader.query.filter_by(email='[email protected]', name='Peter', surname='Parker').first().password self.assertNotEqual(plaintext_pw, queried_pw) self.assertNotEqual(plaintext_pw, reader.password) self.assertEqual(queried_pw, reader.password) self.assertIsNotNone(queried_pw) self.assertIsNotNone(reader.password)
Tests that the value of the password attribute in Reader is hashed.
server/dbentitytests.py
test_password_hashed_reader
TDDD96-Kandidatgrupp-1-2020/Visual1ze
6
python
def test_password_hashed_reader(self): plaintext_pw = 'abc123' reader = Reader(email='[email protected]', password=plaintext_pw, name='Peter', surname='Parker') db.session.add(reader) db.session.commit() queried_pw = Reader.query.filter_by(email='[email protected]', name='Peter', surname='Parker').first().password self.assertNotEqual(plaintext_pw, queried_pw) self.assertNotEqual(plaintext_pw, reader.password) self.assertEqual(queried_pw, reader.password) self.assertIsNotNone(queried_pw) self.assertIsNotNone(reader.password)
def test_password_hashed_reader(self): plaintext_pw = 'abc123' reader = Reader(email='[email protected]', password=plaintext_pw, name='Peter', surname='Parker') db.session.add(reader) db.session.commit() queried_pw = Reader.query.filter_by(email='[email protected]', name='Peter', surname='Parker').first().password self.assertNotEqual(plaintext_pw, queried_pw) self.assertNotEqual(plaintext_pw, reader.password) self.assertEqual(queried_pw, reader.password) self.assertIsNotNone(queried_pw) self.assertIsNotNone(reader.password)<|docstring|>Tests that the value of the password attribute in Reader is hashed.<|endoftext|>
8fa3c44db39290bb08e79f3150368a11adfa7370d8f3394848a55656a43ffccd
def test_password_functions_reader(self): 'Tests that password functions in Reader behave as expected.' reader = Reader(email='[email protected]', password='abc123', name='Peter', surname='Parker') pw_before = reader.password reader.set_password('def456') pw_after = reader.password self.assertNotEqual(pw_before, pw_after) self.assertTrue(reader.check_password('def456'))
Tests that password functions in Reader behave as expected.
server/dbentitytests.py
test_password_functions_reader
TDDD96-Kandidatgrupp-1-2020/Visual1ze
6
python
def test_password_functions_reader(self): reader = Reader(email='[email protected]', password='abc123', name='Peter', surname='Parker') pw_before = reader.password reader.set_password('def456') pw_after = reader.password self.assertNotEqual(pw_before, pw_after) self.assertTrue(reader.check_password('def456'))
def test_password_functions_reader(self): reader = Reader(email='[email protected]', password='abc123', name='Peter', surname='Parker') pw_before = reader.password reader.set_password('def456') pw_after = reader.password self.assertNotEqual(pw_before, pw_after) self.assertTrue(reader.check_password('def456'))<|docstring|>Tests that password functions in Reader behave as expected.<|endoftext|>
c4cebb0ae17a1540f7115da2cd7fc26ac8eff827f9f43f382bcbeefb8edc2640
def test_id_increment_reader(self): 'Tests that the id attribute in Reader is consistent.' reader1 = Reader(email='[email protected]', password='abc123', name='Peter', surname='Parker') reader2 = Reader(email='[email protected]', password='def456', name='Mary Jane', surname='Watson') db.session.add(reader1) db.session.add(reader2) db.session.commit() self.assertEqual(Reader.query.filter_by(email='[email protected]', name='Peter', surname='Parker').first().id, 1) self.assertEqual(Reader.query.filter_by(email='[email protected]', name='Mary Jane', surname='Watson').first().id, 2) db.session.delete(reader1) db.session.commit() self.assertIsNone(Reader.query.filter_by(id=1).first()) self.assertEqual(Reader.query.filter_by(email='[email protected]', name='Mary Jane', surname='Watson').first().id, 2) reader3 = Reader(email='[email protected]', password='ghi789', name='Harry', surname='Osborn') db.session.add(reader3) db.session.commit() self.assertIsNone(Reader.query.filter_by(id=1).first()) self.assertEqual(Reader.query.filter_by(email='[email protected]', name='Harry', surname='Osborn').first().id, 3)
Tests that the id attribute in Reader is consistent.
server/dbentitytests.py
test_id_increment_reader
TDDD96-Kandidatgrupp-1-2020/Visual1ze
6
python
def test_id_increment_reader(self): reader1 = Reader(email='[email protected]', password='abc123', name='Peter', surname='Parker') reader2 = Reader(email='[email protected]', password='def456', name='Mary Jane', surname='Watson') db.session.add(reader1) db.session.add(reader2) db.session.commit() self.assertEqual(Reader.query.filter_by(email='[email protected]', name='Peter', surname='Parker').first().id, 1) self.assertEqual(Reader.query.filter_by(email='[email protected]', name='Mary Jane', surname='Watson').first().id, 2) db.session.delete(reader1) db.session.commit() self.assertIsNone(Reader.query.filter_by(id=1).first()) self.assertEqual(Reader.query.filter_by(email='[email protected]', name='Mary Jane', surname='Watson').first().id, 2) reader3 = Reader(email='[email protected]', password='ghi789', name='Harry', surname='Osborn') db.session.add(reader3) db.session.commit() self.assertIsNone(Reader.query.filter_by(id=1).first()) self.assertEqual(Reader.query.filter_by(email='[email protected]', name='Harry', surname='Osborn').first().id, 3)
def test_id_increment_reader(self): reader1 = Reader(email='[email protected]', password='abc123', name='Peter', surname='Parker') reader2 = Reader(email='[email protected]', password='def456', name='Mary Jane', surname='Watson') db.session.add(reader1) db.session.add(reader2) db.session.commit() self.assertEqual(Reader.query.filter_by(email='[email protected]', name='Peter', surname='Parker').first().id, 1) self.assertEqual(Reader.query.filter_by(email='[email protected]', name='Mary Jane', surname='Watson').first().id, 2) db.session.delete(reader1) db.session.commit() self.assertIsNone(Reader.query.filter_by(id=1).first()) self.assertEqual(Reader.query.filter_by(email='[email protected]', name='Mary Jane', surname='Watson').first().id, 2) reader3 = Reader(email='[email protected]', password='ghi789', name='Harry', surname='Osborn') db.session.add(reader3) db.session.commit() self.assertIsNone(Reader.query.filter_by(id=1).first()) self.assertEqual(Reader.query.filter_by(email='[email protected]', name='Harry', surname='Osborn').first().id, 3)<|docstring|>Tests that the id attribute in Reader is consistent.<|endoftext|>
93a0751f5ec0c1a174e701b186593e0843e8f048c7a2a170aaa970debfc48e22
def test_create_approver(self): 'Tests that an approver can be created as expected.' approver = Approver(email='[email protected]', password='abc123', name='Peter', surname='Parker') db.session.add(approver) db.session.commit()
Tests that an approver can be created as expected.
server/dbentitytests.py
test_create_approver
TDDD96-Kandidatgrupp-1-2020/Visual1ze
6
python
def test_create_approver(self): approver = Approver(email='[email protected]', password='abc123', name='Peter', surname='Parker') db.session.add(approver) db.session.commit()
def test_create_approver(self): approver = Approver(email='[email protected]', password='abc123', name='Peter', surname='Parker') db.session.add(approver) db.session.commit()<|docstring|>Tests that an approver can be created as expected.<|endoftext|>
59714295d607bbdfec6a601d00e135323bbaade51207ad40bb279d2ae3e4a741
def test_inheritance_approver(self): '\n\t\tTests that when an approver is created, this also results in an entry in the Reader table thanks to the\n\t\tinheritance.\n\t\t' approver = Approver(email='[email protected]', password='abc123', name='Peter', surname='Parker') db.session.add(approver) db.session.commit() self.assertIsNotNone(Reader.query.filter_by(id=approver.id).first()) self.assertIsNone(Admin.query.filter_by(id=approver.id).first())
Tests that when an approver is created, this also results in an entry in the Reader table thanks to the inheritance.
server/dbentitytests.py
test_inheritance_approver
TDDD96-Kandidatgrupp-1-2020/Visual1ze
6
python
def test_inheritance_approver(self): '\n\t\tTests that when an approver is created, this also results in an entry in the Reader table thanks to the\n\t\tinheritance.\n\t\t' approver = Approver(email='[email protected]', password='abc123', name='Peter', surname='Parker') db.session.add(approver) db.session.commit() self.assertIsNotNone(Reader.query.filter_by(id=approver.id).first()) self.assertIsNone(Admin.query.filter_by(id=approver.id).first())
def test_inheritance_approver(self): '\n\t\tTests that when an approver is created, this also results in an entry in the Reader table thanks to the\n\t\tinheritance.\n\t\t' approver = Approver(email='[email protected]', password='abc123', name='Peter', surname='Parker') db.session.add(approver) db.session.commit() self.assertIsNotNone(Reader.query.filter_by(id=approver.id).first()) self.assertIsNone(Admin.query.filter_by(id=approver.id).first())<|docstring|>Tests that when an approver is created, this also results in an entry in the Reader table thanks to the inheritance.<|endoftext|>
54507c84a7651bca15d8f4480849cfb47387181281f461925416de93c0236dbd
def test_create_admin(self): 'Tests that an admin can be created as expected.' admin = Admin(email='[email protected]', password='abc123', name='Peter', surname='Parker') db.session.add(admin) db.session.commit()
Tests that an admin can be created as expected.
server/dbentitytests.py
test_create_admin
TDDD96-Kandidatgrupp-1-2020/Visual1ze
6
python
def test_create_admin(self): admin = Admin(email='[email protected]', password='abc123', name='Peter', surname='Parker') db.session.add(admin) db.session.commit()
def test_create_admin(self): admin = Admin(email='[email protected]', password='abc123', name='Peter', surname='Parker') db.session.add(admin) db.session.commit()<|docstring|>Tests that an admin can be created as expected.<|endoftext|>
bb978e2ab86efb48c97ca62bacbaba0a3c8f75a198102a7cca67d2c5e2cc972e
def test_inheritance_admin(self): '\n\t\tTests that when an admin is created, this also results in an entry in the Approver and Reader tables thanks to\n\t\tthe inheritance.\n\t\t' admin = Admin(email='[email protected]', password='abc123', name='Peter', surname='Parker') db.session.add(admin) db.session.commit() self.assertIsNotNone(Approver.query.filter_by(id=admin.id).first()) self.assertIsNotNone(Reader.query.filter_by(id=admin.id).first())
Tests that when an admin is created, this also results in an entry in the Approver and Reader tables thanks to the inheritance.
server/dbentitytests.py
test_inheritance_admin
TDDD96-Kandidatgrupp-1-2020/Visual1ze
6
python
def test_inheritance_admin(self): '\n\t\tTests that when an admin is created, this also results in an entry in the Approver and Reader tables thanks to\n\t\tthe inheritance.\n\t\t' admin = Admin(email='[email protected]', password='abc123', name='Peter', surname='Parker') db.session.add(admin) db.session.commit() self.assertIsNotNone(Approver.query.filter_by(id=admin.id).first()) self.assertIsNotNone(Reader.query.filter_by(id=admin.id).first())
def test_inheritance_admin(self): '\n\t\tTests that when an admin is created, this also results in an entry in the Approver and Reader tables thanks to\n\t\tthe inheritance.\n\t\t' admin = Admin(email='[email protected]', password='abc123', name='Peter', surname='Parker') db.session.add(admin) db.session.commit() self.assertIsNotNone(Approver.query.filter_by(id=admin.id).first()) self.assertIsNotNone(Reader.query.filter_by(id=admin.id).first())<|docstring|>Tests that when an admin is created, this also results in an entry in the Approver and Reader tables thanks to the inheritance.<|endoftext|>
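Note on the two inheritance tests above: they presuppose that Reader, Approver and Admin are mapped with SQLAlchemy joined-table inheritance, so that inserting an Admin also creates matching Approver and Reader rows. The models themselves are not part of this excerpt, so the following is only a minimal sketch of a mapping that would behave this way; the table names, column sizes and the discriminator column are assumptions.

from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()

class Reader(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(64), unique=True, nullable=False)
    password = db.Column(db.String(64), nullable=False)
    name = db.Column(db.String(64), nullable=False)
    surname = db.Column(db.String(64), nullable=False)
    type = db.Column(db.String(16))  # assumed discriminator column
    __mapper_args__ = {'polymorphic_identity': 'reader', 'polymorphic_on': type}

class Approver(Reader):
    # Joined-table inheritance: an Approver row shares its primary key with a
    # Reader row, which is why Reader.query.filter_by(id=approver.id) finds one.
    id = db.Column(db.Integer, db.ForeignKey('reader.id'), primary_key=True)
    __mapper_args__ = {'polymorphic_identity': 'approver'}

class Admin(Approver):
    # One level deeper: an Admin row implies both an Approver and a Reader row.
    id = db.Column(db.Integer, db.ForeignKey('approver.id'), primary_key=True)
    __mapper_args__ = {'polymorphic_identity': 'admin'}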
ff32a0e25894f4935ca006de8eb2803dd5b537495dd8a7dd73ec584b2f077c8c
def test_create_room(self): 'Tests that a room can be created as expected.' room = Room(name='ISYtan1', text_id='ISY1') db.session.add(room) db.session.commit()
Tests that a room can be created as expected.
server/dbentitytests.py
test_create_room
TDDD96-Kandidatgrupp-1-2020/Visual1ze
6
python
def test_create_room(self): room = Room(name='ISYtan1', text_id='ISY1') db.session.add(room) db.session.commit()
def test_create_room(self): room = Room(name='ISYtan1', text_id='ISY1') db.session.add(room) db.session.commit()<|docstring|>Tests that a room can be created as expected.<|endoftext|>
2fefe03375a93ac9893ff0c362cca433d18c358ff66a7835c073abab3f53c969
def test_datatypes_room(self): 'Tests that non-string datatype constraints of attributes in Room are enforced by the database.' room = Room(name='ISYtan1', text_id='ISY1') room.id = MAGIC_STRING except_error_db_add(self, room, IntegrityError) room = Room(name='ISYtan1', text_id='ISY1') db.session.add(room) room.id = MAGIC_STRING except_error_db_commit(self, IntegrityError)
Tests that non-string datatype constraints of attributes in Room are enforced by the database.
server/dbentitytests.py
test_datatypes_room
TDDD96-Kandidatgrupp-1-2020/Visual1ze
6
python
def test_datatypes_room(self): room = Room(name='ISYtan1', text_id='ISY1') room.id = MAGIC_STRING except_error_db_add(self, room, IntegrityError) room = Room(name='ISYtan1', text_id='ISY1') db.session.add(room) room.id = MAGIC_STRING except_error_db_commit(self, IntegrityError)
def test_datatypes_room(self): room = Room(name='ISYtan1', text_id='ISY1') room.id = MAGIC_STRING except_error_db_add(self, room, IntegrityError) room = Room(name='ISYtan1', text_id='ISY1') db.session.add(room) room.id = MAGIC_STRING except_error_db_commit(self, IntegrityError)<|docstring|>Tests that non-string datatype constraints of attributes in Room are enforced by the database.<|endoftext|>
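The helpers except_error_db_add and except_error_db_commit used throughout these tests are defined elsewhere in dbentitytests.py and are not shown in this excerpt. Judging only from their call sites (test case, object to add for the first, expected exception class for both), a plausible sketch is:

def except_error_db_add(test_case, obj, error):
    # Adding `obj` and committing is expected to raise `error`.
    with test_case.assertRaises(error):
        db.session.add(obj)
        db.session.commit()
    db.session.rollback()  # leave the session usable for the rest of the test

def except_error_db_commit(test_case, error):
    # Committing the changes already staged on the session is expected to raise `error`.
    with test_case.assertRaises(error):
        db.session.commit()
    db.session.rollback()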
250e0db0411295276742d044db7993a0054a2b62acd7cb57c25cb849c9018807
def test_nullable_false_room(self): 'Tests that attributes in Room with nullable=False cannot be None.' except_error_db_add(self, Room(name=None, text_id='ISY1'), IntegrityError)
Tests that attributes in Room with nullable=False cannot be None.
server/dbentitytests.py
test_nullable_false_room
TDDD96-Kandidatgrupp-1-2020/Visual1ze
6
python
def test_nullable_false_room(self): except_error_db_add(self, Room(name=None, text_id='ISY1'), IntegrityError)
def test_nullable_false_room(self): except_error_db_add(self, Room(name=None, text_id='ISY1'), IntegrityError)<|docstring|>Tests that attributes in Room with nullable=False cannot be None.<|endoftext|>
8fb253ef5e4031a7c3e74ae032e73557a8eca71ec1baa701450cb398388dbe44
def test_nullable_room(self): 'Tests that attributes in Room with nullable=True actually can be None.' room = Room(name='ISYtan1', text_id='ISY1') db.session.add(room) db.session.commit() room.representation_json = MAGIC_STRING db.session.commit() room.representation_json = None db.session.commit()
Tests that attributes in Room with nullable=True actually can be None.
server/dbentitytests.py
test_nullable_room
TDDD96-Kandidatgrupp-1-2020/Visual1ze
6
python
def test_nullable_room(self): room = Room(name='ISYtan1', text_id='ISY1') db.session.add(room) db.session.commit() room.representation_json = MAGIC_STRING db.session.commit() room.representation_json = None db.session.commit()
def test_nullable_room(self): room = Room(name='ISYtan1', text_id='ISY1') db.session.add(room) db.session.commit() room.representation_json = MAGIC_STRING db.session.commit() room.representation_json = None db.session.commit()<|docstring|>Tests that attributes in Room with nullable=True actually can be None.<|endoftext|>
cf3a4417c0a61766044371cbc0f73594596a31f4f8681d6be5f1f78a1955c05f
def test_create_card_reader(self): 'Tests that a card reader can be created as expected.' card_reader1 = CardReader() db.session.add(card_reader1) db.session.commit() room_a = Room(name='ISYtan1', text_id='ISY1') room_b = Room(name='ISYtan2', text_id='ISY2') db.session.add(room_a) db.session.add(room_b) db.session.commit() card_reader2 = CardReader(room_a=room_a, room_b=room_b) db.session.add(card_reader2) db.session.commit()
Tests that a card reader can be created as expected.
server/dbentitytests.py
test_create_card_reader
TDDD96-Kandidatgrupp-1-2020/Visual1ze
6
python
def test_create_card_reader(self): card_reader1 = CardReader() db.session.add(card_reader1) db.session.commit() room_a = Room(name='ISYtan1', text_id='ISY1') room_b = Room(name='ISYtan2', text_id='ISY2') db.session.add(room_a) db.session.add(room_b) db.session.commit() card_reader2 = CardReader(room_a=room_a, room_b=room_b) db.session.add(card_reader2) db.session.commit()
def test_create_card_reader(self): card_reader1 = CardReader() db.session.add(card_reader1) db.session.commit() room_a = Room(name='ISYtan1', text_id='ISY1') room_b = Room(name='ISYtan2', text_id='ISY2') db.session.add(room_a) db.session.add(room_b) db.session.commit() card_reader2 = CardReader(room_a=room_a, room_b=room_b) db.session.add(card_reader2) db.session.commit()<|docstring|>Tests that a card reader can be created as expected.<|endoftext|>
4f13099363fd477dbd661fbf0f0b77eb5804a812598f994697bf6c40b4a17e93
def test_datatypes_card_reader(self): 'Tests that non-string datatype constraints of attributes in CardReader are enforced by the database.' card_reader = CardReader() card_reader.id = MAGIC_STRING except_error_db_add(self, card_reader, IntegrityError) card_reader = CardReader() db.session.add(card_reader) card_reader.id = MAGIC_STRING except_error_db_commit(self, IntegrityError) card_reader1 = CardReader() card_reader1.room_a_id = MAGIC_STRING except_error_db_add(self, card_reader1, IntegrityError) card_reader1 = CardReader() db.session.add(card_reader1) card_reader1.room_a_id = MAGIC_STRING except_error_db_commit(self, IntegrityError) card_reader2 = CardReader() card_reader2.room_b_id = MAGIC_STRING except_error_db_add(self, card_reader2, IntegrityError) card_reader2 = CardReader() db.session.add(card_reader2) card_reader2.room_b_id = MAGIC_STRING except_error_db_commit(self, IntegrityError)
Tests that non-string datatype constraints of attributes in CardReader are enforced by the database.
server/dbentitytests.py
test_datatypes_card_reader
TDDD96-Kandidatgrupp-1-2020/Visual1ze
6
python
def test_datatypes_card_reader(self): card_reader = CardReader() card_reader.id = MAGIC_STRING except_error_db_add(self, card_reader, IntegrityError) card_reader = CardReader() db.session.add(card_reader) card_reader.id = MAGIC_STRING except_error_db_commit(self, IntegrityError) card_reader1 = CardReader() card_reader1.room_a_id = MAGIC_STRING except_error_db_add(self, card_reader1, IntegrityError) card_reader1 = CardReader() db.session.add(card_reader1) card_reader1.room_a_id = MAGIC_STRING except_error_db_commit(self, IntegrityError) card_reader2 = CardReader() card_reader2.room_b_id = MAGIC_STRING except_error_db_add(self, card_reader2, IntegrityError) card_reader2 = CardReader() db.session.add(card_reader2) card_reader2.room_b_id = MAGIC_STRING except_error_db_commit(self, IntegrityError)
def test_datatypes_card_reader(self): card_reader = CardReader() card_reader.id = MAGIC_STRING except_error_db_add(self, card_reader, IntegrityError) card_reader = CardReader() db.session.add(card_reader) card_reader.id = MAGIC_STRING except_error_db_commit(self, IntegrityError) card_reader1 = CardReader() card_reader1.room_a_id = MAGIC_STRING except_error_db_add(self, card_reader1, IntegrityError) card_reader1 = CardReader() db.session.add(card_reader1) card_reader1.room_a_id = MAGIC_STRING except_error_db_commit(self, IntegrityError) card_reader2 = CardReader() card_reader2.room_b_id = MAGIC_STRING except_error_db_add(self, card_reader2, IntegrityError) card_reader2 = CardReader() db.session.add(card_reader2) card_reader2.room_b_id = MAGIC_STRING except_error_db_commit(self, IntegrityError)<|docstring|>Tests that non-string datatype constraints of attributes in CardReader are enforced by the database.<|endoftext|>
3ddb69f0090d26c6d3d6ba9810561ac0473892bf15e05de4a4bdf193d8e1a9e9
def test_create_access_group(self): 'Tests that an access group can be created as expected.' ag = AccessGroup(name='Basic') db.session.add(ag) db.session.commit()
Tests that an access group can be created as expected.
server/dbentitytests.py
test_create_access_group
TDDD96-Kandidatgrupp-1-2020/Visual1ze
6
python
def test_create_access_group(self): ag = AccessGroup(name='Basic') db.session.add(ag) db.session.commit()
def test_create_access_group(self): ag = AccessGroup(name='Basic') db.session.add(ag) db.session.commit()<|docstring|>Tests that an access group can be created as expected.<|endoftext|>
11dfb9fe7e692de6bd6ed07a2ef66064580780f9a2263a4ee5c5eec98d52d82b
def test_uniqueness_access_group(self): 'Tests that uniqueness constraints of attributes in AccessGroup are enforced by the database.' ag1 = AccessGroup(name='Basic') ag2 = AccessGroup(name='Basic') db.session.add(ag1) db.session.commit() except_error_db_add(self, ag2, IntegrityError)
Tests that uniqueness constraints of attributes in AccessGroup are enforced by the database.
server/dbentitytests.py
test_uniqueness_access_group
TDDD96-Kandidatgrupp-1-2020/Visual1ze
6
python
def test_uniqueness_access_group(self): ag1 = AccessGroup(name='Basic') ag2 = AccessGroup(name='Basic') db.session.add(ag1) db.session.commit() except_error_db_add(self, ag2, IntegrityError)
def test_uniqueness_access_group(self): ag1 = AccessGroup(name='Basic') ag2 = AccessGroup(name='Basic') db.session.add(ag1) db.session.commit() except_error_db_add(self, ag2, IntegrityError)<|docstring|>Tests that uniqueness constraints of attributes in AccessGroup are enforced by the database.<|endoftext|>
3b797e8a03d600d12db09acba7accfc84e9f45f5a60a3dfc76bd5b84e35a6e33
def test_create_access_group_request(self): 'Tests that an access group request can be created as expected.' agr = AccessGroupRequest(reader=None, ag=None, justification=MAGIC_STRING) db.session.add(agr) db.session.commit()
Tests that an access group request can be created as expected.
server/dbentitytests.py
test_create_access_group_request
TDDD96-Kandidatgrupp-1-2020/Visual1ze
6
python
def test_create_access_group_request(self): agr = AccessGroupRequest(reader=None, ag=None, justification=MAGIC_STRING) db.session.add(agr) db.session.commit()
def test_create_access_group_request(self): agr = AccessGroupRequest(reader=None, ag=None, justification=MAGIC_STRING) db.session.add(agr) db.session.commit()<|docstring|>Tests that an access group request can be created as expected.<|endoftext|>
0f39f344f7fffb6b419f905b37b37fb5052ca815ce6a6ea9cfa0f219d82b8e68
def test_datatypes_access_group_request(self): '\n\t\tTests that non-string datatype constraints of attributes in AccessGroupRequest are enforced by the database.\n\t\t' agr = AccessGroupRequest(reader=None, ag=None, justification=MAGIC_STRING) agr.id = MAGIC_STRING except_error_db_add(self, agr, IntegrityError) agr = AccessGroupRequest(reader=None, ag=None, justification=MAGIC_STRING) db.session.add(agr) agr.id = MAGIC_STRING except_error_db_commit(self, IntegrityError) agr = AccessGroupRequest(reader=None, ag=None, justification=MAGIC_STRING) agr.reader_id = MAGIC_STRING except_error_db_add(self, agr, IntegrityError) agr = AccessGroupRequest(reader=None, ag=None, justification=MAGIC_STRING) db.session.add(agr) agr.reader_id = MAGIC_STRING except_error_db_commit(self, IntegrityError) agr = AccessGroupRequest(reader=None, ag=None, justification=MAGIC_STRING) agr.ag_id = MAGIC_STRING except_error_db_add(self, agr, IntegrityError) agr = AccessGroupRequest(reader=None, ag=None, justification=MAGIC_STRING) db.session.add(agr) agr.ag_id = MAGIC_STRING except_error_db_commit(self, IntegrityError) agr = AccessGroupRequest(reader=None, ag=None, justification=MAGIC_STRING) agr.datetime_requested = MAGIC_STRING except_error_db_add(self, agr, StatementError) agr = AccessGroupRequest(reader=None, ag=None, justification=MAGIC_STRING) db.session.add(agr) agr.datetime_requested = MAGIC_STRING except_error_db_commit(self, IntegrityError)
Tests that non-string datatype constraints of attributes in AccessGroupRequest are enforced by the database.
server/dbentitytests.py
test_datatypes_access_group_request
TDDD96-Kandidatgrupp-1-2020/Visual1ze
6
python
def test_datatypes_access_group_request(self): '\n\t\t\n\t\t' agr = AccessGroupRequest(reader=None, ag=None, justification=MAGIC_STRING) agr.id = MAGIC_STRING except_error_db_add(self, agr, IntegrityError) agr = AccessGroupRequest(reader=None, ag=None, justification=MAGIC_STRING) db.session.add(agr) agr.id = MAGIC_STRING except_error_db_commit(self, IntegrityError) agr = AccessGroupRequest(reader=None, ag=None, justification=MAGIC_STRING) agr.reader_id = MAGIC_STRING except_error_db_add(self, agr, IntegrityError) agr = AccessGroupRequest(reader=None, ag=None, justification=MAGIC_STRING) db.session.add(agr) agr.reader_id = MAGIC_STRING except_error_db_commit(self, IntegrityError) agr = AccessGroupRequest(reader=None, ag=None, justification=MAGIC_STRING) agr.ag_id = MAGIC_STRING except_error_db_add(self, agr, IntegrityError) agr = AccessGroupRequest(reader=None, ag=None, justification=MAGIC_STRING) db.session.add(agr) agr.ag_id = MAGIC_STRING except_error_db_commit(self, IntegrityError) agr = AccessGroupRequest(reader=None, ag=None, justification=MAGIC_STRING) agr.datetime_requested = MAGIC_STRING except_error_db_add(self, agr, StatementError) agr = AccessGroupRequest(reader=None, ag=None, justification=MAGIC_STRING) db.session.add(agr) agr.datetime_requested = MAGIC_STRING except_error_db_commit(self, IntegrityError)
def test_datatypes_access_group_request(self): '\n\t\t\n\t\t' agr = AccessGroupRequest(reader=None, ag=None, justification=MAGIC_STRING) agr.id = MAGIC_STRING except_error_db_add(self, agr, IntegrityError) agr = AccessGroupRequest(reader=None, ag=None, justification=MAGIC_STRING) db.session.add(agr) agr.id = MAGIC_STRING except_error_db_commit(self, IntegrityError) agr = AccessGroupRequest(reader=None, ag=None, justification=MAGIC_STRING) agr.reader_id = MAGIC_STRING except_error_db_add(self, agr, IntegrityError) agr = AccessGroupRequest(reader=None, ag=None, justification=MAGIC_STRING) db.session.add(agr) agr.reader_id = MAGIC_STRING except_error_db_commit(self, IntegrityError) agr = AccessGroupRequest(reader=None, ag=None, justification=MAGIC_STRING) agr.ag_id = MAGIC_STRING except_error_db_add(self, agr, IntegrityError) agr = AccessGroupRequest(reader=None, ag=None, justification=MAGIC_STRING) db.session.add(agr) agr.ag_id = MAGIC_STRING except_error_db_commit(self, IntegrityError) agr = AccessGroupRequest(reader=None, ag=None, justification=MAGIC_STRING) agr.datetime_requested = MAGIC_STRING except_error_db_add(self, agr, StatementError) agr = AccessGroupRequest(reader=None, ag=None, justification=MAGIC_STRING) db.session.add(agr) agr.datetime_requested = MAGIC_STRING except_error_db_commit(self, IntegrityError)<|docstring|>Tests that non-string datatype constraints of attributes in AccessGroupRequest are enforced by the database.<|endoftext|>
a4287e932bfcfaf0268216831b710c1d4bca349a5e2ac173cadf62dfca4f3481
def test_nullable_false_access_group_request(self): 'Tests that attributes in AccessGroupRequest with nullable=False cannot be None.' agr = AccessGroupRequest(reader=None, ag=None, justification=MAGIC_STRING) agr.datetime_requested = None db.session.add(agr) db.session.commit() self.assertIsNotNone(AccessGroupRequest.query.filter_by(id=agr.id).first().datetime_requested)
Tests that attributes in AccessGroupRequest with nullable=False cannot be None.
server/dbentitytests.py
test_nullable_false_access_group_request
TDDD96-Kandidatgrupp-1-2020/Visual1ze
6
python
def test_nullable_false_access_group_request(self): agr = AccessGroupRequest(reader=None, ag=None, justification=MAGIC_STRING) agr.datetime_requested = None db.session.add(agr) db.session.commit() self.assertIsNotNone(AccessGroupRequest.query.filter_by(id=agr.id).first().datetime_requested)
def test_nullable_false_access_group_request(self): agr = AccessGroupRequest(reader=None, ag=None, justification=MAGIC_STRING) agr.datetime_requested = None db.session.add(agr) db.session.commit() self.assertIsNotNone(AccessGroupRequest.query.filter_by(id=agr.id).first().datetime_requested)<|docstring|>Tests that attributes in AccessGroupRequest with nullable=False cannot be None.<|endoftext|>
957e9e71fb974617ecff0c13b2f072390060db64ff0bc2d57dadacfa1d01e95a
def test_varying_length_access_group_request(self): 'Tests that the value of String attributes in AccessGroupRequest can have different lengths.' agr_lorem1 = AccessGroupRequest(reader=None, ag=None, justification=LOREM1) agr_lorem5 = AccessGroupRequest(reader=None, ag=None, justification=LOREM5) db.session.add(agr_lorem1) db.session.add(agr_lorem5) db.session.commit()
Tests that the value of String attributes in AccessGroupRequest can have different lengths.
server/dbentitytests.py
test_varying_length_access_group_request
TDDD96-Kandidatgrupp-1-2020/Visual1ze
6
python
def test_varying_length_access_group_request(self): agr_lorem1 = AccessGroupRequest(reader=None, ag=None, justification=LOREM1) agr_lorem5 = AccessGroupRequest(reader=None, ag=None, justification=LOREM5) db.session.add(agr_lorem1) db.session.add(agr_lorem5) db.session.commit()
def test_varying_length_access_group_request(self): agr_lorem1 = AccessGroupRequest(reader=None, ag=None, justification=LOREM1) agr_lorem5 = AccessGroupRequest(reader=None, ag=None, justification=LOREM5) db.session.add(agr_lorem1) db.session.add(agr_lorem5) db.session.commit()<|docstring|>Tests that the value of String attributes in AccessGroupRequest can have different lengths.<|endoftext|>
0e0b059d7a74b8128ee869b33c786df1127f9c12d8837745af5023e2b68f0a6c
def test_create_room_request(self): 'Tests that a room request can be created as expected.' rr = RoomRequest(reader=None, room=None, justification=MAGIC_STRING) db.session.add(rr) db.session.commit()
Tests that a room request can be created as expected.
server/dbentitytests.py
test_create_room_request
TDDD96-Kandidatgrupp-1-2020/Visual1ze
6
python
def test_create_room_request(self): rr = RoomRequest(reader=None, room=None, justification=MAGIC_STRING) db.session.add(rr) db.session.commit()
def test_create_room_request(self): rr = RoomRequest(reader=None, room=None, justification=MAGIC_STRING) db.session.add(rr) db.session.commit()<|docstring|>Tests that a room request can be created as expected.<|endoftext|>
3e7b33fc03080205f52dd000bfeb4897338eac0c0bca6efd1db836e5c0f22666
def test_datatypes_room_request(self): 'Tests that non-string datatype constraints of attributes in RoomRequest are enforced by the database.' rr = RoomRequest(reader=None, room=None, justification=MAGIC_STRING) rr.id = MAGIC_STRING except_error_db_add(self, rr, IntegrityError) rr = RoomRequest(reader=None, room=None, justification=MAGIC_STRING) db.session.add(rr) rr.id = MAGIC_STRING except_error_db_commit(self, IntegrityError) rr = RoomRequest(reader=None, room=None, justification=MAGIC_STRING) rr.reader_id = MAGIC_STRING except_error_db_add(self, rr, IntegrityError) rr = RoomRequest(reader=None, room=None, justification=MAGIC_STRING) db.session.add(rr) rr.reader_id = MAGIC_STRING except_error_db_commit(self, IntegrityError) rr = RoomRequest(reader=None, room=None, justification=MAGIC_STRING) rr.room_id = MAGIC_STRING except_error_db_add(self, rr, IntegrityError) rr = RoomRequest(reader=None, room=None, justification=MAGIC_STRING) db.session.add(rr) rr.room_id = MAGIC_STRING except_error_db_commit(self, IntegrityError) rr = RoomRequest(reader=None, room=None, justification=MAGIC_STRING) rr.datetime_requested = MAGIC_STRING except_error_db_add(self, rr, StatementError) rr = RoomRequest(reader=None, room=None, justification=MAGIC_STRING) db.session.add(rr) rr.datetime_requested = MAGIC_STRING except_error_db_commit(self, IntegrityError)
Tests that non-string datatype constraints of attributes in RoomRequest are enforced by the database.
server/dbentitytests.py
test_datatypes_room_request
TDDD96-Kandidatgrupp-1-2020/Visual1ze
6
python
def test_datatypes_room_request(self): rr = RoomRequest(reader=None, room=None, justification=MAGIC_STRING) rr.id = MAGIC_STRING except_error_db_add(self, rr, IntegrityError) rr = RoomRequest(reader=None, room=None, justification=MAGIC_STRING) db.session.add(rr) rr.id = MAGIC_STRING except_error_db_commit(self, IntegrityError) rr = RoomRequest(reader=None, room=None, justification=MAGIC_STRING) rr.reader_id = MAGIC_STRING except_error_db_add(self, rr, IntegrityError) rr = RoomRequest(reader=None, room=None, justification=MAGIC_STRING) db.session.add(rr) rr.reader_id = MAGIC_STRING except_error_db_commit(self, IntegrityError) rr = RoomRequest(reader=None, room=None, justification=MAGIC_STRING) rr.room_id = MAGIC_STRING except_error_db_add(self, rr, IntegrityError) rr = RoomRequest(reader=None, room=None, justification=MAGIC_STRING) db.session.add(rr) rr.room_id = MAGIC_STRING except_error_db_commit(self, IntegrityError) rr = RoomRequest(reader=None, room=None, justification=MAGIC_STRING) rr.datetime_requested = MAGIC_STRING except_error_db_add(self, rr, StatementError) rr = RoomRequest(reader=None, room=None, justification=MAGIC_STRING) db.session.add(rr) rr.datetime_requested = MAGIC_STRING except_error_db_commit(self, IntegrityError)
def test_datatypes_room_request(self): rr = RoomRequest(reader=None, room=None, justification=MAGIC_STRING) rr.id = MAGIC_STRING except_error_db_add(self, rr, IntegrityError) rr = RoomRequest(reader=None, room=None, justification=MAGIC_STRING) db.session.add(rr) rr.id = MAGIC_STRING except_error_db_commit(self, IntegrityError) rr = RoomRequest(reader=None, room=None, justification=MAGIC_STRING) rr.reader_id = MAGIC_STRING except_error_db_add(self, rr, IntegrityError) rr = RoomRequest(reader=None, room=None, justification=MAGIC_STRING) db.session.add(rr) rr.reader_id = MAGIC_STRING except_error_db_commit(self, IntegrityError) rr = RoomRequest(reader=None, room=None, justification=MAGIC_STRING) rr.room_id = MAGIC_STRING except_error_db_add(self, rr, IntegrityError) rr = RoomRequest(reader=None, room=None, justification=MAGIC_STRING) db.session.add(rr) rr.room_id = MAGIC_STRING except_error_db_commit(self, IntegrityError) rr = RoomRequest(reader=None, room=None, justification=MAGIC_STRING) rr.datetime_requested = MAGIC_STRING except_error_db_add(self, rr, StatementError) rr = RoomRequest(reader=None, room=None, justification=MAGIC_STRING) db.session.add(rr) rr.datetime_requested = MAGIC_STRING except_error_db_commit(self, IntegrityError)<|docstring|>Tests that non-string datatype constraints of attributes in RoomRequest are enforced by the database.<|endoftext|>
1a5ae728cd471193cb6cf468dc48269da05aa9e0fe08cd1da6a32b47eaf588ab
def test_nullable_false_room_request(self): 'Tests that attributes in RoomRequest with nullable=False cannot be None.' rr = RoomRequest(reader=None, room=None, justification=MAGIC_STRING) rr.datetime_requested = None db.session.add(rr) db.session.commit() self.assertIsNotNone(RoomRequest.query.filter_by(id=rr.id).first().datetime_requested)
Tests that attributes in RoomRequest with nullable=False cannot be None.
server/dbentitytests.py
test_nullable_false_room_request
TDDD96-Kandidatgrupp-1-2020/Visual1ze
6
python
def test_nullable_false_room_request(self): rr = RoomRequest(reader=None, room=None, justification=MAGIC_STRING) rr.datetime_requested = None db.session.add(rr) db.session.commit() self.assertIsNotNone(RoomRequest.query.filter_by(id=rr.id).first().datetime_requested)
def test_nullable_false_room_request(self): rr = RoomRequest(reader=None, room=None, justification=MAGIC_STRING) rr.datetime_requested = None db.session.add(rr) db.session.commit() self.assertIsNotNone(RoomRequest.query.filter_by(id=rr.id).first().datetime_requested)<|docstring|>Tests that attributes in RoomRequest with nullable=False cannot be None.<|endoftext|>
c9b7d0feba97f7eb2235f494f975f76963d8f251fe951ab40471e6b5bcc69178
def test_varying_length_room_request(self): 'Tests that the value of String attributes in RoomRequest can have different lengths.' rr_lorem1 = RoomRequest(reader=None, room=None, justification=LOREM1) rr_lorem5 = RoomRequest(reader=None, room=None, justification=LOREM5) db.session.add(rr_lorem1) db.session.add(rr_lorem5) db.session.commit()
Tests that the value of String attributes in RoomRequest can have different lengths.
server/dbentitytests.py
test_varying_length_room_request
TDDD96-Kandidatgrupp-1-2020/Visual1ze
6
python
def test_varying_length_room_request(self): rr_lorem1 = RoomRequest(reader=None, room=None, justification=LOREM1) rr_lorem5 = RoomRequest(reader=None, room=None, justification=LOREM5) db.session.add(rr_lorem1) db.session.add(rr_lorem5) db.session.commit()
def test_varying_length_room_request(self): rr_lorem1 = RoomRequest(reader=None, room=None, justification=LOREM1) rr_lorem5 = RoomRequest(reader=None, room=None, justification=LOREM5) db.session.add(rr_lorem1) db.session.add(rr_lorem5) db.session.commit()<|docstring|>Tests that the value of String attributes in RoomRequest can have different lengths.<|endoftext|>
bd853404b0b3aa578a6d3af561f3e781bcb5a26fc0f61cc55b6ca6f4df9e892a
def act(self, previous_observation: np.ndarray) -> Tuple[(Any, dict)]: 'Choose random action.' return (self.action_space.sample(), {})
Choose random action.
pachinko/time_period_step_agent.py
act
datavaluepeople/pachinko
0
python
def act(self, previous_observation: np.ndarray) -> Tuple[(Any, dict)]: return (self.action_space.sample(), {})
def act(self, previous_observation: np.ndarray) -> Tuple[(Any, dict)]: return (self.action_space.sample(), {})<|docstring|>Choose random action.<|endoftext|>
c6af8949ec9251855652797d1bb6aa72d1c37846567fb619a9546c01f416d9f8
def act(self, previous_observation: np.ndarray) -> Tuple[(Any, dict)]: 'Choose highest conversion rate action (with some exploration).' n_conversions = previous_observation[1] if (self.previous_action is not None): self.conversion_counts[self.previous_action][0] += 1 self.conversion_counts[self.previous_action][1] += n_conversions conversion_rate = {action: ((n_conv / n_chosen) if (n_chosen != 0) else np.inf) for (action, (n_chosen, n_conv)) in self.conversion_counts.items()} best_action = max(conversion_rate, key=(lambda k: conversion_rate[k])) if (np.random.rand() < self.epsilon): action = self.action_space.sample() else: action = best_action self.previous_action = action return (action, {})
Choose highest conversion rate action (with some exploration).
pachinko/time_period_step_agent.py
act
datavaluepeople/pachinko
0
python
def act(self, previous_observation: np.ndarray) -> Tuple[(Any, dict)]: n_conversions = previous_observation[1] if (self.previous_action is not None): self.conversion_counts[self.previous_action][0] += 1 self.conversion_counts[self.previous_action][1] += n_conversions conversion_rate = {action: ((n_conv / n_chosen) if (n_chosen != 0) else np.inf) for (action, (n_chosen, n_conv)) in self.conversion_counts.items()} best_action = max(conversion_rate, key=(lambda k: conversion_rate[k])) if (np.random.rand() < self.epsilon): action = self.action_space.sample() else: action = best_action self.previous_action = action return (action, {})
def act(self, previous_observation: np.ndarray) -> Tuple[(Any, dict)]: n_conversions = previous_observation[1] if (self.previous_action is not None): self.conversion_counts[self.previous_action][0] += 1 self.conversion_counts[self.previous_action][1] += n_conversions conversion_rate = {action: ((n_conv / n_chosen) if (n_chosen != 0) else np.inf) for (action, (n_chosen, n_conv)) in self.conversion_counts.items()} best_action = max(conversion_rate, key=(lambda k: conversion_rate[k])) if (np.random.rand() < self.epsilon): action = self.action_space.sample() else: action = best_action self.previous_action = action return (action, {})<|docstring|>Choose highest conversion rate action (with some exploration).<|endoftext|>
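To see the bookkeeping of this epsilon-greedy act in isolation, the following self-contained simulation re-creates the same logic against a toy Bernoulli conversion process; the per-action rates, the seed and the horizon are made up purely for illustration:

import numpy as np

true_rates = {0: 0.02, 1: 0.05, 2: 0.08}        # hypothetical conversion rate per action
counts = {a: [0, 0] for a in true_rates}        # [n_chosen, n_conversions], as in the agent
epsilon, rng = 0.1, np.random.default_rng(0)

for _ in range(5000):
    rate = {a: ((c / n) if n else np.inf) for (a, (n, c)) in counts.items()}
    best = max(rate, key=rate.get)              # np.inf makes untried actions win first
    action = int(rng.integers(3)) if (rng.random() < epsilon) else best
    counts[action][0] += 1
    counts[action][1] += int(rng.random() < true_rates[action])

print(counts)  # the 0.08 action typically accumulates by far the most pulls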
e31f59b79ba7328d3f68cfc47df47e8bafe441a7fc141a4d02fcab41de07e648
@staticmethod def compute_UCB_gamma(total_trials: int, total_successes: int, prior_alpha: float=1.0, prior_beta: float=0.0001, ucb_percentile: float=0.95) -> float: 'Compute Bayesian update on Gamma dist with priors and compute upper percentile value.' alpha = (prior_alpha + total_successes) beta = (prior_beta + total_trials) return gamma.ppf(ucb_percentile, alpha, scale=(1 / beta))
Compute Bayesian update on Gamma dist with priors and compute upper percentile value.
pachinko/time_period_step_agent.py
compute_UCB_gamma
datavaluepeople/pachinko
0
python
@staticmethod def compute_UCB_gamma(total_trials: int, total_successes: int, prior_alpha: float=1.0, prior_beta: float=0.0001, ucb_percentile: float=0.95) -> float: alpha = (prior_alpha + total_successes) beta = (prior_beta + total_trials) return gamma.ppf(ucb_percentile, alpha, scale=(1 / beta))
@staticmethod def compute_UCB_gamma(total_trials: int, total_successes: int, prior_alpha: float=1.0, prior_beta: float=0.0001, ucb_percentile: float=0.95) -> float: alpha = (prior_alpha + total_successes) beta = (prior_beta + total_trials) return gamma.ppf(ucb_percentile, alpha, scale=(1 / beta))<|docstring|>Compute Bayesian update on Gamma dist with priors and compute upper percentile value.<|endoftext|>
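compute_UCB_gamma is a conjugate Gamma-Poisson update: posterior shape = prior_alpha + successes, posterior rate = prior_beta + trials, and the optimistic estimate is the posterior's 95th percentile. A quick worked check of the same arithmetic, with made-up counts:

from scipy.stats import gamma

total_trials, total_successes = 100, 7
alpha = 1.0 + total_successes       # posterior shape: 8.0
beta = 0.0001 + total_trials        # posterior rate: 100.0001

ucb = gamma.ppf(0.95, alpha, scale=(1 / beta))
print(ucb)  # ~0.13, comfortably above the observed rate 7/100 = 0.07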
f6195283be933eaaeea9bd9b417bc4159a7068086802f0d3cccde7340f7321ae
def act(self, previous_observation: np.ndarray) -> Any: 'Choose action with highest upper confidence bound for step in period.\n\n Also use previous observation to update upper confidence bound for previous step in period.\n ' current_idx = (self.step_number % self.period_length) prev_idx = ((self.step_number - 1) % self.period_length) if (self.previous_action is not None): self.conversion_counts[prev_idx][self.previous_action][0] += 1 self.conversion_counts[prev_idx][self.previous_action][1] += previous_observation[1] self.upper_confidence_bounds[prev_idx][self.previous_action] = self.compute_UCB_gamma(self.conversion_counts[prev_idx][self.previous_action][0], self.conversion_counts[prev_idx][self.previous_action][1]) best_action = max(self.upper_confidence_bounds[current_idx], key=(lambda k: self.upper_confidence_bounds[current_idx][k])) self.previous_action = best_action self.step_number += 1 agent_info = {'ucb_selected_action': self.upper_confidence_bounds[current_idx][best_action]} return (best_action, agent_info)
Choose action with highest upper confidence bound for step in period. Also use previous observation to update upper confidence bound for previous step in period.
pachinko/time_period_step_agent.py
act
datavaluepeople/pachinko
0
python
def act(self, previous_observation: np.ndarray) -> Any: 'Choose action with highest upper confidence bound for step in period.\n\n Also use previous observation to update upper confidence bound for previous step in period.\n ' current_idx = (self.step_number % self.period_length) prev_idx = ((self.step_number - 1) % self.period_length) if (self.previous_action is not None): self.conversion_counts[prev_idx][self.previous_action][0] += 1 self.conversion_counts[prev_idx][self.previous_action][1] += previous_observation[1] self.upper_confidence_bounds[prev_idx][self.previous_action] = self.compute_UCB_gamma(self.conversion_counts[prev_idx][self.previous_action][0], self.conversion_counts[prev_idx][self.previous_action][1]) best_action = max(self.upper_confidence_bounds[current_idx], key=(lambda k: self.upper_confidence_bounds[current_idx][k])) self.previous_action = best_action self.step_number += 1 agent_info = {'ucb_selected_action': self.upper_confidence_bounds[current_idx][best_action]} return (best_action, agent_info)
def act(self, previous_observation: np.ndarray) -> Any: 'Choose action with highest upper confidence bound for step in period.\n\n Also use previous observation to update upper confidence bound for previous step in period.\n ' current_idx = (self.step_number % self.period_length) prev_idx = ((self.step_number - 1) % self.period_length) if (self.previous_action is not None): self.conversion_counts[prev_idx][self.previous_action][0] += 1 self.conversion_counts[prev_idx][self.previous_action][1] += previous_observation[1] self.upper_confidence_bounds[prev_idx][self.previous_action] = self.compute_UCB_gamma(self.conversion_counts[prev_idx][self.previous_action][0], self.conversion_counts[prev_idx][self.previous_action][1]) best_action = max(self.upper_confidence_bounds[current_idx], key=(lambda k: self.upper_confidence_bounds[current_idx][k])) self.previous_action = best_action self.step_number += 1 agent_info = {'ucb_selected_action': self.upper_confidence_bounds[current_idx][best_action]} return (best_action, agent_info)<|docstring|>Choose action with highest upper confidence bound for step in period. Also use previous observation to update upper confidence bound for previous step in period.<|endoftext|>
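Because the tables in this act are indexed by step_number % period_length, evidence gathered at one step of the period never moves another step's bound, and an untouched slot keeps the near-flat prior's enormous 95th percentile, which forces every (slot, action) pair to be tried. A small self-contained illustration using the same arithmetic as compute_UCB_gamma:

from scipy.stats import gamma

def ucb(trials, successes, prior_alpha=1.0, prior_beta=0.0001, p=0.95):
    # Same computation as compute_UCB_gamma above.
    return gamma.ppf(p, (prior_alpha + successes), scale=(1 / (prior_beta + trials)))

print(ucb(50, 10))  # a slot with evidence: ~0.34 conversions per step
print(ucb(0, 0))    # an untouched slot: ~3e4, wildly optimistic, so it is explored next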
9104b8302efa2a26074fc42ab8603967b2ca5c2bfa0cea1ad29c7f4d207d6e64
def set_disabled_input(self): ' Method that disables the input fields' self.ui.label_new_message.setText('Для выбора получателя дважды кликните на нем в окне контактов.') self.ui.text_message.clear() if self.history_model: self.history_model.clear() self.ui.btn_clear.setDisabled(True) self.ui.btn_send.setDisabled(True) self.ui.text_message.setDisabled(True) self.encryptor = None self.current_chat = None self.current_chat_key = None
Method that disables the input fields
Lib/site-packages/client/main_window.py
set_disabled_input
fochoao/cpython
0
python
def set_disabled_input(self): ' ' self.ui.label_new_message.setText('Для выбора получателя дважды кликните на нем в окне контактов.') self.ui.text_message.clear() if self.history_model: self.history_model.clear() self.ui.btn_clear.setDisabled(True) self.ui.btn_send.setDisabled(True) self.ui.text_message.setDisabled(True) self.encryptor = None self.current_chat = None self.current_chat_key = None
def set_disabled_input(self): ' ' self.ui.label_new_message.setText('Для выбора получателя дважды кликните на нем в окне контактов.') self.ui.text_message.clear() if self.history_model: self.history_model.clear() self.ui.btn_clear.setDisabled(True) self.ui.btn_send.setDisabled(True) self.ui.text_message.setDisabled(True) self.encryptor = None self.current_chat = None self.current_chat_key = None<|docstring|>Method that disables the input fields<|endoftext|>
cc248e647cad6d95084a14788c060e198e02a926abfd2c56a0491fa3a504c9bd
def history_list_update(self): '\n Method that fills the corresponding QListView\n with the message history for the current conversation partner.\n ' list = sorted(self.database.get_history(self.current_chat), key=(lambda item: item[3])) if (not self.history_model): self.history_model = QStandardItemModel() self.ui.list_messages.setModel(self.history_model) self.history_model.clear() length = len(list) start_index = 0 if (length > 20): start_index = (length - 20) for i in range(start_index, length): item = list[i] if (item[1] == 'in'): mess = QStandardItem(f'''Входящее от {item[3].replace(microsecond=0)}:
{item[2]}''') mess.setEditable(False) mess.setBackground(QBrush(QColor(255, 213, 213))) mess.setTextAlignment(Qt.AlignLeft) self.history_model.appendRow(mess) else: mess = QStandardItem(f'''Исходящее от {item[3].replace(microsecond=0)}:
{item[2]}''') mess.setEditable(False) mess.setTextAlignment(Qt.AlignRight) mess.setBackground(QBrush(QColor(204, 255, 204))) self.history_model.appendRow(mess) self.ui.list_messages.scrollToBottom()
Method that fills the corresponding QListView with the message history for the current conversation partner.
Lib/site-packages/client/main_window.py
history_list_update
fochoao/cpython
0
python
def history_list_update(self): '\n Method that fills the corresponding QListView\n with the message history for the current conversation partner.\n ' list = sorted(self.database.get_history(self.current_chat), key=(lambda item: item[3])) if (not self.history_model): self.history_model = QStandardItemModel() self.ui.list_messages.setModel(self.history_model) self.history_model.clear() length = len(list) start_index = 0 if (length > 20): start_index = (length - 20) for i in range(start_index, length): item = list[i] if (item[1] == 'in'): mess = QStandardItem(f'Входящее от {item[3].replace(microsecond=0)}: {item[2]}') mess.setEditable(False) mess.setBackground(QBrush(QColor(255, 213, 213))) mess.setTextAlignment(Qt.AlignLeft) self.history_model.appendRow(mess) else: mess = QStandardItem(f'Исходящее от {item[3].replace(microsecond=0)}: {item[2]}') mess.setEditable(False) mess.setTextAlignment(Qt.AlignRight) mess.setBackground(QBrush(QColor(204, 255, 204))) self.history_model.appendRow(mess) self.ui.list_messages.scrollToBottom()
def history_list_update(self): '\n Method that fills the corresponding QListView\n with the message history for the current conversation partner.\n ' list = sorted(self.database.get_history(self.current_chat), key=(lambda item: item[3])) if (not self.history_model): self.history_model = QStandardItemModel() self.ui.list_messages.setModel(self.history_model) self.history_model.clear() length = len(list) start_index = 0 if (length > 20): start_index = (length - 20) for i in range(start_index, length): item = list[i] if (item[1] == 'in'): mess = QStandardItem(f'Входящее от {item[3].replace(microsecond=0)}: {item[2]}') mess.setEditable(False) mess.setBackground(QBrush(QColor(255, 213, 213))) mess.setTextAlignment(Qt.AlignLeft) self.history_model.appendRow(mess) else: mess = QStandardItem(f'Исходящее от {item[3].replace(microsecond=0)}: {item[2]}') mess.setEditable(False) mess.setTextAlignment(Qt.AlignRight) mess.setBackground(QBrush(QColor(204, 255, 204))) self.history_model.appendRow(mess) self.ui.list_messages.scrollToBottom()<|docstring|>Method that fills the corresponding QListView with the message history for the current conversation partner.<|endoftext|>
024fc5b51f091775082bf1b99d42397e15bf4f826cfcaa6137d12630541997cf
def select_active_user(self): 'Handler method for the double-click event on the contact list.' self.current_chat = self.ui.list_contacts.currentIndex().data() self.set_active_user()
Handler method for the double-click event on the contact list.
Lib/site-packages/client/main_window.py
select_active_user
fochoao/cpython
0
python
def select_active_user(self): self.current_chat = self.ui.list_contacts.currentIndex().data() self.set_active_user()
def select_active_user(self): self.current_chat = self.ui.list_contacts.currentIndex().data() self.set_active_user()<|docstring|>Handler method for the double-click event on the contact list.<|endoftext|>
e59f8bdf602682e56a9a25f037602eec132bcf1fa9fbe9e2ed58598c1e934e4b
def set_active_user(self): 'Method that activates a chat with the selected contact.' try: self.current_chat_key = self.transport.key_request(self.current_chat) logger.debug(f'Загружен открытый ключ для {self.current_chat}') if self.current_chat_key: self.encryptor = PKCS1_OAEP.new(RSA.import_key(self.current_chat_key)) except (OSError, json.JSONDecodeError): self.current_chat_key = None self.encryptor = None logger.debug(f'Не удалось получить ключ для {self.current_chat}') if (not self.current_chat_key): self.messages.warning(self, 'Ошибка', 'Для выбранного пользователя нет ключа шифрования.') return self.ui.label_new_message.setText(f'Введите сообщение для {self.current_chat}:') self.ui.btn_clear.setDisabled(False) self.ui.btn_send.setDisabled(False) self.ui.text_message.setDisabled(False) self.history_list_update()
Method that activates a chat with the selected contact.
Lib/site-packages/client/main_window.py
set_active_user
fochoao/cpython
0
python
def set_active_user(self): try: self.current_chat_key = self.transport.key_request(self.current_chat) logger.debug(f'Загружен открытый ключ для {self.current_chat}') if self.current_chat_key: self.encryptor = PKCS1_OAEP.new(RSA.import_key(self.current_chat_key)) except (OSError, json.JSONDecodeError): self.current_chat_key = None self.encryptor = None logger.debug(f'Не удалось получить ключ для {self.current_chat}') if (not self.current_chat_key): self.messages.warning(self, 'Ошибка', 'Для выбранного пользователя нет ключа шифрования.') return self.ui.label_new_message.setText(f'Введите сообщение для {self.current_chat}:') self.ui.btn_clear.setDisabled(False) self.ui.btn_send.setDisabled(False) self.ui.text_message.setDisabled(False) self.history_list_update()
def set_active_user(self): try: self.current_chat_key = self.transport.key_request(self.current_chat) logger.debug(f'Загружен открытый ключ для {self.current_chat}') if self.current_chat_key: self.encryptor = PKCS1_OAEP.new(RSA.import_key(self.current_chat_key)) except (OSError, json.JSONDecodeError): self.current_chat_key = None self.encryptor = None logger.debug(f'Не удалось получить ключ для {self.current_chat}') if (not self.current_chat_key): self.messages.warning(self, 'Ошибка', 'Для выбранного пользователя нет ключа шифрования.') return self.ui.label_new_message.setText(f'Введите сообщение для {self.current_chat}:') self.ui.btn_clear.setDisabled(False) self.ui.btn_send.setDisabled(False) self.ui.text_message.setDisabled(False) self.history_list_update()<|docstring|>Method that activates a chat with the selected contact.<|endoftext|>
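set_active_user builds a PyCryptodome PKCS1_OAEP cipher over the contact's public key, so outgoing messages can presumably only be read by the holder of the matching private key. A self-contained round trip of that pattern (the key pair is generated locally here purely for illustration; in the client the public key comes from transport.key_request):

from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP

key = RSA.generate(2048)
public_key = key.publickey().export_key()   # stands in for what key_request returns

encryptor = PKCS1_OAEP.new(RSA.import_key(public_key))
ciphertext = encryptor.encrypt('привет'.encode('utf-8'))

decryptor = PKCS1_OAEP.new(key)             # only the private-key holder can do this
assert decryptor.decrypt(ciphertext).decode('utf-8') == 'привет'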
45d12f4a6287a48eee06c18b47df2d7f17fdd05ab5325ae80301aa188c4b6019
def clients_list_update(self): 'Method that updates the contact list.' contacts_list = self.database.get_contacts() self.contacts_model = QStandardItemModel() for i in sorted(contacts_list): item = QStandardItem(i) item.setEditable(False) self.contacts_model.appendRow(item) self.ui.list_contacts.setModel(self.contacts_model)
Method that updates the contact list.
Lib/site-packages/client/main_window.py
clients_list_update
fochoao/cpython
0
python
def clients_list_update(self): contacts_list = self.database.get_contacts() self.contacts_model = QStandardItemModel() for i in sorted(contacts_list): item = QStandardItem(i) item.setEditable(False) self.contacts_model.appendRow(item) self.ui.list_contacts.setModel(self.contacts_model)
def clients_list_update(self): contacts_list = self.database.get_contacts() self.contacts_model = QStandardItemModel() for i in sorted(contacts_list): item = QStandardItem(i) item.setEditable(False) self.contacts_model.appendRow(item) self.ui.list_contacts.setModel(self.contacts_model)<|docstring|>Method that updates the contact list.<|endoftext|>
25151433b1aaa9782beccab2a378dd7e790e15113ea9b31dda045614f8be2ba7
def add_contact_window(self): 'Method that creates the add-contact dialog window' global select_dialog select_dialog = AddContactDialog(self.transport, self.database) select_dialog.btn_ok.clicked.connect((lambda : self.add_contact_action(select_dialog))) select_dialog.show()
Method that creates the add-contact dialog window
Lib/site-packages/client/main_window.py
add_contact_window
fochoao/cpython
0
python
def add_contact_window(self): global select_dialog select_dialog = AddContactDialog(self.transport, self.database) select_dialog.btn_ok.clicked.connect((lambda : self.add_contact_action(select_dialog))) select_dialog.show()
def add_contact_window(self): global select_dialog select_dialog = AddContactDialog(self.transport, self.database) select_dialog.btn_ok.clicked.connect((lambda : self.add_contact_action(select_dialog))) select_dialog.show()<|docstring|>Method that creates the add-contact dialog window<|endoftext|>
659867b24b95064256dc12a4b3b8014478973e86ec98d44c088aed558705552a
def add_contact_action(self, item): 'Handler method for the "Add" button click' new_contact = item.selector.currentText() self.add_contact(new_contact) item.close()
Handler method for the "Add" button click
Lib/site-packages/client/main_window.py
add_contact_action
fochoao/cpython
0
python
def add_contact_action(self, item): new_contact = item.selector.currentText() self.add_contact(new_contact) item.close()
def add_contact_action(self, item): new_contact = item.selector.currentText() self.add_contact(new_contact) item.close()<|docstring|>Handler method for the "Add" button click<|endoftext|>
d6abbc4dfc5b0888b57036ae18bb1db1e9087bbe5de987be236b99fab6c4f493
def add_contact(self, new_contact): '\n Method that adds a contact to the server and client databases.\n After the databases are updated, it also refreshes the window contents.\n ' try: self.transport.add_contact(new_contact) except ServerError as err: self.messages.critical(self, 'Ошибка сервера', err.text) except OSError as err: if err.errno: self.messages.critical(self, 'Ошибка', 'Потеряно соединение с сервером!') self.close() self.messages.critical(self, 'Ошибка', 'Таймаут соединения!') else: self.database.add_contact(new_contact) new_contact = QStandardItem(new_contact) new_contact.setEditable(False) self.contacts_model.appendRow(new_contact) logger.info(f'Успешно добавлен контакт {new_contact}') self.messages.information(self, 'Успех', 'Контакт успешно добавлен.')
Method that adds a contact to the server and client databases. After the databases are updated, it also refreshes the window contents.
Lib/site-packages/client/main_window.py
add_contact
fochoao/cpython
0
python
def add_contact(self, new_contact): '\n Method that adds a contact to the server and client databases.\n After the databases are updated, it also refreshes the window contents.\n ' try: self.transport.add_contact(new_contact) except ServerError as err: self.messages.critical(self, 'Ошибка сервера', err.text) except OSError as err: if err.errno: self.messages.critical(self, 'Ошибка', 'Потеряно соединение с сервером!') self.close() self.messages.critical(self, 'Ошибка', 'Таймаут соединения!') else: self.database.add_contact(new_contact) new_contact = QStandardItem(new_contact) new_contact.setEditable(False) self.contacts_model.appendRow(new_contact) logger.info(f'Успешно добавлен контакт {new_contact}') self.messages.information(self, 'Успех', 'Контакт успешно добавлен.')
def add_contact(self, new_contact): '\n Method that adds a contact to the server and client databases.\n After the databases are updated, it also refreshes the window contents.\n ' try: self.transport.add_contact(new_contact) except ServerError as err: self.messages.critical(self, 'Ошибка сервера', err.text) except OSError as err: if err.errno: self.messages.critical(self, 'Ошибка', 'Потеряно соединение с сервером!') self.close() self.messages.critical(self, 'Ошибка', 'Таймаут соединения!') else: self.database.add_contact(new_contact) new_contact = QStandardItem(new_contact) new_contact.setEditable(False) self.contacts_model.appendRow(new_contact) logger.info(f'Успешно добавлен контакт {new_contact}') self.messages.information(self, 'Успех', 'Контакт успешно добавлен.')<|docstring|>Method that adds a contact to the server and client databases. After the databases are updated, it also refreshes the window contents.<|endoftext|>