bascobasculino committed
Commit 4f61238 · Parent(s): 3b2900b
Files changed (2):
  1. my_metricv2.py +64 -29
  2. tests.py +34 -14
my_metricv2.py CHANGED
@@ -29,8 +29,10 @@ year={2020}
 
 # TODO: Add description of the module here
 _DESCRIPTION = """\
-This new module is designed to solve this great ML task and is crafted with a lot of care.
-"""
+The MOT Metrics module is designed to evaluate multi-object tracking (MOT)
+algorithms by computing various metrics based on predicted and ground-truth bounding
+boxes. It serves as a tool for assessing the performance of MOT systems,
+aiding in the iterative improvement of tracking algorithms."""
 
 
 # TODO: Add description of the arguments of the module here
@@ -42,35 +44,70 @@ Args:
         should be a string with tokens separated by spaces.
     references: list of reference for each prediction. Each
         reference should be a string with tokens separated by spaces.
+    max_iou (`float`, *optional*):
+        The minimum Intersection over Union (IoU) threshold required to count a detection as a true positive.
+        Defaults to 0.5.
 Returns:
-    accuracy: description of the first score,
-    another_score: description of the second score,
+    summary: pandas.DataFrame with the following columns:
+        - idf1 (IDF1 score): F1 score for identity assignment, computed as 2 * (IDP * IDR) / (IDP + IDR).
+        - idp (ID precision): ratio of correctly assigned identities to the total number of predicted identities.
+        - idr (ID recall): ratio of correctly assigned identities to the total number of ground-truth identities.
+        - recall: ratio of correctly tracked objects to the total number of ground-truth objects.
+        - precision: ratio of correctly tracked objects to the total number of predicted objects.
+        - num_unique_objects: total number of unique object ids in the ground truth.
+        - mostly_tracked: number of objects tracked for most of the sequence.
+        - partially_tracked: number of objects partially tracked but not mostly tracked.
+        - mostly_lost: number of objects lost for most of the sequence.
+        - num_false_positives: number of false-positive detections (predicted objects not present in the ground truth).
+        - num_misses: number of missed detections (ground-truth objects not detected in the predictions).
+        - num_switches: number of identity switches.
+        - num_fragmentations: number of fragmented tracks (objects broken into multiple tracks).
+        - mota (Multiple Object Tracking Accuracy): overall tracking accuracy, computed as 1 - ((num_false_positives + num_misses + num_switches) / total number of ground-truth detections).
+        - motp (Multiple Object Tracking Precision): mean localization error over correctly matched objects (lower is better).
+        - num_transfer: number of track transfers.
+        - num_ascend: number of ascended track ids.
+        - num_migrate: number of track id migrations.
+
 Examples:
     >>> import numpy as np
-    >>> mean_iou = evaluate.load("mean_iou")
-
-    >>> # suppose one has 3 different segmentation maps predicted
-    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
-    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
-
-    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
-    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
-
-    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
-    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
-
-    >>> predicted = [predicted_1, predicted_2, predicted_3]
-    >>> ground_truth = [actual_1, actual_2, actual_3]
-
-    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
-    >>> print(results) # doctest: +NORMALIZE_WHITESPACE
-    {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
-
+    >>> module = evaluate.load("bascobasculino/my_metricv2")
+
+    >>> predicted = [
+    ...     [1, 1, 10, 20, 30, 40, 0.85],
+    ...     [1, 2, 50, 60, 70, 80, 0.92],
+    ...     [1, 3, 80, 90, 100, 110, 0.75],
+    ...     [2, 1, 15, 25, 35, 45, 0.78],
+    ...     [2, 2, 55, 65, 75, 85, 0.95],
+    ...     [3, 1, 20, 30, 40, 50, 0.88],
+    ...     [3, 2, 60, 70, 80, 90, 0.82],
+    ...     [4, 1, 25, 35, 45, 55, 0.91],
+    ...     [4, 2, 65, 75, 85, 95, 0.89]
+    ... ]
+
+    >>> ground_truth = [
+    ...     [1, 1, 10, 20, 30, 40],
+    ...     [1, 2, 50, 60, 70, 80],
+    ...     [1, 3, 85, 95, 105, 115],
+    ...     [2, 1, 15, 25, 35, 45],
+    ...     [2, 2, 55, 65, 75, 85],
+    ...     [3, 1, 20, 30, 40, 50],
+    ...     [3, 2, 60, 70, 80, 90],
+    ...     [4, 1, 25, 35, 45, 55],
+    ...     [5, 1, 30, 40, 50, 60],
+    ...     [5, 2, 70, 80, 90, 100]
+    ... ]
+    >>> predicted = [np.array(a) for a in predicted]
+    >>> ground_truth = [np.array(a) for a in ground_truth]
+
+    >>> results = module._compute(predictions=predicted, references=ground_truth, max_iou=0.5)
+    >>> print(results)  # doctest: +NORMALIZE_WHITESPACE
+    {'idf1': {0: 0.8421052631578947}, 'idp': {0: 0.8888888888888888}, 'idr': {0: 0.8}, 'recall': {0: 0.8},
+    'precision': {0: 0.8888888888888888}, 'num_unique_objects': {0: 3}, 'mostly_tracked': {0: 2},
+    'partially_tracked': {0: 1}, 'mostly_lost': {0: 0}, 'num_false_positives': {0: 1}, 'num_misses': {0: 2},
+    'num_switches': {0: 0}, 'num_fragmentations': {0: 0}, 'mota': {0: 0.7}, 'motp': {0: 0.02981870229007634},
+    'num_transfer': {0: 0}, 'num_ascend': {0: 0}, 'num_migrate': {0: 0}}
 """
 
-# TODO: Define external resources urls if needed
-BAD_WORDS_URL = "http://url/to/external/resource/bad_words.txt"
-
 
 @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
 class MyMetricv2(evaluate.Metric):
@@ -145,6 +182,4 @@ def calculate(predictions, references, max_iou: float = 0.5):
 
     mh = mm.metrics.create()
     summary = mh.compute(acc)
-
-
-    return summary
+    return summary.to_dict()
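
The `mm` calls above come from the `motmetrics` package. Below is a minimal sketch of how a summary like the one in the docstring is typically produced with that API; the per-frame grouping, the row layout `[frame, track_id, x, y, w, h(, confidence)]` inferred from the doctest, and the metric selection are illustrative assumptions, not the module's exact code:

```python
import motmetrics as mm
import numpy as np

# Row layout assumed from the docstring example (MOTChallenge-style):
#   predictions: [frame, track_id, x, y, w, h, confidence]
#   references:  [frame, track_id, x, y, w, h]
predicted = [np.array([1, 1, 10, 20, 30, 40, 0.85]),
             np.array([1, 2, 50, 60, 70, 80, 0.92])]
ground_truth = [np.array([1, 1, 10, 20, 30, 40]),
                np.array([1, 2, 55, 65, 75, 85])]

acc = mm.MOTAccumulator(auto_id=True)
for frame in sorted({int(row[0]) for row in ground_truth}):
    gt = [row for row in ground_truth if row[0] == frame]
    dt = [row for row in predicted if row[0] == frame]
    # iou_matrix computes 1 - IoU over [x, y, w, h] boxes; pairs whose
    # distance exceeds max_iou become NaN (unmatchable), so max_iou=0.5
    # requires IoU >= 0.5 for a true positive.
    dists = mm.distances.iou_matrix(
        np.array([row[2:6] for row in gt]),
        np.array([row[2:6] for row in dt]),
        max_iou=0.5,
    )
    acc.update([int(row[1]) for row in gt], [int(row[1]) for row in dt], dists)

mh = mm.metrics.create()
summary = mh.compute(acc, metrics=["mota", "motp", "idf1"], name="acc")
print(summary.to_dict())
```

Returning `summary.to_dict()`, as the new hunk does, converts the pandas DataFrame into a plain `{column: {row_index: value}}` mapping, which is exactly the shape of the doctest output above.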
 
tests.py CHANGED
@@ -1,17 +1,37 @@
 
+import numpy as np
 test_cases = [
     {
-        "predictions": [0, 0],
-        "references": [1, 1],
-        "result": {"metric_score": 0}
-    },
-    {
-        "predictions": [1, 1],
-        "references": [1, 1],
-        "result": {"metric_score": 1}
-    },
-    {
-        "predictions": [1, 0],
-        "references": [1, 1],
-        "result": {"metric_score": 0.5}
-    }
+        "predictions": [np.array(a) for a in [
+            [1, 1, 10, 20, 30, 40, 0.85],
+            [1, 2, 50, 60, 70, 80, 0.92],
+            [1, 3, 80, 90, 100, 110, 0.75],
+            [2, 1, 15, 25, 35, 45, 0.78],
+            [2, 2, 55, 65, 75, 85, 0.95],
+            [3, 1, 20, 30, 40, 50, 0.88],
+            [3, 2, 60, 70, 80, 90, 0.82],
+            [4, 1, 25, 35, 45, 55, 0.91],
+            [4, 2, 65, 75, 85, 95, 0.89]
+        ]],
+        "references": [np.array(a) for a in [
+            [1, 1, 10, 20, 30, 40],
+            [1, 2, 50, 60, 70, 80],
+            [1, 3, 85, 95, 105, 115],
+            [2, 1, 15, 25, 35, 45],
+            [2, 2, 55, 65, 75, 85],
+            [3, 1, 20, 30, 40, 50],
+            [3, 2, 60, 70, 80, 90],
+            [4, 1, 25, 35, 45, 55],
+            [5, 1, 30, 40, 50, 60],
+            [5, 2, 70, 80, 90, 100]
+        ]],
+        "result": {'idf1': {0: 0.8421052631578947}, 'idp': {0: 0.8888888888888888},
+                   'idr': {0: 0.8}, 'recall': {0: 0.8}, 'precision': {0: 0.8888888888888888},
+                   'num_unique_objects': {0: 3}, 'mostly_tracked': {0: 2},
+                   'partially_tracked': {0: 1}, 'mostly_lost': {0: 0},
+                   'num_false_positives': {0: 1}, 'num_misses': {0: 2},
+                   'num_switches': {0: 0}, 'num_fragmentations': {0: 0},
+                   'mota': {0: 0.7}, 'motp': {0: 0.02981870229007634},
+                   'num_transfer': {0: 0}, 'num_ascend': {0: 0},
+                   'num_migrate': {0: 0}}
+    },
 ]
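
A sketch of how these cases might be driven through the module, following the `_compute` call shown in the docstring example; the runner loop and the comparison tolerance are illustrative, and `bascobasculino/my_metricv2` is simply the module id used there:

```python
import math

import evaluate

module = evaluate.load("bascobasculino/my_metricv2")

for case in test_cases:
    result = module._compute(
        predictions=case["predictions"],
        references=case["references"],
        max_iou=0.5,
    )
    # Each column is a {row_index: value} mapping, per DataFrame.to_dict(),
    # so compare the row-0 value of every expected metric.
    for metric, expected in case["result"].items():
        assert math.isclose(result[metric][0], expected[0], rel_tol=1e-9), metric
```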