kevinconka committed
Commit 84f01ec · 1 Parent(s): c59668c

proper use of "add" and "compute" + "add_from_payload"

Files changed (1): det-metrics.py (+84 -62)
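The updated docstring in the diff below shows the new calling convention. As a minimal sketch of that flow (same assumptions as the docstring: the `PayloadProcessor` arguments and `evaluate.load` options stay elided as `...`):

import evaluate
from seametrics.payload import PayloadProcessor

payload = PayloadProcessor(...).payload  # standard payload, constructor args elided

for model in payload.models:
    module = evaluate.load("SEA-AI/det-metrics", ...)  # load options elided
    module.add_from_payload(payload, model)  # model_name selects among payload.models
    result = module.compute()                # delegates to PrecisionRecallF1Support
    print(result)

A fresh metric instance is loaded per model, since one instance accumulates state for a single model between add and compute.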
det-metrics.py CHANGED
@@ -13,15 +13,15 @@
 # limitations under the License.
 """TODO: Add a description here."""
 
-from typing import List, Tuple, Dict, Literal
+from typing import List, Tuple, Literal
 
 import evaluate
 import datasets
 import numpy as np
 
 from seametrics.detection import PrecisionRecallF1Support
-from seametrics.fo_utils.utils import _fo_dets_to_metrics_dict
-from seametrics.fo_utils.utils import _add_batch
+from seametrics.detection.utils import payload_to_det_metric
+from seametrics.payload import Payload
 
 _CITATION = """\
 @InProceedings{coco:2020,
@@ -53,13 +53,16 @@ It is based on a modified version of the commonly used COCO-evaluation metrics.
 _KWARGS_DESCRIPTION = """
 Calculates object detection metrics given predicted and ground truth bounding boxes for a single image.
 Args:
-    predictions: list of predictions to score. Each prediction should
-        be a list containing the four co-ordinates that specify the bounding box.
-        Co-ordinate format is as defined when instantiating the metric
-        (parameter: bbox_type, defaults to xywh).
-    references: list of reference for each prediction. Each prediction should
-        be a list containing the four co-ordinates that specify the bounding box.
-        Bounding box format should be the same as for the predictions.
+    predictions: list of predictions for each image. Each prediction should
+        be a dict containing the following
+        - 'boxes': list of bounding boxes, xywh in absolute pixel values
+        - 'labels': list of labels for each bounding box
+        - 'scores': list of scores for each bounding box
+    references: list of ground truth annotations for each image. Each reference should
+        be a dict containing the following
+        - 'boxes': list of bounding boxes, xywh in absolute pixel values
+        - 'labels': list of labels for each bounding box
+        - 'area': list of areas for each bounding box
 Returns:
     dict containing dicts for each specified area range with following items:
     'range': specified area with [max_px_area, max_px_area]
@@ -84,11 +87,11 @@ Returns:
     'nImgs': number of images considered in evaluation
 Examples:
     >>> import evaluate
-    >>> from seametrics.fo_to_payload.utils import fo_to_payload
-    >>> payload = fo_to_payload(..., models=model_list)
-    >>> for model in payload["models"]:
-    >>>     module = evaluate.load("./detection_metric.py", iou_thresholds=0.9)
-    >>>     module.add_batch(payload)
+    >>> from seametrics.payload import PayloadProcessor
+    >>> payload = PayloadProcessor(...).payload
+    >>> for model in payload.models:
+    >>>     module = evaluate.load("SEA-AI/det-metrics", ...)
+    >>>     module.add_from_payload(payload)
     >>> result = module.compute()
     >>> print(result)
     {'all': {
@@ -113,27 +116,29 @@ Examples:
 @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
 class DetectionMetric(evaluate.Metric):
     def __init__(
-        self,
-        area_ranges_tuples: List[Tuple[str, List[int]]] = [("all", [0, 1e5 ** 2])],
-        iou_threshold: float = 1e-10,
-        class_agnostic: bool = True,
-        bbox_format: str = "xywh",
-        iou_type: Literal["bbox", "segm"] = "bbox",
-        **kwargs
-    ):
+        self,
+        area_ranges_tuples: List[Tuple[str, List[int]]] = [("all", [0, 1e5**2])],
+        iou_threshold: List[float] = [1e-10],
+        class_agnostic: bool = True,
+        bbox_format: str = "xywh",
+        iou_type: Literal["bbox", "segm"] = "bbox",
+        **kwargs
+    ):
         super().__init__(**kwargs)
         area_ranges = [v for _, v in area_ranges_tuples]
         area_ranges_labels = [k for k, _ in area_ranges_tuples]
+        iou_threshold = (
+            [iou_threshold] if not isinstance(iou_threshold, list) else iou_threshold
+        )
 
-        metric_params = dict(
-            iou_thresholds=[iou_threshold],
+        self.coco_metric = PrecisionRecallF1Support(
+            iou_thresholds=iou_threshold,
             area_ranges=area_ranges,
             area_ranges_labels=area_ranges_labels,
             class_agnostic=class_agnostic,
             iou_type=iou_type,
-            box_format=bbox_format
+            box_format=bbox_format,
         )
-        self.coco_metric = PrecisionRecallF1Support(**metric_params)
 
     def _info(self):
         return evaluate.MetricInfo(
@@ -145,46 +150,63 @@ class DetectionMetric(evaluate.Metric):
             # This defines the format of each prediction and reference
             features=datasets.Features(
                 {
-                    'predictions': datasets.Sequence(feature=datasets.Sequence(datasets.Value("float"))),
-                    'references': datasets.Sequence(feature=datasets.Sequence(datasets.Value("float"))),
+                    "predictions": [
+                        datasets.Features(
+                            {
+                                "boxes": datasets.Sequence(
+                                    datasets.Sequence(datasets.Value("float"))
+                                ),
+                                "labels": datasets.Sequence(datasets.Value("int64")),
+                                "scores": datasets.Sequence(datasets.Value("float")),
+                            }
+                        )
+                    ],
+                    "references": [
+                        datasets.Features(
+                            {
+                                "boxes": datasets.Sequence(
+                                    datasets.Sequence(datasets.Value("float"))
+                                ),
+                                "labels": datasets.Sequence(datasets.Value("int64")),
+                                "area": datasets.Sequence(datasets.Value("float")),
+                            }
+                        )
+                    ],
                 }
             ),
             # Additional links to the codebase or references
-            codebase_urls=["https://github.com/SEA-AI/metrics/tree/main",
-                           "https://github.com/cocodataset/cocoapi/tree/master"]
+            codebase_urls=[
+                "https://github.com/SEA-AI/seametrics/tree/main",
+                "https://lightning.ai/docs/torchmetrics/stable/detection/mean_average_precision.html",
+            ],
         )
-
-    def add_batch(
-        self,
-        data: dict,
-        model: str = None
-    ):
-        """Add predictions and ground truths of a single image to update the metric.
-
-        Args:
-            data (dict): containing standard payload of data that should be evaluated
-                format should be as returned by function `fo_to_payload()` in seametrics library
-            model (str): should be one out of values given in data["models"]
-                if not defined, defaults to data["models"][0], as only one model can be evaluated a time.
-        """
-        # populate two empty lists in format suitable for hugging face metric
-        # nothing is computed based on them but prevents huggingface error
-
-        self, predictions, references = _add_batch(self, data, model)
-
-        # prevents hugging face error, doesn't do a lot
-        super(evaluate.Metric, self).add_batch(
-            predictions=predictions,
-            references=references
+
+    def add(self, *, prediction, reference, **kwargs):
+        """Adds a batch of predictions and references to the metric"""
+        self.coco_metric.update(prediction, reference)
+
+        # does not impact the metric, but is required for the interface x_x
+        super(evaluate.Metric, self).add(
+            prediction=[self._np_to_lists(p) for p in prediction],
+            references=[self._np_to_lists(r) for r in reference],
+            **kwargs
         )
 
+    def _np_to_lists(self, d):
+        """datasets does not support numpy arrays for type checking"""
+        for k, v in d.items():
+            if isinstance(v, dict):
+                self._np_to_lists(v)
+            elif isinstance(v, np.ndarray):
+                d[k] = v.tolist()
+        return d
 
-    def _compute(
-        self,
-        predictions,
-        references
-    ):
+    def _compute(self, *, predictions, references, **kwargs):
         """Returns the scores"""
-        result = self.coco_metric.compute()["metrics"]
-        return result
+        return self.coco_metric.compute()["metrics"]
+
+    def add_from_payload(self, payload: Payload, model_name: str = None):
+        """Converts the payload to the format expected by the metric"""
+        predictions, references = payload_to_det_metric(payload, model_name)
+        self.add(prediction=predictions, reference=references)
+        return self
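Besides the payload path, the new `add` signature also accepts predictions and references directly, shaped like the features schema declared in `_info`. A hedged sketch (the boxes, labels, and scores are made-up illustration values; numpy arrays are shown because `_np_to_lists` explicitly converts them):

import evaluate
import numpy as np

module = evaluate.load("SEA-AI/det-metrics")

# a single image, shaped to match the features schema above
predictions = [
    {
        "boxes": np.array([[10.0, 20.0, 50.0, 40.0]]),  # xywh, absolute pixels
        "labels": np.array([0]),
        "scores": np.array([0.9]),
    }
]
references = [
    {
        "boxes": np.array([[12.0, 18.0, 48.0, 44.0]]),
        "labels": np.array([0]),
        "area": np.array([48.0 * 44.0]),  # pixel area of the ground-truth box
    }
]

module.add(prediction=predictions, reference=references)
print(module.compute())

The division of labour is worth noting: `add` pushes state into `PrecisionRecallF1Support.update`, the `super().add(...)` call exists only to satisfy the `evaluate`/`datasets` interface (hence `_np_to_lists`), and `_compute` ignores the predictions and references it receives and simply returns `self.coco_metric.compute()["metrics"]`.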