hichem-abdellali committed
Commit 2931c23
1 Parent(s): ada2155

Update det-metrics.py

Moved _add_batch and _fo_dets_to_metrics_dict to the seametrics utils.

Files changed (1)
  1. det-metrics.py +4 -81
det-metrics.py CHANGED

@@ -20,6 +20,8 @@ import datasets
 import numpy as np
 
 from seametrics.detection import PrecisionRecallF1Support
+from seametrics.fo_utils.utils import _fo_dets_to_metrics_dict
+from seametrics.fo_utils.utils import _add_batch
 
 _CITATION = """\
 @InProceedings{coco:2020,
@@ -167,34 +169,9 @@ class DetectionMetric(evaluate.Metric):
         """
         # populate two empty lists in format suitable for hugging face metric
         # nothing is computed based on them but prevents huggingface error
-        predictions, references = [], []
 
-        if model is None:
-            model = data["models"][0]
 
-        for sequence in data["sequence_list"]:
-            seq_data = data["sequences"][sequence]
-            gt_normalized = seq_data[data["gt_field_name"]]  # shape: (n_frames, m_gts)
-            pred_normalized = seq_data[model]  # shape: (n_frames, l_preds)
-            img_res = seq_data["resolution"]  # (h, w)
-            for gt_frame, pred_frame in zip(gt_normalized, pred_normalized):  # iterate over all frames
-                processed_pred = self._fo_dets_to_metrics_dict(
-                    fo_dets=pred_frame,
-                    w=img_res[1],
-                    h=img_res[0],
-                    include_scores=True
-                )
-                processed_gt = self._fo_dets_to_metrics_dict(
-                    fo_dets=gt_frame,
-                    w=img_res[1],
-                    h=img_res[0],
-                    include_scores=False
-                )
-                predictions.append(processed_pred[0]["boxes"].tolist())
-                references.append(processed_gt[0]["boxes"].tolist())
-
-                # where the magic happens: update metric with data from current frame
-                self.coco_metric.update(processed_pred, processed_gt)
+        self, predictions, references = _add_batch(self, data, model)
 
         # prevents hugging face error, doesn't do a lot
         super(evaluate.Metric, self).add_batch(
@@ -202,6 +179,7 @@ class DetectionMetric(evaluate.Metric):
             references=references
         )
 
+
     def _compute(
         self,
         predictions,
@@ -210,58 +188,3 @@ class DetectionMetric(evaluate.Metric):
         """Returns the scores"""
         result = self.coco_metric.compute()["metrics"]
         return result
-
-    @staticmethod
-    def _fo_dets_to_metrics_dict(fo_dets: list,
-                                 w: int,
-                                 h: int,
-                                 include_scores: bool = False) -> List[Dict[str, np.ndarray]]:
-        """Convert list of fiftyone detections to format that is
-        required by PrecisionRecallF1Support() function of seametrics library
-
-        Args:
-            fo_dets (list): list containing fiftyone detections (or empty if frame without any detections)
-                note: bounding boxes in fo-detections are in format xywhn
-            w (int): width in pixel of image
-            h (int): height in pixel of image
-
-        Returns:
-            List[Dict[str, np.ndarray]]: list holding single dict with items:
-                "boxes": denormalized bounding boxes of whole frame in numpy array (shape: n_bboxes, 4)
-                "scores": confidence scores in numpy array (shape: n_bboxes)
-                "labels": labels in numpy array (shape: n_bboxes)
-        """
-        detections = []
-        scores = []
-        labels = []  # TODO: map to numbers
-        if len(fo_dets) == 0:
-            return [
-                dict(
-                    boxes=np.array([]),
-                    scores=np.array([]),
-                    labels=np.array([])
-                )
-            ]
-        for det in fo_dets:
-            bbox = det["bounding_box"]
-            detections.append(
-                [bbox[0]*w, bbox[1]*h, bbox[2]*w, bbox[3]*h]
-            )
-            scores.append(det["confidence"] if det["confidence"] is not None else 1.0)  # None for gt
-            labels.append(1)
-
-        if include_scores:
-            return [
-                dict(
-                    boxes=np.array(detections),
-                    scores=np.array(scores),
-                    labels=np.array(labels)
-                )
-            ]
-        else:
-            return [
-                dict(
-                    boxes=np.array(detections),
-                    labels=np.array(labels)
-                )
-            ]
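
Note on the refactor: per the commit message, the loop deleted above now lives in seametrics.fo_utils.utils. The upstream implementation is not shown in this diff; below is a minimal sketch of what _add_batch plausibly looks like, reconstructed from the removed lines and the new call site (which unpacks three return values). Treat it as an illustration, not the actual seametrics code.

# Hypothetical reconstruction of seametrics.fo_utils.utils._add_batch,
# mirroring the loop removed in this commit. The real helper may differ.
def _add_batch(metric, data, model=None):
    """Feed every frame of every sequence into metric.coco_metric."""
    predictions, references = [], []

    if model is None:
        model = data["models"][0]  # default to the first listed model

    for sequence in data["sequence_list"]:
        seq_data = data["sequences"][sequence]
        gt_normalized = seq_data[data["gt_field_name"]]  # (n_frames, m_gts)
        pred_normalized = seq_data[model]                # (n_frames, l_preds)
        img_res = seq_data["resolution"]                 # (h, w)
        for gt_frame, pred_frame in zip(gt_normalized, pred_normalized):
            # _fo_dets_to_metrics_dict is assumed to live in the same module
            processed_pred = _fo_dets_to_metrics_dict(
                fo_dets=pred_frame, w=img_res[1], h=img_res[0], include_scores=True
            )
            processed_gt = _fo_dets_to_metrics_dict(
                fo_dets=gt_frame, w=img_res[1], h=img_res[0], include_scores=False
            )
            predictions.append(processed_pred[0]["boxes"].tolist())
            references.append(processed_gt[0]["boxes"].tolist())
            # update the metric with the current frame's detections
            metric.coco_metric.update(processed_pred, processed_gt)

    # three return values to match the new call site:
    # self, predictions, references = _add_batch(self, data, model)
    return metric, predictions, references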
 
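Usage note: the relocated converter's contract is spelled out in the removed docstring above (xywhn boxes denormalized to pixels, None confidences mapped to 1.0, labels hard-coded to 1). A small usage sketch, assuming the import path added by this commit and plain dicts carrying the two fiftyone fields the removed implementation reads ("bounding_box", "confidence"):

from seametrics.fo_utils.utils import _fo_dets_to_metrics_dict  # import path per this commit

# Two detections on a 1920x1080 frame; boxes are xywhn (normalized x, y, w, h).
frame_dets = [
    {"bounding_box": [0.10, 0.20, 0.30, 0.40], "confidence": 0.9},
    {"bounding_box": [0.50, 0.50, 0.25, 0.25], "confidence": None},  # gt-style: None -> 1.0
]

out = _fo_dets_to_metrics_dict(fo_dets=frame_dets, w=1920, h=1080, include_scores=True)
print(out[0]["boxes"])   # pixel boxes: [[192. 216. 576. 432.] [960. 540. 480. 270.]]
print(out[0]["scores"])  # [0.9 1. ]
print(out[0]["labels"])  # [1 1] -- see the "TODO: map to numbers" in the removed code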