franzi2505 committed on
Commit
ccb23fb
·
1 Parent(s): 3359d6e
Files changed (2) hide show
  1. README.md +24 -9
  2. det-metrics.py +32 -14
README.md CHANGED
@@ -22,15 +22,30 @@ This metric can be used to calculate object detection metrics. It has an option
22
  ```
23
  >>> import evaluate
24
  >>> from seametrics.fo_to_payload.utils import fo_to_payload
25
- >>> payload = fo_to_payload(
26
- dataset=dataset,
27
- gt_field=gt_field,
28
- models=model_list
29
- )
30
- >>> for model in payload["models"]:
31
- >>> module = evaluate.load("./detection_metric.py", iou_thresholds=0.9)
32
- >>> module.add_batch(payload, model=model)
33
- >>> result = module.compute()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
  ```
35
 
36
  ### Metric Settings
 
22
  ```
23
  >>> import evaluate
24
  >>> from seametrics.fo_to_payload.utils import fo_to_payload
25
+ >>> b = fo_to_payload(
26
+ >>> dataset="SAILING_DATASET_QA",
27
+ >>> gt_field="ground_truth_det",
28
+ >>> models=["yolov5n6_RGB_D2304-v1_9C"],
29
+ >>> sequence_list=["Trip_14_Seq_1"],
30
+ >>> data_type="rgb"
31
+ >>> )
32
+ >>> module = evaluate.load("SEA-AI/det-metrics.py")
33
+ >>> module.add_batch(b)
34
+ >>> res = module.compute()
35
+ >>> print(res)
36
+ {'all': {'range': [0, 10000000000.0],
37
+ 'iouThr': '0.00',
38
+ 'maxDets': 100,
39
+ 'tp': 89,
40
+ 'fp': 13,
41
+ 'fn': 15,
42
+ 'duplicates': 1,
43
+ 'precision': 0.8725490196078431,
44
+ 'recall': 0.8557692307692307,
45
+ 'f1': 0.8640776699029126,
46
+ 'support': 104,
47
+ 'fpi': 0,
48
+ 'nImgs': 22}}
49
  ```
50
 
51
  ### Metric Settings
det-metrics.py CHANGED
@@ -177,9 +177,19 @@ class DetectionMetric(evaluate.Metric):
177
  gt_normalized = seq_data[data["gt_field_name"]] # shape: (n_frames, m_gts)
178
  pred_normalized = seq_data[model] # shape: (n_frames, l_preds)
179
  img_res = seq_data["resolution"] # (h, w)
180
- for gt_frame, pred_frame in zip(gt_normalized, pred_normalized): # iterate over all frames
181
- processed_pred = self._fo_dets_to_metrics_dict(pred_frame, w=img_res[1], h=img_res[0])
182
- processed_gt = self._fo_dets_to_metrics_dict(gt_frame, w=img_res[1], h=img_res[0])
 
 
 
 
 
 
 
 
 
 
183
  predictions.append(processed_pred[0]["boxes"].tolist())
184
  references.append(processed_gt[0]["boxes"].tolist())
185
 
@@ -204,7 +214,8 @@ class DetectionMetric(evaluate.Metric):
204
  @staticmethod
205
  def _fo_dets_to_metrics_dict(fo_dets: list,
206
  w: int,
207
- h: int) -> List[Dict[str, np.ndarray]]:
 
208
  """Convert list of fiftyone detections to format that is
209
  required by PrecisionRecallF1Support() function of seametrics library
210
 
@@ -236,14 +247,21 @@ class DetectionMetric(evaluate.Metric):
236
  detections.append(
237
  [bbox[0]*w, bbox[1]*h, bbox[2]*w, bbox[3]*h]
238
  )
239
- scores.append(det["confidence"]) # None for gt
240
- #labels.append(bbox["label"])
241
  labels.append(1)
242
-
243
- return [
244
- dict(
245
- boxes=np.array(detections),
246
- scores=np.array(scores),
247
- labels=np.array(labels)
248
- )
249
- ]
 
 
 
 
 
 
 
 
 
177
  gt_normalized = seq_data[data["gt_field_name"]] # shape: (n_frames, m_gts)
178
  pred_normalized = seq_data[model] # shape: (n_frames, l_preds)
179
  img_res = seq_data["resolution"] # (h, w)
180
+ for gt_frame, pred_frame in zip(gt_normalized, pred_normalized): # iterate over all frame
181
+ processed_pred = self._fo_dets_to_metrics_dict(
182
+ fo_dets=pred_frame,
183
+ w=img_res[1],
184
+ h=img_res[0],
185
+ include_scores=True
186
+ )
187
+ processed_gt = self._fo_dets_to_metrics_dict(
188
+ fo_dets=gt_frame,
189
+ w=img_res[1],
190
+ h=img_res[0],
191
+ include_scores=False
192
+ )
193
  predictions.append(processed_pred[0]["boxes"].tolist())
194
  references.append(processed_gt[0]["boxes"].tolist())
195
 
 
214
  @staticmethod
215
  def _fo_dets_to_metrics_dict(fo_dets: list,
216
  w: int,
217
+ h: int,
218
+ include_scores: bool = False) -> List[Dict[str, np.ndarray]]:
219
  """Convert list of fiftyone detections to format that is
220
  required by PrecisionRecallF1Support() function of seametrics library
221
 
 
247
  detections.append(
248
  [bbox[0]*w, bbox[1]*h, bbox[2]*w, bbox[3]*h]
249
  )
250
+ scores.append(det["confidence"] if det["confidence"] is not None else 1.0) # None for gt
 
251
  labels.append(1)
252
+
253
+ if include_scores:
254
+ return [
255
+ dict(
256
+ boxes=np.array(detections),
257
+ scores=np.array(scores),
258
+ labels=np.array(labels)
259
+ )
260
+ ]
261
+ else:
262
+ return [
263
+ dict(
264
+ boxes=np.array(detections),
265
+ labels=np.array(labels)
266
+ )
267
+ ]