import datasets
import evaluate
import numpy as np
import torch

from seametrics.payload import Payload
from utils import bbox_iou, bbox_bep
_CITATION = """\
@InProceedings{huggingface:module,
title = {A great new module},
authors={huggingface, Inc.},
year={2020}
}\
@article{milan2016mot16,
title={MOT16: A benchmark for multi-object tracking},
author={Milan, Anton and Leal-Taix{\'e}, Laura and Reid, Ian and Roth, Stefan and Schindler, Konrad},
journal={arXiv preprint arXiv:1603.00831},
year={2016}
}
"""
_DESCRIPTION = """\
The box-metrics module provides a set of metrics to evaluate the performance
of object detection algorithms in terms of the sizing and positioning of the
predicted bounding boxes relative to the ground truth."""
_KWARGS_DESCRIPTION = """
Calculates sizing and positioning scores for predicted bounding boxes against
ground-truth references.
Args:
    predictions: list of predicted bounding boxes per frame. Each box is a
        sequence of floats in absolute pixel xyxy format.
    references: list of ground-truth bounding boxes per frame, in the same
        format as the predictions.
    max_iou (`float`, *optional*):
        Minimum Intersection over Union (IoU) threshold for a prediction to
        be matched to a ground-truth box; matches require an IoU strictly
        greater than this value. Default is 0.01.
"""
# @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class BoxMetrics(evaluate.Metric):
    def __init__(self, max_iou: float = 0.01, **kwargs):
        # initialize the evaluate.Metric machinery (info, features, caching)
        super().__init__(**kwargs)
        self.max_iou = max_iou
        self.boxes = {}
        self.gt_field = "ground_truth_det"
    def _info(self):
        # Describes the evaluate.EvaluationModuleInfo object for this module
        return evaluate.MetricInfo(
            # This is the description that will appear on the modules page.
            module_type="metric",
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            # This defines the format of each prediction and reference
            features=datasets.Features({
                "predictions": datasets.Sequence(
                    datasets.Sequence(datasets.Value("float"))
                ),
                "references": datasets.Sequence(
                    datasets.Sequence(datasets.Value("float"))
                )
            }),
            # Additional links to the codebase or references
            codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
            reference_urls=["http://path.to.reference.url/new_module"]
        )
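    # Note: the Features above describe one frame as a list of float lists,
    # e.g. predictions=[[x1, y1, x2, y2], ...]. In practice this module is fed
    # through `add_payload` below rather than the generic evaluate `add` API.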
    def add_payload(self, payload: Payload):
        """Convert a payload to per-frame box tensors and store them for evaluation."""
        self.add(payload)
    def add(self, payload: Payload):
        self.gt_field = payload.gt_field_name
        for sequence in payload.sequences:
            self.boxes[sequence] = {}
            target = payload.sequences[sequence][self.gt_field]
            resolution = payload.sequences[sequence]["resolution"]
            target_tm = self.payload_labels_to_tm(target, resolution)
            self.boxes[sequence][self.gt_field] = target_tm
            for model in payload.models:
                preds = payload.sequences[sequence][model]
                preds_tm = self.payload_preds_to_tm(preds, resolution)
                self.boxes[sequence][model] = preds_tm
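    # After `add`, self.boxes has the following layout (sketch; n_i and m_i
    # are the number of boxes in frame i):
    #     {sequence_name: {gt_field:   [Tensor(n_i, 5), ...],   # one tensor per frame
    #                      model_name: [Tensor(m_i, 6), ...]}}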
    def compute(self):
        """Compute the metric values for every sequence and model."""
        output = {}
        for sequence in self.boxes:
            output[sequence] = {}
            target = self.boxes[sequence][self.gt_field]
            for model in self.boxes[sequence]:
                if model == self.gt_field:
                    continue  # do not score the ground truth against itself
                preds = self.boxes[sequence][model]
                # accumulators are reset per model so metrics do not leak
                # between models of the same sequence
                ious, beps = [], []
                bottom_x, bottom_y, widths, heights = [], [], [], []
                for i in range(len(preds)):
                    target_tm_bbs = target[i][:, 1:]
                    pred_tm_bbs = preds[i][:, :4]
                    if target_tm_bbs.shape[0] == 0 or pred_tm_bbs.shape[0] == 0:
                        continue
                    for t_box in target_tm_bbs:
                        iou = bbox_iou(t_box.unsqueeze(0), pred_tm_bbs, xywh=False)
                        bep = bbox_bep(t_box.unsqueeze(0), pred_tm_bbs, xywh=False)
                        matches = pred_tm_bbs[iou.squeeze(1) > self.max_iou]
                        bep = bep[iou > self.max_iou]
                        iou = iou[iou > self.max_iou]
                        if torch.any(iou <= 0):
                            raise ValueError("IoU should be greater than 0, please contact the code maintainer")
                        if torch.any(bep <= 0):
                            raise ValueError("BEP should be greater than 0, please contact the code maintainer")
                        ious.extend(iou.tolist())
                        beps.extend(bep.tolist())
                        for match in matches:
                            # absolute differences between each matched
                            # prediction and its target box
                            p_xc = (match[0].item() + match[2].item()) / 2
                            t_xc = (t_box[0].item() + t_box[2].item()) / 2
                            p_w = match[2].item() - match[0].item()
                            t_w = t_box[2].item() - t_box[0].item()
                            p_h = match[3].item() - match[1].item()
                            t_h = t_box[3].item() - t_box[1].item()
                            bottom_x.append(abs(t_xc - p_xc))
                            bottom_y.append(abs(t_box[1].item() - match[1].item()))
                            widths.append(abs(t_w - p_w))
                            heights.append(abs(t_h - p_h))
                output[sequence][model] = {
                    "iou_mean": np.mean(ious),
                    "bep_mean": np.mean(beps),
                    "bottom_x_mean": np.mean(bottom_x),
                    "bottom_y_mean": np.mean(bottom_y),
                    "width_mean": np.mean(widths),
                    "height_mean": np.mean(heights),
                    "bottom_x_std": np.std(bottom_x),
                    "bottom_y_std": np.std(bottom_y),
                    "width_std": np.std(widths),
                    "height_std": np.std(heights)
                }
        return output
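    # The returned structure is nested per sequence and per model, e.g.
    # (illustrative values):
    #     {"sequence_1": {"model_a": {"iou_mean": 0.73, "bep_mean": 0.81, ...}}}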
    @staticmethod
    def payload_labels_to_tm(labels, resolution):
        """Convert the labels of a payload sequence to per-frame tensors of
        [label, x1, y1, x2, y2] rows in absolute pixel coordinates."""
        target_tm = []
        for frame in labels:
            target_tm_frame = []
            for det in frame:
                label = 0
                box = det["bounding_box"]
                # normalized xywh -> normalized xyxy
                x1, y1, x2, y2 = box[0], box[1], box[0] + box[2], box[1] + box[3]
                # normalized xyxy -> absolute pixel coordinates
                x1, y1, x2, y2 = x1 * resolution.width, y1 * resolution.height, x2 * resolution.width, y2 * resolution.height
                target_tm_frame.append([label, x1, y1, x2, y2])
            target_tm.append(torch.tensor(target_tm_frame) if len(target_tm_frame) > 0 else torch.empty((0, 5)))
        return target_tm
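    # For example, a normalized box [0.1, 0.2, 0.3, 0.4] (x, y, w, h) at a
    # 640x480 resolution becomes [label, 64.0, 96.0, 256.0, 288.0]
    # (x1, y1, x2, y2 in pixels).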
    @staticmethod
    def payload_preds_to_tm(preds, resolution):
        """Convert the predictions of a payload sequence to per-frame tensors of
        [x1, y1, x2, y2, conf, label] rows in absolute pixel coordinates."""
        preds_tm = []
        for frame in preds:
            pred_tm_frame = []
            for det in frame:
                label = 0
                box = det["bounding_box"]
                # normalized xywh -> normalized xyxy
                x1, y1, x2, y2 = box[0], box[1], box[0] + box[2], box[1] + box[3]
                # normalized xyxy -> absolute pixel coordinates
                x1, y1, x2, y2 = x1 * resolution.width, y1 * resolution.height, x2 * resolution.width, y2 * resolution.height
                conf = 1  # confidence is not used downstream, so store a placeholder
                pred_tm_frame.append([x1, y1, x2, y2, conf, label])
            preds_tm.append(torch.tensor(pred_tm_frame) if len(pred_tm_frame) > 0 else torch.empty((0, 6)))
        return preds_tm
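
# Minimal smoke test (sketch). It bypasses `add_payload` and fills
# `metric.boxes` directly with the tensor layouts produced by the converters
# above (targets: [label, x1, y1, x2, y2]; predictions:
# [x1, y1, x2, y2, conf, label], absolute pixels). It assumes `bbox_iou` and
# `bbox_bep` from the local `utils` module behave like their YOLO-style
# counterparts and return positive values for overlapping boxes.
if __name__ == "__main__":
    metric = BoxMetrics(max_iou=0.01)
    metric.boxes["demo_sequence"] = {
        metric.gt_field: [torch.tensor([[0.0, 10.0, 10.0, 50.0, 50.0]])],
        "demo_model": [torch.tensor([[12.0, 11.0, 48.0, 52.0, 1.0, 0.0]])],
    }
    print(metric.compute())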