Feng Wang
committed on
Commit
·
07f9669
1
Parent(s):
f6fef9a
fix(evaluator): fix attr bug in COCOEvaluator (#1067)
Browse files- tools/eval.py +1 -1
- yolox/evaluators/coco_evaluator.py +12 -10
tools/eval.py
CHANGED
@@ -143,7 +143,7 @@ def main(exp, args, num_gpu):
|
|
143 |
logger.info("Model Structure:\n{}".format(str(model)))
|
144 |
|
145 |
evaluator = exp.get_evaluator(args.batch_size, is_distributed, args.test, args.legacy)
|
146 |
-
evaluator.per_class_mAP = True
|
147 |
evaluator.per_class_AR = True
|
148 |
|
149 |
torch.cuda.set_device(rank)
|
|
|
143 |
logger.info("Model Structure:\n{}".format(str(model)))
|
144 |
|
145 |
evaluator = exp.get_evaluator(args.batch_size, is_distributed, args.test, args.legacy)
|
146 |
+
evaluator.per_class_AP = True
|
147 |
evaluator.per_class_AR = True
|
148 |
|
149 |
torch.cuda.set_device(rank)
|
yolox/evaluators/coco_evaluator.py
CHANGED
@@ -50,8 +50,8 @@ def per_class_AR_table(coco_eval, class_names=COCO_CLASSES, headers=["class", "A
|
|
50 |
return table
|
51 |
|
52 |
|
53 |
-
def per_class_mAP_table(coco_eval, class_names=COCO_CLASSES, headers=["class", "AP"], colums=6):
|
54 |
-
|
55 |
precisions = coco_eval.eval["precision"]
|
56 |
# dimension of precisions: [TxRxKxAxM]
|
57 |
# precision has dims (iou, recall, cls, area range, max dets)
|
@@ -63,10 +63,10 @@ def per_class_mAP_table(coco_eval, class_names=COCO_CLASSES, headers=["class", "
|
|
63 |
precision = precisions[:, :, idx, 0, -1]
|
64 |
precision = precision[precision > -1]
|
65 |
ap = np.mean(precision) if precision.size else float("nan")
|
66 |
-
|
67 |
|
68 |
-
num_cols = min(colums, len(per_class_mAP) * len(headers))
|
69 |
-
result_pair = [x for pair in per_class_mAP.items() for x in pair]
|
70 |
row_pair = itertools.zip_longest(*[result_pair[i::num_cols] for i in range(num_cols)])
|
71 |
table_headers = headers * (num_cols // len(headers))
|
72 |
table = tabulate(
|
@@ -89,7 +89,7 @@ class COCOEvaluator:
|
|
89 |
nmsthre: float,
|
90 |
num_classes: int,
|
91 |
testdev: bool = False,
|
92 |
-
|
93 |
per_class_AR: bool = False,
|
94 |
):
|
95 |
"""
|
@@ -100,7 +100,8 @@ class COCOEvaluator:
|
|
100 |
confthre: confidence threshold ranging from 0 to 1, which
|
101 |
is defined in the config file.
|
102 |
nmsthre: IoU threshold of non-max supression ranging from 0 to 1.
|
103 |
-
|
|
|
104 |
"""
|
105 |
self.dataloader = dataloader
|
106 |
self.img_size = img_size
|
@@ -108,7 +109,8 @@ class COCOEvaluator:
|
|
108 |
self.nmsthre = nmsthre
|
109 |
self.num_classes = num_classes
|
110 |
self.testdev = testdev
|
111 |
-
self.per_class_mAP = per_class_mAP
|
|
|
112 |
|
113 |
def evaluate(
|
114 |
self,
|
@@ -278,8 +280,8 @@ class COCOEvaluator:
|
|
278 |
with contextlib.redirect_stdout(redirect_string):
|
279 |
cocoEval.summarize()
|
280 |
info += redirect_string.getvalue()
|
281 |
-
if self.per_class_mAP:
|
282 |
-
info += "per class mAP:\n" + per_class_mAP_table(cocoEval) + "\n"
|
283 |
if self.per_class_AR:
|
284 |
info += "per class AR:\n" + per_class_AR_table(cocoEval) + "\n"
|
285 |
return cocoEval.stats[0], cocoEval.stats[1], info
|
|
|
50 |
return table
|
51 |
|
52 |
|
53 |
+
def per_class_AP_table(coco_eval, class_names=COCO_CLASSES, headers=["class", "AP"], colums=6):
|
54 |
+
per_class_AP = {}
|
55 |
precisions = coco_eval.eval["precision"]
|
56 |
# dimension of precisions: [TxRxKxAxM]
|
57 |
# precision has dims (iou, recall, cls, area range, max dets)
|
|
|
63 |
precision = precisions[:, :, idx, 0, -1]
|
64 |
precision = precision[precision > -1]
|
65 |
ap = np.mean(precision) if precision.size else float("nan")
|
66 |
+
per_class_AP[name] = float(ap * 100)
|
67 |
|
68 |
+
num_cols = min(colums, len(per_class_AP) * len(headers))
|
69 |
+
result_pair = [x for pair in per_class_AP.items() for x in pair]
|
70 |
row_pair = itertools.zip_longest(*[result_pair[i::num_cols] for i in range(num_cols)])
|
71 |
table_headers = headers * (num_cols // len(headers))
|
72 |
table = tabulate(
|
|
|
89 |
nmsthre: float,
|
90 |
num_classes: int,
|
91 |
testdev: bool = False,
|
92 |
+
per_class_AP: bool = False,
|
93 |
per_class_AR: bool = False,
|
94 |
):
|
95 |
"""
|
|
|
100 |
confthre: confidence threshold ranging from 0 to 1, which
|
101 |
is defined in the config file.
|
102 |
nmsthre: IoU threshold of non-max supression ranging from 0 to 1.
|
103 |
+
per_class_AP: Show per class AP during evalution or not. Default to False.
|
104 |
+
per_class_AR: Show per class AR during evalution or not. Default to False.
|
105 |
"""
|
106 |
self.dataloader = dataloader
|
107 |
self.img_size = img_size
|
|
|
109 |
self.nmsthre = nmsthre
|
110 |
self.num_classes = num_classes
|
111 |
self.testdev = testdev
|
112 |
+
self.per_class_AP = per_class_AP
|
113 |
+
self.per_class_AR = per_class_AR
|
114 |
|
115 |
def evaluate(
|
116 |
self,
|
|
|
280 |
with contextlib.redirect_stdout(redirect_string):
|
281 |
cocoEval.summarize()
|
282 |
info += redirect_string.getvalue()
|
283 |
+
if self.per_class_AP:
|
284 |
+
info += "per class AP:\n" + per_class_AP_table(cocoEval) + "\n"
|
285 |
if self.per_class_AR:
|
286 |
info += "per class AR:\n" + per_class_AR_table(cocoEval) + "\n"
|
287 |
return cocoEval.stats[0], cocoEval.stats[1], info
|