Younes Belkada committed · Commit 0189f5d
Parent(s): 5aeb8c4

add files
Files changed:
- coco_utils.py (+319, -0)
- cocoevaluate.py (+49, -5)
coco_utils.py
ADDED
@@ -0,0 +1,319 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
COCO evaluator that works in distributed mode.
Mostly copy-paste from https://github.com/pytorch/vision/blob/edfd5a7/references/detection/coco_eval.py
The difference is that there is less copy-pasting from pycocotools
at the end of the file, as python3 can suppress prints with contextlib
"""
import os
import contextlib
import copy
import numpy as np
import torch
import torchvision
import torch.distributed as dist

from pycocotools.cocoeval import COCOeval
from pycocotools.coco import COCO
import pycocotools.mask as mask_util

import pickle


def is_dist_avail_and_initialized():
    if not dist.is_available():
        return False
    if not dist.is_initialized():
        return False
    return True


def get_world_size():
    if not is_dist_avail_and_initialized():
        return 1
    return dist.get_world_size()


def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    world_size = get_world_size()
    if world_size == 1:
        return [data]

    # serialized to a Tensor
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to("cuda")

    # obtain Tensor size of each rank
    local_size = torch.tensor([tensor.numel()], device="cuda")
    size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)

    # receiving Tensor from all ranks
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda"))
    if local_size != max_size:
        padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda")
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)

    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))

    return data_list


def get_coco_api_from_dataset(dataset):
    for _ in range(10):
        # if isinstance(dataset, torchvision.datasets.CocoDetection):
        #     break
        if isinstance(dataset, torch.utils.data.Subset):
            dataset = dataset.dataset
    if isinstance(dataset, torchvision.datasets.CocoDetection):
        return dataset.coco


class CocoEvaluator(object):
    def __init__(self, coco_gt, iou_types):
        assert isinstance(iou_types, (list, tuple))
        coco_gt = copy.deepcopy(coco_gt)
        self.coco_gt = coco_gt

        self.iou_types = iou_types
        self.coco_eval = {}
        for iou_type in iou_types:
            self.coco_eval[iou_type] = COCOeval(coco_gt, iouType=iou_type)

        self.img_ids = []
        self.eval_imgs = {k: [] for k in iou_types}

    def update(self, predictions):
        img_ids = list(np.unique(list(predictions.keys())))
        self.img_ids.extend(img_ids)

        for iou_type in self.iou_types:
            results = self.prepare(predictions, iou_type)

            # suppress pycocotools prints
            with open(os.devnull, 'w') as devnull:
                with contextlib.redirect_stdout(devnull):
                    coco_dt = COCO.loadRes(self.coco_gt, results) if results else COCO()
            coco_eval = self.coco_eval[iou_type]

            coco_eval.cocoDt = coco_dt
            coco_eval.params.imgIds = list(img_ids)
            img_ids, eval_imgs = evaluate(coco_eval)

            self.eval_imgs[iou_type].append(eval_imgs)

    def synchronize_between_processes(self):
        for iou_type in self.iou_types:
            self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2)
            create_common_coco_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type])

    def accumulate(self):
        for coco_eval in self.coco_eval.values():
            coco_eval.accumulate()

    def summarize(self):
        for iou_type, coco_eval in self.coco_eval.items():
            print("IoU metric: {}".format(iou_type))
            coco_eval.summarize()

    def prepare(self, predictions, iou_type):
        if iou_type == "bbox":
            return self.prepare_for_coco_detection(predictions)
        elif iou_type == "segm":
            return self.prepare_for_coco_segmentation(predictions)
        elif iou_type == "keypoints":
            return self.prepare_for_coco_keypoint(predictions)
        else:
            raise ValueError("Unknown iou type {}".format(iou_type))

    def prepare_for_coco_detection(self, predictions):
        coco_results = []
        for original_id, prediction in predictions.items():
            if len(prediction) == 0:
                continue

            boxes = prediction["boxes"]
            boxes = convert_to_xywh(boxes).tolist()
            scores = prediction["scores"].tolist()
            labels = prediction["labels"].tolist()

            coco_results.extend(
                [
                    {
                        "image_id": original_id,
                        "category_id": labels[k],
                        "bbox": box,
                        "score": scores[k],
                    }
                    for k, box in enumerate(boxes)
                ]
            )
        return coco_results

    def prepare_for_coco_segmentation(self, predictions):
        coco_results = []
        for original_id, prediction in predictions.items():
            if len(prediction) == 0:
                continue

            scores = prediction["scores"]
            labels = prediction["labels"]
            masks = prediction["masks"]

            masks = masks > 0.5

            scores = prediction["scores"].tolist()
            labels = prediction["labels"].tolist()

            rles = [
                mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F"))[0]
                for mask in masks
            ]
            for rle in rles:
                rle["counts"] = rle["counts"].decode("utf-8")

            coco_results.extend(
                [
                    {
                        "image_id": original_id,
                        "category_id": labels[k],
                        "segmentation": rle,
                        "score": scores[k],
                    }
                    for k, rle in enumerate(rles)
                ]
            )
        return coco_results

    def prepare_for_coco_keypoint(self, predictions):
        coco_results = []
        for original_id, prediction in predictions.items():
            if len(prediction) == 0:
                continue

            boxes = prediction["boxes"]
            boxes = convert_to_xywh(boxes).tolist()
            scores = prediction["scores"].tolist()
            labels = prediction["labels"].tolist()
            keypoints = prediction["keypoints"]
            keypoints = keypoints.flatten(start_dim=1).tolist()

            coco_results.extend(
                [
                    {
                        "image_id": original_id,
                        "category_id": labels[k],
                        "keypoints": keypoint,
                        "score": scores[k],
                    }
                    for k, keypoint in enumerate(keypoints)
                ]
            )
        return coco_results


def convert_to_xywh(boxes):
    xmin, ymin, xmax, ymax = boxes.unbind(1)
    return torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1)


def merge(img_ids, eval_imgs):
    all_img_ids = all_gather(img_ids)
    all_eval_imgs = all_gather(eval_imgs)

    merged_img_ids = []
    for p in all_img_ids:
        merged_img_ids.extend(p)

    merged_eval_imgs = []
    for p in all_eval_imgs:
        merged_eval_imgs.append(p)

    merged_img_ids = np.array(merged_img_ids)
    merged_eval_imgs = np.concatenate(merged_eval_imgs, 2)

    # keep only unique (and in sorted order) images
    merged_img_ids, idx = np.unique(merged_img_ids, return_index=True)
    merged_eval_imgs = merged_eval_imgs[..., idx]

    return merged_img_ids, merged_eval_imgs


def create_common_coco_eval(coco_eval, img_ids, eval_imgs):
    img_ids, eval_imgs = merge(img_ids, eval_imgs)
    img_ids = list(img_ids)
    eval_imgs = list(eval_imgs.flatten())

    coco_eval.evalImgs = eval_imgs
    coco_eval.params.imgIds = img_ids
    coco_eval._paramsEval = copy.deepcopy(coco_eval.params)


#################################################################
# From pycocotools, just removed the prints and fixed
# a Python3 bug about unicode not defined
#################################################################


def evaluate(self):
    '''
    Run per image evaluation on given images and store results (a list of dict) in self.evalImgs
    :return: None
    '''
    # tic = time.time()
    # print('Running per image evaluation...')
    p = self.params
    # add backward compatibility if useSegm is specified in params
    if p.useSegm is not None:
        p.iouType = 'segm' if p.useSegm == 1 else 'bbox'
        print('useSegm (deprecated) is not None. Running {} evaluation'.format(p.iouType))
    # print('Evaluate annotation type *{}*'.format(p.iouType))
    p.imgIds = list(np.unique(p.imgIds))
    if p.useCats:
        p.catIds = list(np.unique(p.catIds))
    p.maxDets = sorted(p.maxDets)
    self.params = p

    self._prepare()
    # loop through images, area range, max detection number
    catIds = p.catIds if p.useCats else [-1]

    if p.iouType == 'segm' or p.iouType == 'bbox':
        computeIoU = self.computeIoU
    elif p.iouType == 'keypoints':
        computeIoU = self.computeOks
    self.ious = {
        (imgId, catId): computeIoU(imgId, catId)
        for imgId in p.imgIds
        for catId in catIds}

    evaluateImg = self.evaluateImg
    maxDet = p.maxDets[-1]
    evalImgs = [
        evaluateImg(imgId, catId, areaRng, maxDet)
        for catId in catIds
        for areaRng in p.areaRng
        for imgId in p.imgIds
    ]
    # this is NOT in the pycocotools code, but could be done outside
    evalImgs = np.asarray(evalImgs).reshape(len(catIds), len(p.areaRng), len(p.imgIds))
    self._paramsEval = copy.deepcopy(self.params)
    # toc = time.time()
    # print('DONE (t={:0.2f}s).'.format(toc-tic))
    return p.imgIds, evalImgs
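For orientation, here is a minimal sketch of the evaluation loop this class is designed for; it mirrors the driver loop in DETR's engine code. `model`, `data_loader`, `postprocess`, and `dataset` are hypothetical stand-ins, and the dataset is assumed to wrap a torchvision `CocoDetection`:

import torch

from coco_utils import CocoEvaluator, get_coco_api_from_dataset

# Unwrap any torch.utils.data.Subset layers to reach the pycocotools COCO API.
base_ds = get_coco_api_from_dataset(dataset)
evaluator = CocoEvaluator(base_ds, iou_types=["bbox"])

model.eval()
with torch.no_grad():
    for images, targets in data_loader:
        outputs = model(images)  # hypothetical forward pass
        # postprocess(...) stands in for whatever maps raw outputs to per-image
        # dicts of {"scores": Tensor[N], "labels": Tensor[N], "boxes": Tensor[N, 4]}
        results = {t["image_id"].item(): postprocess(o) for t, o in zip(targets, outputs)}
        evaluator.update(results)

evaluator.synchronize_between_processes()  # all_gather per-image results across ranks
evaluator.accumulate()
evaluator.summarize()  # prints the standard COCO AP/AR table per iou_type

Note that `synchronize_between_processes` is built on `all_gather`, which short-circuits when the world size is 1, so the same loop runs unchanged on a single GPU.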
cocoevaluate.py
CHANGED
@@ -15,7 +15,9 @@
 
 import evaluate
 import datasets
+import pyarrow as pa
 
+from .coco_utils import CocoEvaluator, get_coco_api_from_dataset
 
 # TODO: Add BibTeX citation
 _CITATION = """\
@@ -56,10 +58,25 @@ Examples:
 # TODO: Define external resources urls if needed
 BAD_WORDS_URL = "http://url/to/external/resource/bad_words.txt"
 
+# lists - summarize long lists similarly to NumPy
+# arrays/tensors - let the frameworks control formatting
+def summarize_if_long_list(obj):
+    if not type(obj) == list or len(obj) <= 6:
+        return f"{obj}"
+
+    def format_chunk(chunk):
+        return ", ".join(repr(x) for x in chunk)
+
+    return f"[{format_chunk(obj[:3])}, ..., {format_chunk(obj[-3:])}]"
 
 @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
 class COCOEvaluate(evaluate.Metric):
     """TODO: Short description of my evaluation module."""
+    def __init__(self, coco_dataset, iou_types=['bbox'], **kwargs):
+        super().__init__(**kwargs)
+        base_ds = get_coco_api_from_dataset(coco_dataset)
+        self.coco_evaluator = CocoEvaluator(base_ds, iou_types)
+
 
     def _info(self):
         # TODO: Specifies the evaluate.EvaluationModuleInfo object
@@ -71,8 +88,29 @@ class COCOEvaluate(evaluate.Metric):
             inputs_description=_KWARGS_DESCRIPTION,
             # This defines the format of each prediction and reference
             features=datasets.Features({
-                'predictions':
-
+                'predictions': [
+                    datasets.Features(
+                        {
+                            'scores': datasets.Array2D(shape=(100,), dtype='float32'),
+                            'labels': datasets.Array2D(shape=(100,), dtype='int64'),
+                            'boxes': datasets.Array2D(shape=(100, 4), dtype='float32'),
+                        })
+                ]
+                ,
+                'references': [
+                    datasets.Features(
+                        {
+                            'size': datasets.Value('int64'),
+                            'image_id': datasets.Value('int64'),
+                            'boxes': datasets.Array2D(shape=(20, 4), dtype='float32'),
+                            'class_labels': datasets.Value('int64'),
+                            'iscrowd': datasets.Value('int64'),
+                            'orig_size': datasets.Value('int64'),
+                            'area': datasets.Value('float32'),
+                        }
+                    )
+                ],
+
             }),
             # Homepage of the module for documentation
             homepage="http://module.homepage",
@@ -86,10 +124,16 @@ class COCOEvaluate(evaluate.Metric):
         # TODO: Download external resources if needed
         pass
 
-    def _compute(self, predictions, references):
+    def _compute(self, predictions, references,):
         """Returns the scores"""
-
-
+        for pred, ref in zip(predictions, references):
+            res = {target['image_id'].item(): output for target, output in zip(ref, pred)}
+            self.coco_evaluator.update(res)
+        self.coco_evaluator.synchronize_between_processes()
+        self.coco_evaluator.accumulate()
+        self.coco_evaluator.summarize()
+
+        accuracy = None
         return {
             "accuracy": accuracy,
         }
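Assuming the Space is consumed like any other `evaluate` module, end-to-end usage would look roughly like the sketch below. `MODULE_PATH`, `model`, `eval_dataloader`, `coco_val_dataset`, and `format_preds` are placeholders, not part of this commit; `evaluate.load` forwards extra keyword arguments to the module's `__init__`:

import evaluate

# MODULE_PATH is a placeholder for this Space's id. The dataset is passed so
# __init__ can build the pycocotools ground-truth API via get_coco_api_from_dataset.
metric = evaluate.load(MODULE_PATH, coco_dataset=coco_val_dataset, iou_types=["bbox"])

for batch in eval_dataloader:
    outputs = model(batch)  # hypothetical model producing scores/labels/boxes
    metric.add_batch(predictions=format_preds(outputs), references=batch["references"])

# compute() invokes _compute, which feeds each (prediction, reference) pair into
# CocoEvaluator.update, then synchronizes, accumulates, and prints the COCO summary.
results = metric.compute()

As written, `_compute` only prints the COCO summary and returns a placeholder `accuracy` of None, so the printed table is the real output of this version of the module.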