Manan Goel committed · Commit 3314500 · Parent(s): 6b771f2

feat(logger): W&B logger with VOC datasets (#1525)

Files changed:
- .github/workflows/ci.yaml +1 -0
- yolox/data/datasets/voc.py +4 -0
- yolox/utils/logger.py +57 -5
.github/workflows/ci.yaml CHANGED

@@ -34,6 +34,7 @@ jobs:
           pip install -r requirements.txt
           pip install isort==4.3.21
           pip install flake8==3.8.3
+          pip install "importlib-metadata<5.0"
       # Runs a set of commands using the runners shell
       - name: Format check
         run: ./.github/workflows/format_check.sh
yolox/data/datasets/voc.py CHANGED

@@ -119,6 +119,10 @@ class VOCDetection(Dataset):
         self._annopath = os.path.join("%s", "Annotations", "%s.xml")
         self._imgpath = os.path.join("%s", "JPEGImages", "%s.jpg")
         self._classes = VOC_CLASSES
+        self.cats = [
+            {"id": idx, "name": val} for idx, val in enumerate(VOC_CLASSES)
+        ]
+        self.class_ids = list(range(len(VOC_CLASSES)))
         self.ids = list()
         for (year, name) in image_sets:
             self._year = year
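For illustration, a minimal sketch of what the new attributes look like, using a truncated stand-in for the 20-entry VOC_CLASSES tuple. They mirror the cats / class_ids attributes the W&B logger already reads off the COCO dataset, including the id_to_class lookup it builds:

# Illustrative only: the logger-facing attributes added to VOCDetection.
VOC_CLASSES = ("aeroplane", "bicycle", "bird")  # truncated stand-in for the real tuple

cats = [{"id": idx, "name": val} for idx, val in enumerate(VOC_CLASSES)]
class_ids = list(range(len(VOC_CLASSES)))

# WandbLogger builds this lookup from dataset.cats:
id_to_class = {cls["id"]: cls["name"] for cls in cats}
print(id_to_class)  # {0: 'aeroplane', 1: 'bicycle', 2: 'bird'}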
yolox/utils/logger.py CHANGED

@@ -169,6 +169,8 @@ class WandbLogger(object):
                 "Please install wandb using pip install wandb"
             )

+        from yolox.data.datasets import VOCDetection
+
         self.project = project
         self.name = name
         self.id = id

@@ -202,7 +204,10 @@ class WandbLogger(object):
         self.run.define_metric("train/step")
         self.run.define_metric("train/*", step_metric="train/step")

+        self.voc_dataset = VOCDetection
+
         if val_dataset and self.num_log_images != 0:
+            self.val_dataset = val_dataset
             self.cats = val_dataset.cats
             self.id_to_class = {
                 cls['id']: cls['name'] for cls in self.cats
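As an aside, a tiny sketch (with made-up class names) of the dispatch pattern introduced above: the logger stores the VOCDetection class object itself in self.voc_dataset so it can later branch on the concrete type of val_dataset:

# Hypothetical stand-ins, only to show the pattern of storing a class for a later isinstance check.
class CocoLikeDataset: ...
class VocLikeDataset: ...

voc_dataset = VocLikeDataset    # class object kept as an attribute
val_dataset = VocLikeDataset()  # dataset instance handed to the logger

print(isinstance(val_dataset, voc_dataset))  # True -> take the VOC conversion path below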
@@ -241,8 +246,12 @@ class WandbLogger(object):
                 id = data_point[3]
                 img = np.transpose(img, (1, 2, 0))
                 img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+
+                if isinstance(id, torch.Tensor):
+                    id = id.item()
+
                 self.val_table.add_data(
-                    id
+                    id,
                     self.wandb.Image(img)
                 )

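The new torch.Tensor check is there because the image id pulled from the data point can arrive as a tensor rather than a plain Python scalar, while the W&B table should be keyed by plain ids. A standalone illustration of that normalization, with hypothetical values:

import torch

def normalize_id(img_id):
    # Zero-dim tensors become plain ints; ints pass through unchanged.
    if isinstance(img_id, torch.Tensor):
        img_id = img_id.item()
    return img_id

print(normalize_id(torch.tensor(42)))  # 42
print(normalize_id(7))                 # 7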
@@ -250,6 +259,43 @@ class WandbLogger(object):
             self.run.use_artifact(self.val_artifact)
             self.val_artifact.wait()

+    def _convert_prediction_format(self, predictions):
+        image_wise_data = defaultdict(int)
+
+        for key, val in predictions.items():
+            img_id = key
+
+            try:
+                bboxes, cls, scores = val
+            except KeyError:
+                bboxes, cls, scores = val["bboxes"], val["categories"], val["scores"]
+
+            # These store information of actual bounding boxes i.e. the ones which are not None
+            act_box = []
+            act_scores = []
+            act_cls = []
+
+            if bboxes is not None:
+                for box, classes, score in zip(bboxes, cls, scores):
+                    if box is None or score is None or classes is None:
+                        continue
+                    act_box.append(box)
+                    act_scores.append(score)
+                    act_cls.append(classes)
+
+            image_wise_data.update({
+                int(img_id): {
+                    "bboxes": [box.numpy().tolist() for box in act_box],
+                    "scores": [score.numpy().item() for score in act_scores],
+                    "categories": [
+                        self.val_dataset.class_ids[int(act_cls[ind])]
+                        for ind in range(len(act_box))
+                    ],
+                }
+            })
+
+        return image_wise_data
+
     def log_metrics(self, metrics, step=None):
         """
         Args:
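To make the conversion concrete, here is a standalone mimic of _convert_prediction_format (not the method itself): the VOC evaluator hands back per-image tuples of tensors, and the logger wants the per-image dict layout the COCO path already produces. class_ids stands in for self.val_dataset.class_ids, and the sample boxes and scores are invented:

import torch

class_ids = list(range(20))  # stand-in for self.val_dataset.class_ids

# Hypothetical evaluator output: {img_id: (bboxes, classes, scores)}
predictions = {
    0: (torch.tensor([[10.0, 20.0, 110.0, 220.0]]),  # one box, xyxy
        torch.tensor([3.0]),                          # class index
        torch.tensor([0.87])),                        # confidence
    1: (None, None, None),                            # image with no detections
}

image_wise_data = {}
for img_id, (bboxes, cls, scores) in predictions.items():
    act_box, act_cls, act_scores = [], [], []
    if bboxes is not None:
        for box, c, s in zip(bboxes, cls, scores):
            if box is None or c is None or s is None:
                continue
            act_box.append(box)
            act_cls.append(c)
            act_scores.append(s)
    image_wise_data[int(img_id)] = {
        "bboxes": [b.numpy().tolist() for b in act_box],
        "scores": [s.numpy().item() for s in act_scores],
        "categories": [class_ids[int(c)] for c in act_cls],
    }

print(image_wise_data[0]["categories"])  # [3]
print(image_wise_data[1])                # {'bboxes': [], 'scores': [], 'categories': []}

The method above does the same, but accumulates into a defaultdict and reads the class ids from the stored VOC dataset.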
@@ -277,16 +323,23 @@ class WandbLogger(object):
         for cls in self.cats:
             columns.append(cls["name"])

+        if isinstance(self.val_dataset, self.voc_dataset):
+            predictions = self._convert_prediction_format(predictions)
+
         result_table = self.wandb.Table(columns=columns)
+
         for idx, val in table_ref.iterrows():

             avg_scores = defaultdict(int)
             num_occurrences = defaultdict(int)

-
-
-
+            id = val[0]
+            if isinstance(id, list):
+                id = id[0]

+            if id in predictions:
+                prediction = predictions[id]
+                boxes = []
                 for i in range(len(prediction["bboxes"])):
                     bbox = prediction["bboxes"][i]
                     x0 = bbox[0]

@@ -310,7 +363,6 @@ class WandbLogger(object):
                     boxes.append(box)
                 else:
                     boxes = []
-
                 average_class_score = []
                 for cls in self.cats:
                     if cls["name"] not in num_occurrences:
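Taken together, a hedged end-to-end sketch of how the VOC path gets exercised. The Trainer normally wires this up when W&B logging is enabled; the constructor arguments shown here (data_dir, image_sets, project, val_dataset) are assumptions based on existing signatures, not something this commit defines:

from yolox.data.datasets import VOCDetection
from yolox.utils.logger import WandbLogger

# Requires a wandb login and the VOC data on disk; paths are placeholders.
val_dataset = VOCDetection(
    data_dir="datasets/VOCdevkit",
    image_sets=[("2007", "test")],
)

logger = WandbLogger(project="yolox-voc", val_dataset=val_dataset)

# predictions: {img_id: (bboxes, classes, scores)} from the VOC evaluator.
# With val_dataset being a VOCDetection, log_images() now routes it through
# _convert_prediction_format() before filling the W&B results table.
# logger.log_images(predictions)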