✨ [New] inference with coco format
- yolo/config/dataset/dev.yaml +1 -1
- yolo/config/general.yaml +1 -1
- yolo/lazy.py +6 -10
- yolo/tools/solver.py +14 -2
- yolo/utils/logging_utils.py +45 -13
- yolo/utils/solver_utils.py +46 -0
yolo/config/dataset/dev.yaml
CHANGED
@@ -1,5 +1,5 @@
 path: data/dev
 train: train
-validation:
+validation: val

 auto_download:
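For context, a minimal sketch of how a dataset block like this resolves to split directories; loading it stand-alone with OmegaConf and the data/dev/train, data/dev/val layout are assumptions for illustration, not something this commit shows:

import os

from omegaconf import OmegaConf

# Hypothetical stand-alone load of the dataset config above.
dataset_cfg = OmegaConf.load("yolo/config/dataset/dev.yaml")
train_dir = os.path.join(dataset_cfg.path, dataset_cfg.train)       # data/dev/train
val_dir = os.path.join(dataset_cfg.path, dataset_cfg.validation)    # data/dev/val
print(train_dir, val_dir)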
yolo/config/general.yaml
CHANGED
@@ -9,7 +9,7 @@ out_path: runs
 exist_ok: True

 lucky_number: 10
-use_wandb:
+use_wandb: True
 use_TensorBoard: False

 weight: True # Path to weight or True for auto, False for no pretrained weight
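This default matters because ProgressLogger only sets up Weights & Biases when the flag is truthy (see yolo/utils/logging_utils.py below). A reduced sketch of that gating, trimmed to the pieces visible in this commit; the class name here is a hypothetical stand-in:

import wandb

class WandbGateSketch:
    # Hypothetical stand-in for ProgressLogger's wandb handling.
    def __init__(self, use_wandb: bool, save_path: str, exp_name: str):
        self.use_wandb = use_wandb
        if self.use_wandb:
            self.wandb = wandb.init(
                project="YOLO", resume="allow", mode="online", dir=save_path, id=None, name=exp_name
            )

    def finish(self):
        if self.use_wandb:
            self.wandb.finish()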
yolo/lazy.py
CHANGED
@@ -28,18 +28,14 @@ def main(cfg: Config):
     model = model.to(device)

     vec2box = Vec2Box(model, cfg.image_size, device)
-
     if cfg.task.task == "train":
-        trainer = ModelTrainer(cfg, model, vec2box, progress, device, use_ddp)
-        trainer.solve(dataloader)
-
-    if cfg.task.task == "inference":
-        tester = ModelTester(cfg, model, vec2box, progress, device)
-        tester.solve(dataloader)
-
+        solver = ModelTrainer(cfg, model, vec2box, progress, device, use_ddp)
     if cfg.task.task == "validation":
-        validator = ModelValidator(cfg.task, model, vec2box, progress, device)
-        validator.solve(dataloader)
+        solver = ModelValidator(cfg.task, model, vec2box, progress, device)
+    if cfg.task.task == "inference":
+        solver = ModelTester(cfg, model, vec2box, progress, device)
+    progress.start()
+    solver.solve(dataloader)


 if __name__ == "__main__":
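The refactor works because ModelTrainer, ModelValidator, and ModelTester all expose the same solve(dataloader) entry point, so main() only has to pick one and drive it. A minimal sketch of that shared interface; the Protocol is an illustration, the project itself does not declare one:

from typing import Protocol


class Solver(Protocol):
    # Each task class in yolo/tools/solver.py provides this method.
    def solve(self, dataloader) -> None: ...


def run(solver: Solver, dataloader, progress) -> None:
    # Mirrors the tail of main(): start the rich progress display, then hand off.
    progress.start()
    solver.solve(dataloader)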
yolo/tools/solver.py
CHANGED
@@ -1,9 +1,11 @@
 import json
 import os
+import sys
 import time

 import torch
 from loguru import logger
+from pycocotools.coco import COCO
 from torch import Tensor

 # TODO: We may can't use CUDA?
@@ -25,6 +27,7 @@ from yolo.utils.model_utils import (
     create_scheduler,
     predicts_to_json,
 )
+from yolo.utils.solver_utils import calculate_ap


 class ModelTrainer:
@@ -112,7 +115,7 @@ class ModelTrainer:
             epoch_loss = self.train_one_epoch(dataloader)
             self.progress.finish_one_epoch()

-            self.validator.solve(self.validation_dataloader)
+            self.validator.solve(self.validation_dataloader, epoch_idx=epoch)


 class ModelTester:
@@ -187,7 +190,12 @@ class ModelValidator:
         self.post_proccess = PostProccess(vec2box, validation_cfg.nms)
         self.json_path = os.path.join(self.progress.save_path, f"predict.json")

-    def solve(self, dataloader):
+        sys.stdout = open(os.devnull, "w")
+        # TODO: load with config file
+        self.coco_gt = COCO("data/coco/annotations/instances_val2017.json")
+        sys.stdout = sys.__stdout__
+
+    def solve(self, dataloader, epoch_idx=-1):
         # logger.info("🧪 Start Validation!")
         self.model.eval()
         predict_json = []
@@ -203,3 +211,7 @@
         self.progress.finish_one_epoch()
         with open(self.json_path, "w") as f:
             json.dump(predict_json, f)
+
+        self.progress.run_coco()
+        result = calculate_ap(self.coco_gt, predict_json)
+        self.progress.finish_coco(result, epoch_idx)
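calculate_ap passes the in-memory predict_json straight to pycocotools, so each entry has to follow the standard COCO detection-results format (image_id, category_id, xywh bbox in pixels, score). A small sketch of evaluating such a list against the ground-truth file hard-coded above; the single detection is made up purely for illustration:

from pycocotools.coco import COCO

from yolo.utils.solver_utils import calculate_ap

coco_gt = COCO("data/coco/annotations/instances_val2017.json")
predict_json = [
    # One fake detection in COCO results format.
    {"image_id": 139, "category_id": 1, "bbox": [100.0, 50.0, 80.0, 200.0], "score": 0.92},
]
stats = calculate_ap(coco_gt, predict_json)
print(stats[0], stats[1])  # AP @ .5:.95 and AP @ .5 from the 12-element COCOeval stats vector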
yolo/utils/logging_utils.py
CHANGED
@@ -13,18 +13,26 @@ Example:

 import os
 import sys
+from collections import deque
 from typing import Dict, List

 import wandb
 import wandb.errors.term
 from loguru import logger
-from rich.console import Console
-from rich.progress import BarColumn, Progress, TextColumn, TimeRemainingColumn
+from rich.console import Console, Group
+from rich.progress import (
+    BarColumn,
+    Progress,
+    SpinnerColumn,
+    TextColumn,
+    TimeRemainingColumn,
+)
 from rich.table import Table
 from torch import Tensor
 from torch.optim import Optimizer

 from yolo.config.config import Config, YOLOLayer
+from yolo.utils.solver_utils import make_ap_table


 def custom_logger(quite: bool = False):
@@ -38,20 +46,24 @@ def custom_logger(quite: bool = False):
     )


-class ProgressLogger:
-    def __init__(self, cfg: Config, exp_name: str):
+class ProgressLogger(Progress):
+    def __init__(self, cfg: Config, exp_name: str, *args, **kwargs):
         local_rank = int(os.getenv("LOCAL_RANK", "0"))
         self.quite_mode = local_rank or getattr(cfg, "quite", False)
         custom_logger(self.quite_mode)
         self.save_path = validate_log_directory(cfg, exp_name=cfg.name)

-        self.progress = Progress(
+        progress_bar = (
+            SpinnerColumn(),
             TextColumn("[progress.description]{task.description}"),
             BarColumn(bar_width=None),
             TextColumn("{task.completed:.0f}/{task.total:.0f}"),
             TimeRemainingColumn(),
         )
-        self.progress.start()
+        self.ap_table = Table()
+        # TODO: load maxlen by config files
+        self.ap_past_list = deque(maxlen=5)
+        super().__init__(*args, *progress_bar, **kwargs)

         self.use_wandb = cfg.use_wandb
         if self.use_wandb:
@@ -60,21 +72,32 @@
             project="YOLO", resume="allow", mode="online", dir=self.save_path, id=None, name=exp_name
         )

+    def update_ap_table(self, ap_list, epoch_idx=-1):
+        ap_table, ap_main = make_ap_table(ap_list, self.ap_past_list, epoch_idx)
+        self.ap_past_list.append((epoch_idx, ap_main))
+        self.ap_table = ap_table
+
+        if self.use_wandb:
+            self.wandb.log({f"mAP/AP @ .5:.95": ap_main[1], f"mAP/AP @ .5": ap_main[3]})
+
+    def get_renderable(self):
+        return Group(*self.get_renderables(), self.ap_table)
+
     def start_train(self, num_epochs: int):
-        self.task_epoch = self.progress.add_task("[cyan]Epochs [white]| Loss | Box | DFL | BCE |", total=num_epochs)
+        self.task_epoch = self.add_task("[cyan]Epochs [white]| Loss | Box | DFL | BCE |", total=num_epochs)

     def start_one_epoch(self, num_batches: int, optimizer: Optimizer = None, epoch_idx: int = None):
         self.num_batches = num_batches
-        if self.use_wandb:
+        if self.use_wandb and optimizer is not None:
             lr_values = [params["lr"] for params in optimizer.param_groups]
             lr_names = ["bias", "norm", "conv"]
             for lr_name, lr_value in zip(lr_names, lr_values):
                 self.wandb.log({f"Learning Rate/{lr_name}": lr_value}, step=epoch_idx)
-        self.batch_task = self.progress.add_task("[green]Batches", total=num_batches)
+        self.batch_task = self.add_task("[green]Batches", total=num_batches)

     def one_batch(self, loss_dict: Dict[str, Tensor] = None):
         if loss_dict is None:
-            self.progress.update(self.batch_task, advance=1, description=f"[green]Validating")
+            self.update(self.batch_task, advance=1, description=f"[green]Validating")
             return
         if self.use_wandb:
             for loss_name, loss_value in loss_dict.items():
@@ -84,11 +107,20 @@
         for loss_name, loss_val in loss_dict.items():
             loss_str += f" {loss_val:2.2f} |"

-        self.progress.update(self.batch_task, advance=1, description=f"[green]Batches [white]{loss_str}")
-        self.progress.update(self.task_epoch, advance=1 / self.num_batches)
+        self.update(self.batch_task, advance=1, description=f"[green]Batches [white]{loss_str}")
+        self.update(self.task_epoch, advance=1 / self.num_batches)
+
+    def run_coco(self):
+        self.batch_task = self.add_task("[green]Run COCO", total=1)
+
+    def finish_coco(self, result, epoch_idx):
+        self.update_ap_table(result, epoch_idx)
+        self.update(self.batch_task, advance=1)
+        self.refresh()
+        self.remove_task(self.batch_task)

     def finish_one_epoch(self):
-        self.progress.remove_task(self.batch_task)
+        self.remove_task(self.batch_task)

     def finish_train(self):
         self.wandb.finish()
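Subclassing rich.progress.Progress and overriding get_renderable is what lets the AP table share one Live display with the progress bars. A stripped-down sketch of just that rendering trick, independent of the YOLO-specific pieces; the class and variable names here are hypothetical:

import time

from rich.console import Group
from rich.progress import BarColumn, Progress, SpinnerColumn, TextColumn, TimeRemainingColumn
from rich.table import Table


class TableProgress(Progress):
    # Hypothetical minimal version of ProgressLogger's rendering logic.
    def __init__(self, *args, **kwargs):
        self.table = Table()
        super().__init__(*args, **kwargs)

    def get_renderable(self):
        # Everything returned here is redrawn together on each refresh.
        return Group(*self.get_renderables(), self.table)


columns = (SpinnerColumn(), TextColumn("{task.description}"), BarColumn(), TimeRemainingColumn())
with TableProgress(*columns) as progress:
    task = progress.add_task("demo", total=5)
    for _ in range(5):
        progress.advance(task)
        time.sleep(0.1)
    # Swap in a populated table; the next refresh draws it under the bar.
    results = Table("metric", "value")
    results.add_row("AP @ .5:.95", "0.53")
    progress.table = results
    progress.refresh()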
yolo/utils/solver_utils.py
ADDED
@@ -0,0 +1,46 @@
+import os
+import sys
+
+from pycocotools.coco import COCO
+from pycocotools.cocoeval import COCOeval
+from rich.table import Table
+
+
+def calculate_ap(coco_gt: COCO, pd_path):
+    sys.stdout = open(os.devnull, "w")
+    coco_dt = coco_gt.loadRes(pd_path)
+    coco_eval = COCOeval(coco_gt, coco_dt, "bbox")
+    coco_eval.evaluate()
+    coco_eval.accumulate()
+    coco_eval.summarize()
+    sys.stdout = sys.__stdout__
+    return coco_eval.stats
+
+
+def make_ap_table(score, past_result=[], epoch=-1):
+    ap_table = Table()
+    ap_table.add_column("Epoch", justify="center", style="white", width=5)
+    ap_table.add_column("Avg. Precision", justify="left", style="cyan")
+    ap_table.add_column("", justify="right", style="green", width=5)
+    ap_table.add_column("Avg. Recall", justify="left", style="cyan")
+    ap_table.add_column("", justify="right", style="green", width=5)
+
+    for eps, (ap_name1, ap_value1, ap_name2, ap_value2) in past_result:
+        ap_table.add_row(f"{eps: 3d}", ap_name1, f"{ap_value1:.2f}", ap_name2, f"{ap_value2:.2f}")
+    if past_result:
+        ap_table.add_row()
+
+    this_ap = ("AP @ .5:.95", score[0], "AP @ .5", score[1])
+    metrics = [
+        ("AP @ .5:.95", score[0], "AR maxDets 1", score[6]),
+        ("AP @ .5", score[1], "AR maxDets 10", score[7]),
+        ("AP @ .75", score[2], "AR maxDets 100", score[8]),
+        ("AP (small)", score[3], "AR (small)", score[9]),
+        ("AP (medium)", score[4], "AR (medium)", score[10]),
+        ("AP (large)", score[5], "AR (large)", score[11]),
+    ]
+
+    for ap_name, ap_value, ar_name, ar_value in metrics:
+        ap_table.add_row(f"{epoch: 3d}", ap_name, f"{ap_value:.2f}", ar_name, f"{ar_value:.2f}")
+
+    return ap_table, this_ap
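make_ap_table expects the 12-element stats vector produced by COCOeval.summarize() (indices 0-5 are the AP entries, 6-11 the AR entries) and returns the rich Table plus the (name, value, name, value) tuple that ProgressLogger keeps in its deque of past results. A small usage sketch with made-up numbers:

from rich.console import Console

from yolo.utils.solver_utils import make_ap_table

# Fake COCOeval stats: AP .5:.95, AP .5, AP .75, AP small/medium/large,
# then AR maxDets 1/10/100 and AR small/medium/large.
fake_stats = [0.46, 0.63, 0.50, 0.28, 0.51, 0.61, 0.36, 0.58, 0.62, 0.41, 0.67, 0.76]
ap_table, this_ap = make_ap_table(fake_stats, past_result=[], epoch=3)
Console().print(ap_table)  # renders the epoch's AP/AR rows
print(this_ap)             # ('AP @ .5:.95', 0.46, 'AP @ .5', 0.63)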