import os
import time

import torch
from loguru import logger
from torch import Tensor

# TODO: We may not be able to use CUDA?
from torch.cuda.amp import GradScaler, autocast

from yolo.config.config import Config, TrainConfig, ValidationConfig
from yolo.model.yolo import YOLO
from yolo.tools.data_loader import StreamDataLoader, create_dataloader
from yolo.tools.drawer import draw_bboxes
from yolo.tools.loss_functions import create_loss_function
from yolo.utils.bounding_box_utils import Vec2Box, bbox_nms, calculate_map
from yolo.utils.logging_utils import ProgressLogger
from yolo.utils.model_utils import (
    ExponentialMovingAverage,
    create_optimizer,
    create_scheduler,
)


class ModelTrainer:
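    """Runs the training loop: AMP optimization, LR scheduling, optional EMA weights, and per-epoch validation."""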
    def __init__(self, cfg: Config, model: YOLO, vec2box: Vec2Box, progress: ProgressLogger, device):
        train_cfg: TrainConfig = cfg.task
        self.model = model
        self.vec2box = vec2box
        self.device = device
        self.optimizer = create_optimizer(model, train_cfg.optimizer)
        self.scheduler = create_scheduler(self.optimizer, train_cfg.scheduler)
        self.loss_fn = create_loss_function(cfg, vec2box)
        self.progress = progress
        self.num_epochs = cfg.task.epoch

        self.validation_dataloader = create_dataloader(cfg.task.validation.data, cfg.dataset, cfg.task.validation.task)
        self.validator = ModelValidator(cfg.task.validation, model, vec2box, device, self.progress)

        if getattr(train_cfg.ema, "enabled", False):
            self.ema = ExponentialMovingAverage(model, decay=train_cfg.ema.decay)
        else:
            self.ema = None
        self.scaler = GradScaler()

    def train_one_batch(self, images: Tensor, targets: Tensor):
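        """Run one forward/backward pass under autocast and step the optimizer through the GradScaler."""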
        images, targets = images.to(self.device), targets.to(self.device)
        self.optimizer.zero_grad()

        with autocast():
            predicts = self.model(images)
            aux_predicts = self.vec2box(predicts["AUX"])
            main_predicts = self.vec2box(predicts["Main"])
            loss, loss_item = self.loss_fn(aux_predicts, main_predicts, targets)

        self.scaler.scale(loss).backward()
        self.scaler.step(self.optimizer)
        self.scaler.update()

        return loss.item(), loss_item

    def train_one_epoch(self, dataloader):
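        """Iterate over the dataloader once, step the scheduler at the end, and return the mean batch loss."""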
        self.model.train()
        total_loss = 0

        for images, targets in dataloader:
            loss, loss_each = self.train_one_batch(images, targets)

            total_loss += loss
            self.progress.one_batch(loss_each)

        if self.scheduler:
            self.scheduler.step()

        return total_loss / len(dataloader)

    def save_checkpoint(self, epoch: int, filename="checkpoint.pt"):
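        """Serialize epoch, model, and optimizer state; also store the EMA weights when EMA is enabled."""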
        checkpoint = {
            "epoch": epoch,
            "model_state_dict": self.model.state_dict(),
            "optimizer_state_dict": self.optimizer.state_dict(),
        }
        if self.ema:
            self.ema.apply_shadow()
            checkpoint["model_state_dict_ema"] = self.model.state_dict()
            self.ema.restore()
        torch.save(checkpoint, filename)

    def solve(self, dataloader):
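        """Train for the configured number of epochs, running validation after each epoch."""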
        logger.info("🚄 Start Training!")
        num_epochs = self.num_epochs

        with self.progress.progress:
            self.progress.start_train(num_epochs)
            for epoch in range(num_epochs):

                self.progress.start_one_epoch(len(dataloader), self.optimizer, epoch)
                epoch_loss = self.train_one_epoch(dataloader)
                self.progress.finish_one_epoch()

                self.validator.solve(self.validation_dataloader)


class ModelTester:
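    """Runs inference on images or a video stream, drawing NMS-filtered boxes and optionally saving the results."""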
    def __init__(self, cfg: Config, model: YOLO, vec2box: Vec2Box, progress: ProgressLogger, device):
        self.model = model
        self.device = device
        self.vec2box = vec2box
        self.progress = progress

        self.nms = cfg.task.nms
        self.save_path = os.path.join(progress.save_path, "images")
        os.makedirs(self.save_path, exist_ok=True)
        self.save_predict = getattr(cfg.task, "save_predict", None)
        self.idx2label = cfg.class_list

    def solve(self, dataloader: StreamDataLoader):
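        """Predict each image or frame from the dataloader; display live results for streams and save drawn images."""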
        logger.info("👀 Start Inference!")
        if isinstance(self.model, torch.nn.Module):
            self.model.eval()

        if dataloader.is_stream:
            import cv2
            import numpy as np

            last_time = time.time()
        try:
            for idx, images in enumerate(dataloader):
                images = images.to(self.device)
                with torch.no_grad():
                    predicts = self.model(images)
                    predicts = self.vec2box(predicts["Main"])
                nms_out = bbox_nms(predicts[0], predicts[2], self.nms)
                img = draw_bboxes(images[0], nms_out[0], idx2label=self.idx2label)

                if dataloader.is_stream:
                    img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
                    fps = 1 / (time.time() - last_time)
                    cv2.putText(img, f"FPS: {fps:.2f}", (0, 15), 0, 0.5, (100, 255, 0), 1, cv2.LINE_AA)
                    last_time = time.time()
                    cv2.imshow("Prediction", img)
                    if cv2.waitKey(1) & 0xFF == ord("q"):
                        break
                    if not self.save_predict:
                        continue
                if self.save_predict is not False:
                    save_image_path = os.path.join(self.save_path, f"frame{idx:03d}.png")
                    img.save(save_image_path)
                    logger.info(f"💾 Saved visualized image at {save_image_path}")

        except (KeyboardInterrupt, Exception) as e:
            dataloader.stop_event.set()
            dataloader.stop()
            if isinstance(e, KeyboardInterrupt):
                logger.error("User Keyboard Interrupt")
            else:
                raise e
        dataloader.stop()


class ModelValidator:
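    """Evaluates the model on a validation dataloader and reports mAP over IoU thresholds 0.50:0.95."""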
    def __init__(
        self,
        validation_cfg: ValidationConfig,
        model: YOLO,
        vec2box: Vec2Box,
        device,
        progress: ProgressLogger,
    ):
        self.model = model
        self.vec2box = vec2box
        self.device = device
        self.progress = progress

        self.nms = validation_cfg.nms

    def solve(self, dataloader):
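        """Compute per-image mAP over IoU thresholds 0.5 to 0.95 (step 0.05) and log the running mean."""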
        # logger.info("🧪 Start Validation!")
        self.model.eval()
        # TODO: make the mAP metric choice configurable?
        iou_thresholds = torch.arange(0.5, 1.0, 0.05)
        map_all = []
        self.progress.start_one_epoch(len(dataloader))
        for images, targets in dataloader:
            images, targets = images.to(self.device), targets.to(self.device)
            with torch.no_grad():
                predicts = self.model(images)
            predicts = self.vec2box(predicts["Main"])
            nms_out = bbox_nms(predicts[0], predicts[2], self.nms)
            for idx, predict in enumerate(nms_out):
                map_value = calculate_map(predict, targets[idx], iou_thresholds)
                map_all.append(map_value[0])
            self.progress.one_batch(mapp=torch.Tensor(map_all).mean())

        self.progress.finish_one_epoch()