henry000 committed
Commit da1069f · 2 Parent(s): 8ce9eff e8991f8

🔀 [Merge] branch 'TEST'

.github/workflows/deploy.yaml CHANGED
@@ -53,17 +53,17 @@ jobs:

       - name: Run Validation
         run: |
-          python yolo/lazy.py task=validation dataset=mock
-          python yolo/lazy.py task=validation dataset=mock model=v9-s
-          python yolo/lazy.py task=validation dataset=mock name=AnyNameYouWant
+          python yolo/lazy.py task=validation use_wandb=False dataset=mock
+          python yolo/lazy.py task=validation use_wandb=False dataset=mock model=v9-s
+          python yolo/lazy.py task=validation use_wandb=False dataset=mock name=AnyNameYouWant

       - name: Run Inference
         run: |
-          python yolo/lazy.py task=inference
-          python yolo/lazy.py task=inference model=v7
-          python yolo/lazy.py task=inference +quite=True
-          python yolo/lazy.py task=inference name=AnyNameYouWant
-          python yolo/lazy.py task=inference image_size=\[480,640]
-          python yolo/lazy.py task=inference task.nms.min_confidence=0.1
-          python yolo/lazy.py task=inference task.fast_inference=deploy
-          python yolo/lazy.py task=inference task.data.source=tests/data/images/val
+          python yolo/lazy.py task=inference use_wandb=False
+          python yolo/lazy.py task=inference use_wandb=False model=v7
+          python yolo/lazy.py task=inference use_wandb=False +quite=True
+          python yolo/lazy.py task=inference use_wandb=False name=AnyNameYouWant
+          python yolo/lazy.py task=inference use_wandb=False image_size=\[480,640]
+          python yolo/lazy.py task=inference use_wandb=False task.nms.min_confidence=0.1
+          python yolo/lazy.py task=inference use_wandb=False task.fast_inference=deploy
+          python yolo/lazy.py task=inference use_wandb=False task.data.source=tests/data/images/val
requirements.txt CHANGED
@@ -1,6 +1,7 @@
 einops
 graphviz
 hydra-core
+lightning
 loguru
 numpy
 opencv-python
tests/conftest.py CHANGED
@@ -4,15 +4,16 @@ from pathlib import Path
 import pytest
 import torch
 from hydra import compose, initialize
+from lightning import Trainer

 project_root = Path(__file__).resolve().parent.parent
 sys.path.append(str(project_root))

 from yolo import Anc2Box, Config, Vec2Box, create_converter, create_model
 from yolo.model.yolo import YOLO
-from yolo.tools.data_loader import StreamDataLoader, YoloDataLoader
+from yolo.tools.data_loader import StreamDataLoader, create_dataloader
 from yolo.tools.dataset_preparation import prepare_dataset
-from yolo.utils.logging_utils import ProgressLogger, set_seed
+from yolo.utils.logging_utils import set_seed, setup


 def pytest_configure(config):
@@ -52,18 +53,6 @@ def device():
     return torch.device("cuda" if torch.cuda.is_available() else "cpu")


-@pytest.fixture(scope="session")
-def train_progress_logger(train_cfg: Config):
-    progress_logger = ProgressLogger(train_cfg, exp_name=train_cfg.name)
-    return progress_logger
-
-
-@pytest.fixture(scope="session")
-def validation_progress_logger(validation_cfg: Config):
-    progress_logger = ProgressLogger(validation_cfg, exp_name=validation_cfg.name)
-    return progress_logger
-
-
 @pytest.fixture(scope="session")
 def model(train_cfg: Config, device) -> YOLO:
     model = create_model(train_cfg.model)
@@ -76,6 +65,24 @@ def model_v7(inference_v7_cfg: Config, device) -> YOLO:
     return model.to(device)


+@pytest.fixture(scope="session")
+def solver(train_cfg: Config) -> Trainer:
+    train_cfg.use_wandb = False
+    callbacks, loggers, save_path = setup(train_cfg)
+    trainer = Trainer(
+        accelerator="auto",
+        max_epochs=getattr(train_cfg.task, "epoch", None),
+        precision="16-mixed",
+        callbacks=callbacks,
+        logger=loggers,
+        log_every_n_steps=1,
+        gradient_clip_val=10,
+        deterministic=True,
+        default_root_dir=save_path,
+    )
+    return trainer
+
+
 @pytest.fixture(scope="session")
 def vec2box(train_cfg: Config, model: YOLO, device) -> Vec2Box:
     vec2box = create_converter(train_cfg.model.name, model, train_cfg.model.anchor, train_cfg.image_size, device)
@@ -93,13 +100,13 @@ def anc2box(inference_v7_cfg: Config, model: YOLO, device) -> Anc2Box:
 @pytest.fixture(scope="session")
 def train_dataloader(train_cfg: Config):
     prepare_dataset(train_cfg.dataset, task="train")
-    return YoloDataLoader(train_cfg.task.data, train_cfg.dataset, train_cfg.task.task)
+    return create_dataloader(train_cfg.task.data, train_cfg.dataset, train_cfg.task.task)


 @pytest.fixture(scope="session")
 def validation_dataloader(validation_cfg: Config):
     prepare_dataset(validation_cfg.dataset, task="val")
-    return YoloDataLoader(validation_cfg.task.data, validation_cfg.dataset, validation_cfg.task.task)
+    return create_dataloader(validation_cfg.task.data, validation_cfg.dataset, validation_cfg.task.task)


 @pytest.fixture(scope="session")
tests/test_tools/test_data_loader.py CHANGED
@@ -1,11 +1,13 @@
 import sys
 from pathlib import Path

+from torch.utils.data import DataLoader
+
 project_root = Path(__file__).resolve().parent.parent.parent
 sys.path.append(str(project_root))

 from yolo.config.config import Config
-from yolo.tools.data_loader import StreamDataLoader, YoloDataLoader, create_dataloader
+from yolo.tools.data_loader import StreamDataLoader, create_dataloader


 def test_create_dataloader_cache(train_cfg: Config):
@@ -25,7 +27,7 @@ def test_create_dataloader_cache(train_cfg: Config):
     assert m_image_paths == l_image_paths


-def test_training_data_loader_correctness(train_dataloader: YoloDataLoader):
+def test_training_data_loader_correctness(train_dataloader: DataLoader):
     """Test that the training data loader produces correctly shaped data and metadata."""
     batch_size, images, _, reverse_tensors, image_paths = next(iter(train_dataloader))
     assert batch_size == 2
@@ -38,7 +40,7 @@ def test_training_data_loader_correctness(train_dataloader: YoloDataLoader):
     assert list(image_paths) == list(expected_paths)


-def test_validation_data_loader_correctness(validation_dataloader: YoloDataLoader):
+def test_validation_data_loader_correctness(validation_dataloader: DataLoader):
     batch_size, images, targets, reverse_tensors, image_paths = next(iter(validation_dataloader))
     assert batch_size == 4
     assert images.shape == (4, 3, 640, 640)
tests/test_tools/test_loss_functions.py CHANGED
@@ -1,4 +1,5 @@
 import sys
+from math import isinf, isnan
 from pathlib import Path

 import pytest
@@ -51,6 +52,6 @@ def test_yolo_loss(loss_function, data):
     predicts, targets = data
     loss, loss_dict = loss_function(predicts, predicts, targets)
     assert torch.isnan(loss)
-    assert torch.isnan(loss_dict["BoxLoss"])
-    assert torch.isnan(loss_dict["DFLoss"])
-    assert torch.isinf(loss_dict["BCELoss"])
+    assert isnan(loss_dict["Loss/BoxLoss"])
+    assert isnan(loss_dict["Loss/DFLLoss"])
+    assert isinf(loss_dict["Loss/BCELoss"])
tests/test_tools/test_solver.py CHANGED
@@ -1,79 +1,81 @@
 import sys
+from math import isclose
 from pathlib import Path

 import pytest
-from torch import allclose, tensor
+from lightning.pytorch import Trainer
+from torch.utils.data import DataLoader

 project_root = Path(__file__).resolve().parent.parent.parent
 sys.path.append(str(project_root))

 from yolo.config.config import Config
 from yolo.model.yolo import YOLO
-from yolo.tools.data_loader import StreamDataLoader, YoloDataLoader
-from yolo.tools.solver import ModelTester, ModelTrainer, ModelValidator
+from yolo.tools.data_loader import StreamDataLoader
+from yolo.tools.solver import InferenceModel, TrainModel, ValidateModel
 from yolo.utils.bounding_box_utils import Anc2Box, Vec2Box


 @pytest.fixture
-def model_validator(validation_cfg: Config, model: YOLO, vec2box: Vec2Box, validation_progress_logger, device):
-    validator = ModelValidator(
-        validation_cfg.task, validation_cfg.dataset, model, vec2box, validation_progress_logger, device
-    )
+def model_validator(validation_cfg: Config):
+    validator = ValidateModel(validation_cfg)
     return validator


-def test_model_validator_initialization(model_validator: ModelValidator):
+def test_model_validator_initialization(solver: Trainer, model_validator: ValidateModel):
     assert isinstance(model_validator.model, YOLO)
-    assert hasattr(model_validator, "solve")
+    assert hasattr(solver, "validate")


-def test_model_validator_solve_mock_dataset(model_validator: ModelValidator, validation_dataloader: YoloDataLoader):
-    mAPs = model_validator.solve(validation_dataloader)
-    except_mAPs = {"mAP.5": tensor(0.6969), "mAP.5:.95": tensor(0.4195)}
-    assert allclose(mAPs["mAP.5"], except_mAPs["mAP.5"], rtol=0.1)
-    print(mAPs)
-    assert allclose(mAPs["mAP.5:.95"], except_mAPs["mAP.5:.95"], rtol=0.1)
+def test_model_validator_solve_mock_dataset(
+    solver: Trainer, model_validator: ValidateModel, validation_dataloader: DataLoader
+):
+    mAPs = solver.validate(model_validator, dataloaders=validation_dataloader)[0]
+    except_mAPs = {"map_50": 0.7379, "map": 0.5617}
+    assert isclose(mAPs["map_50"], except_mAPs["map_50"], abs_tol=1e-4)
+    assert isclose(mAPs["map"], except_mAPs["map"], abs_tol=0.1)


 @pytest.fixture
-def model_tester(inference_cfg: Config, model: YOLO, vec2box: Vec2Box, validation_progress_logger, device):
-    tester = ModelTester(inference_cfg, model, vec2box, validation_progress_logger, device)
+def model_tester(inference_cfg: Config):
+    tester = InferenceModel(inference_cfg)
     return tester


 @pytest.fixture
-def modelv7_tester(inference_v7_cfg: Config, model_v7: YOLO, anc2box: Anc2Box, validation_progress_logger, device):
-    tester = ModelTester(inference_v7_cfg, model_v7, anc2box, validation_progress_logger, device)
+def modelv7_tester(inference_v7_cfg: Config):
+    tester = InferenceModel(inference_v7_cfg)
     return tester


-def test_model_tester_initialization(model_tester: ModelTester):
+def test_model_tester_initialization(solver: Trainer, model_tester: InferenceModel):
     assert isinstance(model_tester.model, YOLO)
-    assert hasattr(model_tester, "solve")
+    assert hasattr(solver, "predict")


-def test_model_tester_solve_single_image(model_tester: ModelTester, file_stream_data_loader: StreamDataLoader):
-    model_tester.solve(file_stream_data_loader)
+def test_model_tester_solve_single_image(
+    solver: Trainer, model_tester: InferenceModel, file_stream_data_loader: StreamDataLoader
+):
+    solver.predict(model_tester, file_stream_data_loader)


-def test_modelv7_tester_solve_single_image(modelv7_tester: ModelTester, file_stream_data_loader_v7: StreamDataLoader):
-    modelv7_tester.solve(file_stream_data_loader_v7)
+def test_modelv7_tester_solve_single_image(
+    solver: Trainer, modelv7_tester: InferenceModel, file_stream_data_loader_v7: StreamDataLoader
+):
+    solver.predict(modelv7_tester, file_stream_data_loader_v7)


 @pytest.fixture
-def model_trainer(train_cfg: Config, model: YOLO, vec2box: Vec2Box, train_progress_logger, device):
+def model_trainer(train_cfg: Config):
     train_cfg.task.epoch = 2
-    trainer = ModelTrainer(train_cfg, model, vec2box, train_progress_logger, device, use_ddp=False)
+    trainer = TrainModel(train_cfg)
     return trainer


-def test_model_trainer_initialization(model_trainer: ModelTrainer):
-
+def test_model_trainer_initialization(solver: Trainer, model_trainer: TrainModel):
     assert isinstance(model_trainer.model, YOLO)
-    assert hasattr(model_trainer, "solve")
-    assert model_trainer.optimizer is not None
-    assert model_trainer.scheduler is not None
-    assert model_trainer.loss_fn is not None
+    assert hasattr(solver, "fit")
+    assert solver.optimizers is not None


 # def test_model_trainer_solve_mock_dataset(model_trainer: ModelTrainer, train_dataloader: YoloDataLoader):
tests/test_utils/test_bounding_box_utils.py CHANGED
@@ -216,9 +216,8 @@ def test_calculate_map():
     ground_truths = tensor([[0, 50, 50, 150, 150], [0, 30, 30, 100, 100]])  # [class, x1, y1, x2, y2]

     mAP = calculate_map(predictions, ground_truths)
-
-    expected_ap50 = tensor(0.5)
-    expected_ap50_95 = tensor(0.2)
+    expected_ap50 = tensor(0.5050)
+    expected_ap50_95 = tensor(0.2020)

-    assert isclose(mAP["mAP.5"], expected_ap50, atol=1e-5), f"AP50 mismatch"
-    assert isclose(mAP["mAP.5:.95"], expected_ap50_95, atol=1e-5), f"Mean AP mismatch"
+    assert isclose(mAP["map_50"], expected_ap50, atol=1e-4), f"AP50 mismatch"
+    assert isclose(mAP["map"], expected_ap50_95, atol=1e-4), f"Mean AP mismatch"
yolo/__init__.py CHANGED
@@ -10,7 +10,7 @@ from yolo.utils.logging_utils import (
     YOLORichModelSummary,
     YOLORichProgressBar,
 )
-from yolo.utils.model_utils import PostProccess
+from yolo.utils.model_utils import PostProcess

 all = [
     "create_model",
@@ -29,5 +29,5 @@ all = [
     "create_dataloader",
     "FastModelLoader",
     "TrainModel",
-    "PostProccess",
+    "PostProcess",
 ]
yolo/lazy.py CHANGED
@@ -14,10 +14,10 @@ from yolo.utils.logging_utils import setup

 @hydra.main(config_path="config", config_name="config", version_base=None)
 def main(cfg: Config):
-    callbacks, loggers = setup(cfg)
+    callbacks, loggers, save_path = setup(cfg)

     trainer = Trainer(
-        accelerator="cuda",
+        accelerator="auto",
         max_epochs=getattr(cfg.task, "epoch", None),
         precision="16-mixed",
         callbacks=callbacks,
@@ -25,18 +25,19 @@ def main(cfg: Config):
         log_every_n_steps=1,
         gradient_clip_val=10,
         deterministic=True,
+        enable_progress_bar=not getattr(cfg, "quite", False),
+        default_root_dir=save_path,
     )

-    match cfg.task.task:
-        case "train":
-            model = TrainModel(cfg)
-            trainer.fit(model)
-        case "validation":
-            model = ValidateModel(cfg)
-            trainer.validate(model)
-        case "inference":
-            model = InferenceModel(cfg)
-            trainer.predict(model)
+    if cfg.task.task == "train":
+        model = TrainModel(cfg)
+        trainer.fit(model)
+    if cfg.task.task == "validation":
+        model = ValidateModel(cfg)
+        trainer.validate(model)
+    if cfg.task.task == "inference":
+        model = InferenceModel(cfg)
+        trainer.predict(model)


 if __name__ == "__main__":
yolo/tools/data_loader.py CHANGED
@@ -170,7 +170,7 @@ def collate_fn(batch: List[Tuple[Tensor, Tensor]]) -> Tuple[Tensor, List[Tensor]
     """
     batch_size = len(batch)
     target_sizes = [item[1].size(0) for item in batch]
-    # TODO: Improve readability of these proccess
+    # TODO: Improve readability of these process
     # TODO: remove maxBbox or reduce loss function memory usage
     batch_targets = torch.zeros(batch_size, min(max(target_sizes), 100), 5)
     batch_targets[:, :, 0] = -1
yolo/tools/loss_functions.py CHANGED
@@ -119,7 +119,7 @@ class DualLoss:

     def __call__(
         self, aux_predicts: List[Tensor], main_predicts: List[Tensor], targets: Tensor
-    ) -> Tuple[Tensor, Dict[str, Tensor]]:
+    ) -> Tuple[Tensor, Dict[str, float]]:
         # TODO: Need Refactor this region, make it flexible!
         aux_iou, aux_dfl, aux_cls = self.loss(aux_predicts, targets)
         main_iou, main_dfl, main_cls = self.loss(main_predicts, targets)
yolo/tools/solver.py CHANGED
@@ -1,8 +1,5 @@
-import time
 from pathlib import Path

-import cv2
-import numpy as np
 from lightning import LightningModule
 from torchmetrics.detection import MeanAveragePrecision

@@ -12,7 +9,7 @@ from yolo.tools.data_loader import create_dataloader
 from yolo.tools.drawer import draw_bboxes
 from yolo.tools.loss_functions import create_loss_function
 from yolo.utils.bounding_box_utils import create_converter, to_metrics_format
-from yolo.utils.model_utils import PostProccess, create_optimizer, create_scheduler
+from yolo.utils.model_utils import PostProcess, create_optimizer, create_scheduler


 class BaseModel(LightningModule):
@@ -40,14 +37,14 @@ class ValidateModel(BaseModel):
         self.vec2box = create_converter(
             self.cfg.model.name, self.model, self.cfg.model.anchor, self.cfg.image_size, self.device
         )
-        self.post_proccess = PostProccess(self.vec2box, self.validation_cfg.nms)
+        self.post_process = PostProcess(self.vec2box, self.validation_cfg.nms)

     def val_dataloader(self):
         return self.val_loader

     def validation_step(self, batch, batch_idx):
         batch_size, images, targets, rev_tensor, img_paths = batch
-        predicts = self.post_proccess(self(images))
+        predicts = self.post_process(self(images))
         batch_metrics = self.metric(
             [to_metrics_format(predict) for predict in predicts], [to_metrics_format(target) for target in targets]
         )
@@ -139,16 +136,7 @@ class InferenceModel(BaseModel):
             self._save_image(img, batch_idx)
         return img, fps

-    def _display_stream(self, img):
-        img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
-        fps = 1 / (time.time() - self.trainer.current_epoch_start_time)
-        cv2.putText(img, f"FPS: {fps:.2f}", (0, 15), 0, 0.5, (100, 255, 0), 1, cv2.LINE_AA)
-        cv2.imshow("Prediction", img)
-        if cv2.waitKey(1) & 0xFF == ord("q"):
-            self.trainer.should_stop = True
-        return fps
-
     def _save_image(self, img, batch_idx):
-        save_image_path = Path(self.logger.save_dir) / f"frame{batch_idx:03d}.png"
+        save_image_path = Path(self.trainer.default_root_dir) / f"frame{batch_idx:03d}.png"
         img.save(save_image_path)
         print(f"💾 Saved visualize image at {save_image_path}")
yolo/utils/bounding_box_utils.py CHANGED
@@ -4,10 +4,11 @@ from typing import Dict, List, Optional, Tuple, Union
 import torch
 import torch.nn.functional as F
 from einops import rearrange
-from torch import Tensor, arange, tensor
+from torch import Tensor, tensor
+from torchmetrics.detection import MeanAveragePrecision
 from torchvision.ops import batched_nms

-from yolo.config.config import AnchorConfig, MatcherConfig, ModelConfig, NMSConfig
+from yolo.config.config import AnchorConfig, MatcherConfig, NMSConfig
 from yolo.model.yolo import YOLO
 from yolo.utils.logger import logger

@@ -431,50 +432,9 @@ def bbox_nms(cls_dist: Tensor, bbox: Tensor, nms_cfg: NMSConfig, confidence: Opt
     return predicts_nms


-def calculate_map(predictions, ground_truths, iou_thresholds=arange(0.5, 1, 0.05)) -> Dict[str, Tensor]:
-    # TODO: Refactor this block, Flexible for calculate different mAP condition?
-    device = predictions.device
-    n_preds = predictions.size(0)
-    n_gts = (ground_truths[:, 0] != -1).sum()
-    ground_truths = ground_truths[:n_gts]
-    aps = []
-
-    ious = calculate_iou(predictions[:, 1:-1], ground_truths[:, 1:])  # [n_preds, n_gts]
-
-    for threshold in iou_thresholds:
-        tp = torch.zeros(n_preds, device=device, dtype=bool)
-
-        max_iou, max_indices = ious.max(dim=1)
-        above_threshold = max_iou >= threshold
-        matched_classes = predictions[:, 0] == ground_truths[max_indices, 0]
-        max_match = torch.zeros_like(ious)
-        max_match[arange(n_preds), max_indices] = max_iou
-        if max_match.size(0):
-            tp[max_match.argmax(dim=0)] = True
-        tp[~above_threshold | ~matched_classes] = False
-
-        _, indices = torch.sort(predictions[:, 1], descending=True)
-        tp = tp[indices]
-
-        tp_cumsum = torch.cumsum(tp, dim=0)
-        fp_cumsum = torch.cumsum(~tp, dim=0)
-
-        precision = tp_cumsum / (tp_cumsum + fp_cumsum + 1e-6)
-        recall = tp_cumsum / (n_gts + 1e-6)
-
-        precision = torch.cat([torch.ones(1, device=device), precision, torch.zeros(1, device=device)])
-        recall = torch.cat([torch.zeros(1, device=device), recall, torch.ones(1, device=device)])
-
-        precision, _ = torch.cummax(precision.flip(0), dim=0)
-        precision = precision.flip(0)
-
-        ap = torch.trapezoid(precision, recall)
-        aps.append(ap)
-
-    mAP = {
-        "mAP.5": aps[0],
-        "mAP.5:.95": torch.mean(torch.stack(aps)),
-    }
+def calculate_map(predictions, ground_truths) -> Dict[str, Tensor]:
+    metric = MeanAveragePrecision(iou_type="bbox", box_format="xyxy")
+    mAP = metric([to_metrics_format(predictions)], [to_metrics_format(ground_truths)])
     return mAP

yolo/utils/logging_utils.py CHANGED
@@ -223,7 +223,7 @@ class ImageLogger(Callback):
         logger.log_image("Prediction", images, step=step, boxes=[log_bbox(pred_boxes)])


-def setup_logger(logger_name):
+def setup_logger(logger_name, quite=False):
     class EmojiFormatter(logging.Formatter):
         def format(self, record, emoji=":high_voltage:"):
             return f"{emoji} {super().format(record)}"
@@ -234,16 +234,14 @@ def setup_logger(logger_name):
     if rich_logger:
         rich_logger.handlers.clear()
         rich_logger.addHandler(rich_handler)
+        if quite:
+            rich_logger.setLevel(logging.ERROR)


 def setup(cfg: Config):
-    # seed_everything(cfg.lucky_number)
-    if hasattr(cfg, "quite"):
-        logger.removeHandler("YOLO_logger")
-        return
-
-    setup_logger("lightning.fabric")
-    setup_logger("lightning.pytorch")
+    quite = hasattr(cfg, "quite")
+    setup_logger("lightning.fabric", quite=quite)
+    setup_logger("lightning.pytorch", quite=quite)

     def custom_wandb_log(string="", level=int, newline=True, repeat=True, prefix=True, silent=False):
         if silent:
@@ -256,6 +254,11 @@ def setup(cfg: Config):
     save_path = validate_log_directory(cfg, cfg.name)

     progress, loggers = [], []
+
+    if quite:
+        logger.setLevel(logging.ERROR)
+        return progress, loggers, save_path
+
     progress.append(YOLORichProgressBar())
     progress.append(YOLORichModelSummary())
     progress.append(ImageLogger())
@@ -264,7 +267,7 @@ def setup(cfg: Config):
     if cfg.use_wandb:
         loggers.append(WandbLogger(project="YOLO", name=cfg.name, save_dir=save_path, id=None))

-    return progress, loggers
+    return progress, loggers, save_path


 def log_model_structure(model: Union[ModuleList, YOLOLayer, YOLO]):
@@ -312,7 +315,8 @@ def validate_log_directory(cfg: Config, exp_name: str) -> Path:
     )

     save_path.mkdir(parents=True, exist_ok=True)
-    logger.info(f"📄 Created log folder: [blue b u]{save_path}[/]")
+    if not getattr(cfg, "quite", False):
+        logger.info(f"📄 Created log folder: [blue b u]{save_path}[/]")
     logger.addHandler(FileHandler(save_path / "output.log"))
     return save_path

yolo/utils/model_utils.py CHANGED
@@ -124,7 +124,7 @@ def get_device(device_spec: Union[str, int, List[int]]) -> torch.device:
     return device, ddp_flag


-class PostProccess:
+class PostProcess:
     """
     TODO: function document
     scale back the prediction and do nms for pred_bbox