henry000 committed
Commit fa09d11 · 1 Parent(s): 73b88fc

♻️ [Refactor] os.path -> pathlib Path, more safely

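For quick reference, the os.path idioms removed by this commit map onto pathlib roughly as follows (a minimal sketch; the sample path is illustrative, not taken from the repo):

from pathlib import Path

p = Path("weights") / "v9-c.pt"               # os.path.join("weights", "v9-c.pt")
p.exists(); p.is_file(); p.is_dir()           # os.path.exists / os.path.isfile / os.path.isdir
p.name                                        # os.path.basename(p)
p.stem                                        # os.path.splitext(os.path.basename(p))[0]
p.parent.mkdir(parents=True, exist_ok=True)   # os.makedirs(os.path.dirname(p), exist_ok=True)
p.unlink()                                    # os.remove(p)
sorted(q.name for q in p.parent.iterdir())    # sorted(os.listdir(os.path.dirname(p)))
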
yolo/config/task/inference.yaml CHANGED
@@ -1,6 +1,6 @@
 task: inference
 
-fast_inference: # onnx, trt or Empty
+fast_inference: # onnx, trt, deploy or Empty
 data:
   source: demo/images/inference/image.png
   image_size: ${image_size}

yolo/model/yolo.py CHANGED
@@ -1,5 +1,5 @@
-import os
-from typing import Dict, List, Optional, Union
+from pathlib import Path
+from typing import Dict, List, Union
 
 import torch
 from loguru import logger
@@ -116,7 +116,7 @@ class YOLO(nn.Module):
             raise ValueError(f"Unsupported layer type: {layer_type}")
 
 
-def create_model(model_cfg: ModelConfig, weight_path: Union[bool, str] = True, class_num: int = 80) -> YOLO:
+def create_model(model_cfg: ModelConfig, weight_path: Union[bool, Path] = True, class_num: int = 80) -> YOLO:
     """Constructs and returns a model from a Dictionary configuration file.
 
     Args:
@@ -129,11 +129,11 @@ def create_model(model_cfg: ModelConfig, weight_path: Union[bool, str] = True, c
     model = YOLO(model_cfg, class_num)
     if weight_path:
         if weight_path == True:
-            weight_path = os.path.join("weights", f"{model_cfg.name}.pt")
-        if not os.path.exists(weight_path):
+            weight_path = Path("weights") / f"{model_cfg.name}.pt"
+        if not weight_path.exists():
             logger.info(f"🌐 Weight {weight_path} not found, try downloading")
             prepare_weight(weight_path=weight_path)
-        if os.path.exists(weight_path):
+        if weight_path.exists():
             model.model.load_state_dict(torch.load(weight_path, map_location=torch.device("cpu")), strict=False)
             logger.info("✅ Success load model & weight")
         else:

yolo/tools/data_conversion.py CHANGED
@@ -1,5 +1,5 @@
 import json
-import os
+from pathlib import Path
 from typing import Dict, List, Optional
 
 from tqdm import tqdm
@@ -17,14 +17,14 @@ def discretize_categories(categories: List[Dict[str, int]]) -> Dict[int, int]:
 def process_annotations(
     image_annotations: Dict[int, List[Dict]],
     image_info_dict: Dict[int, tuple],
-    output_dir: str,
+    output_dir: Path,
     id_to_idx: Optional[Dict[int, int]] = None,
 ) -> None:
     """
     Process and save annotations to files, with option to remap category IDs.
     """
     for image_id, annotations in tqdm(image_annotations.items(), desc="Processing annotations"):
-        file_path = os.path.join(output_dir, f"{image_id:0>12}.txt")
+        file_path = output_dir / "{image_id:0>12}.txt"
         if not annotations:
             continue
         with open(file_path, "w") as file:
@@ -73,7 +73,7 @@ def convert_annotations(json_file: str, output_dir: str) -> None:
     with open(json_file) as file:
         data = json.load(file)
 
-    os.makedirs(output_dir, exist_ok=True)
+    Path(output_dir).mkdir(exist_ok=True)
 
     image_info_dict = {img["id"]: (img["width"], img["height"]) for img in data.get("images", [])}
     id_to_idx = discretize_categories(data.get("categories", [])) if "categories" in data else None

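Note that the zero-padded file name relies on an f-string: the format spec inside the braces only takes effect when the literal carries the f prefix. A minimal sketch of the pathlib form with the padding applied, using an illustrative numeric image_id:

from pathlib import Path

output_dir = Path("labels")
image_id = 42
file_path = output_dir / f"{image_id:0>12}.txt"   # labels/000000000042.txt
# Without the f prefix, "{image_id:0>12}.txt" is used verbatim as the file name.
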
yolo/tools/data_loader.py CHANGED
@@ -1,5 +1,4 @@
-import os
-from os import path
+from pathlib import Path
 from queue import Empty, Queue
 from threading import Event, Thread
 from typing import Generator, List, Tuple, Union
@@ -39,22 +38,22 @@ class YoloDataset(Dataset):
         transforms = [eval(aug)(prob) for aug, prob in augment_cfg.items()]
         self.transform = AugmentationComposer(transforms, self.image_size)
         self.transform.get_more_data = self.get_more_data
-        self.data = self.load_data(dataset_cfg.path, phase_name)
+        self.data = self.load_data(Path(dataset_cfg.path), phase_name)
 
-    def load_data(self, dataset_path, phase_name):
+    def load_data(self, dataset_path: Path, phase_name: str):
         """
         Loads data from a cache or generates a new cache for a specific dataset phase.
 
         Parameters:
-            dataset_path (str): The root path to the dataset directory.
+            dataset_path (Path): The root path to the dataset directory.
             phase_name (str): The specific phase of the dataset (e.g., 'train', 'test') to load or generate data for.
 
         Returns:
            dict: The loaded data from the cache for the specified phase.
         """
-        cache_path = path.join(dataset_path, f"{phase_name}.cache")
+        cache_path = dataset_path / f"{phase_name}.cache"
 
-        if not path.isfile(cache_path):
+        if not cache_path.exists():
             logger.info("🏭 Generating {} cache", phase_name)
             data = self.filter_data(dataset_path, phase_name)
             torch.save(data, cache_path)
@@ -63,20 +62,20 @@ class YoloDataset(Dataset):
             logger.info("📦 Loaded {} cache", phase_name)
         return data
 
-    def filter_data(self, dataset_path: str, phase_name: str) -> list:
+    def filter_data(self, dataset_path: Path, phase_name: str) -> list:
         """
         Filters and collects dataset information by pairing images with their corresponding labels.
 
         Parameters:
-            images_path (str): Path to the directory containing image files.
+            images_path (Path): Path to the directory containing image files.
             labels_path (str): Path to the directory containing label files.
 
         Returns:
             list: A list of tuples, each containing the path to an image file and its associated segmentation as a tensor.
         """
-        images_path = path.join(dataset_path, "images", phase_name)
+        images_path = dataset_path / "images" / phase_name
         labels_path, data_type = locate_label_paths(dataset_path, phase_name)
-        images_list = sorted(os.listdir(images_path))
+        images_list = sorted([p.name for p in Path(images_path).iterdir() if p.is_file()])
         if data_type == "json":
             annotations_index, image_info_dict = create_image_metadata(labels_path)
 
@@ -85,7 +84,7 @@ class YoloDataset(Dataset):
         for image_name in track(images_list, description="Filtering data"):
             if not image_name.lower().endswith((".jpg", ".jpeg", ".png")):
                 continue
-            image_id, _ = path.splitext(image_name)
+            image_id = Path(image_name).stem
 
             if data_type == "json":
                 image_info = image_info_dict.get(image_id, None)
@@ -97,8 +96,8 @@ class YoloDataset(Dataset):
                     continue
 
             elif data_type == "txt":
-                label_path = path.join(labels_path, f"{image_id}.txt")
-                if not path.isfile(label_path):
+                label_path = labels_path / f"{image_id}.txt"
+                if not label_path.is_file():
                     continue
                 with open(label_path, "r") as file:
                     image_seg_annotations = [list(map(float, line.strip().split())) for line in file]
@@ -107,13 +106,13 @@ class YoloDataset(Dataset):
 
             labels = self.load_valid_labels(image_id, image_seg_annotations)
 
-            img_path = path.join(images_path, image_name)
+            img_path = images_path / image_name
             data.append((img_path, labels))
             valid_inputs += 1
         logger.info("Recorded {}/{} valid inputs", valid_inputs, len(images_list))
         return data
 
-    def load_valid_labels(self, label_path, seg_data_one_img) -> Union[torch.Tensor, None]:
+    def load_valid_labels(self, label_path: str, seg_data_one_img: list) -> Union[torch.Tensor, None]:
         """
         Loads and validates bounding box data is [0, 1] from a label file.
 
@@ -215,9 +214,9 @@ def create_dataloader(data_cfg: DataConfig, dataset_cfg: DatasetConfig, task: st
 
 class StreamDataLoader:
     def __init__(self, data_cfg: DataConfig):
-        self.source = data_cfg.source
+        self.source = Path(data_cfg.source)
         self.running = True
-        self.is_stream = isinstance(self.source, int) or self.source.lower().startswith("rtmp://")
+        self.is_stream = isinstance(self.source, int) or str(self.source).lower().startswith("rtmp://")
 
         self.transform = AugmentationComposer([], data_cfg.image_size)
         self.stop_event = Event()
@@ -230,20 +229,20 @@ class StreamDataLoader:
         self.thread.start()
 
     def load_source(self):
-        if os.path.isdir(self.source):  # image folder
+        if self.source.is_dir():  # image folder
             self.load_image_folder(self.source)
-        elif any(self.source.lower().endswith(ext) for ext in [".mp4", ".avi", ".mkv"]):  # Video file
+        elif any(self.source.suffix.lower().endswith(ext) for ext in [".mp4", ".avi", ".mkv"]):  # Video file
             self.load_video_file(self.source)
         else:  # Single image
             self.process_image(self.source)
 
     def load_image_folder(self, folder):
-        for root, _, files in os.walk(folder):
-            for file in files:
-                if self.stop_event.is_set():
-                    break
-                if any(file.lower().endswith(ext) for ext in [".jpg", ".jpeg", ".png", ".bmp"]):
-                    self.process_image(os.path.join(root, file))
+        folder_path = Path(folder)
+        for file_path in folder_path.rglob("*"):
+            if self.stop_event.is_set():
+                break
+            if file_path.suffix.lower() in [".jpg", ".jpeg", ".png", ".bmp"]:
+                self.process_image(file_path)
 
     def process_image(self, image_path):
         image = Image.open(image_path).convert("RGB")

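Path.rglob("*") walks the tree recursively much like os.walk, but it yields directories as well as files, which is why the suffix check guards process_image above. A minimal standalone sketch of the same traversal (the function name and extension list are illustrative, and an is_file() check is added for robustness):

from pathlib import Path

def iter_images(folder, exts=(".jpg", ".jpeg", ".png", ".bmp")):
    # Recursive walk; skip directories and non-image files before processing.
    for file_path in Path(folder).rglob("*"):
        if file_path.is_file() and file_path.suffix.lower() in exts:
            yield file_path
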
yolo/tools/dataset_preparation.py CHANGED
@@ -1,5 +1,5 @@
-import os
 import zipfile
+from pathlib import Path
 from typing import Optional
 
 import requests
@@ -9,7 +9,7 @@ from rich.progress import BarColumn, Progress, TextColumn, TimeRemainingColumn
 from yolo.config.config import DatasetConfig
 
 
-def download_file(url, destination):
+def download_file(url, destination: Path):
     """
     Downloads a file from the specified URL to the destination path with progress logging.
     """
@@ -25,7 +25,7 @@ def download_file(url, destination):
         "•",
         TimeRemainingColumn(),
     ) as progress:
-        task = progress.add_task(f"📥 Downloading {os.path.basename(destination)}...", total=total_size)
+        task = progress.add_task(f"📥 Downloading {destination.name}...", total=total_size)
         with open(destination, "wb") as file:
             for data in response.iter_content(chunk_size=1024 * 1024):  # 1 MB chunks
                 file.write(data)
@@ -33,14 +33,14 @@ def download_file(url, destination):
     logger.info("✅ Download completed.")
 
 
-def unzip_file(source, destination):
+def unzip_file(source: Path, destination: Path):
     """
     Extracts a ZIP file to the specified directory and removes the ZIP file after extraction.
     """
-    logger.info(f"Unzipping {os.path.basename(source)}...")
+    logger.info(f"Unzipping {source.name}...")
     with zipfile.ZipFile(source, "r") as zip_ref:
         zip_ref.extractall(destination)
-    os.remove(source)
+    source.unlink()
     logger.info(f"Removed {source}.")
 
 
@@ -48,7 +48,7 @@ def check_files(directory, expected_count=None):
     """
     Returns True if the number of files in the directory matches expected_count, False otherwise.
     """
-    files = [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))]
+    files = [f.name for f in Path(directory).iterdir() if f.is_file()]
     return len(files) == expected_count if expected_count is not None else bool(files)
 
 
@@ -57,7 +57,7 @@ def prepare_dataset(dataset_cfg: DatasetConfig, task: str):
     Prepares dataset by downloading and unzipping if necessary.
     """
     # TODO: do EDA of dataset
-    data_dir = dataset_cfg.path
+    data_dir = Path(dataset_cfg.path)
     for data_type, settings in dataset_cfg.auto_download.items():
         base_url = settings["base_url"]
         for dataset_type, dataset_args in settings.items():
@@ -65,16 +65,16 @@ def prepare_dataset(dataset_cfg: DatasetConfig, task: str):
                 continue
             file_name = f"{dataset_args.get('file_name', dataset_type)}.zip"
             url = f"{base_url}{file_name}"
-            local_zip_path = os.path.join(data_dir, file_name)
-            extract_to = os.path.join(data_dir, data_type) if data_type != "annotations" else data_dir
-            final_place = os.path.join(extract_to, dataset_type)
+            local_zip_path = data_dir / file_name
+            extract_to = data_dir / data_type if data_type != "annotations" else data_dir
+            final_place = extract_to / dataset_type
 
-            os.makedirs(final_place, exist_ok=True)
+            final_place.mkdir(exist_ok=True)
             if check_files(final_place, dataset_args.get("file_num")):
                 logger.info(f"✅ Dataset {dataset_type: <12} already verified.")
                 continue
 
-            if not os.path.exists(local_zip_path):
+            if not local_zip_path.exists():
                 download_file(url, local_zip_path)
             unzip_file(local_zip_path, extract_to)
 
@@ -82,16 +82,16 @@ def prepare_dataset(dataset_cfg: DatasetConfig, task: str):
             logger.error(f"Error verifying the {dataset_type} dataset after extraction.")
 
 
-def prepare_weight(download_link: Optional[str] = None, weight_path: str = "v9-c.pt"):
-    weight_name = os.path.basename(weight_path)
+def prepare_weight(download_link: Optional[str] = None, weight_path: Path = "v9-c.pt"):
+    weight_name = weight_path.name
     if download_link is None:
         download_link = "https://github.com/WongKinYiu/yolov9mit/releases/download/v1.0-alpha/"
     weight_link = f"{download_link}{weight_name}"
 
-    if not os.path.isdir(os.path.dirname(weight_path)):
-        os.makedirs(os.path.dirname(weight_path))
+    if not weight_path.parent.is_dir():
+        weight_path.parent.mkdir(parents=True, exist_ok=True)
 
-    if os.path.exists(weight_path):
+    if weight_path.exists():
         logger.info(f"Weight file '{weight_path}' already exists.")
     try:
         download_file(weight_link, weight_path)

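One behavioral difference worth keeping in mind: os.makedirs creates missing parent directories, while Path.mkdir does not unless parents=True is passed, as prepare_weight does above. A minimal sketch with an illustrative nested path:

from pathlib import Path

target = Path("data/coco/train2017")
target.mkdir(parents=True, exist_ok=True)   # behaves like os.makedirs(target, exist_ok=True)
# target.mkdir(exist_ok=True) alone raises FileNotFoundError if "data/coco" does not exist yet.
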
yolo/tools/drawer.py CHANGED
@@ -1,4 +1,3 @@
-import os
 import random
 from typing import List, Optional, Union
 

yolo/tools/solver.py CHANGED
@@ -1,6 +1,7 @@
+import contextlib
+import io
 import json
 import os
-import sys
 import time
 from collections import defaultdict
 from typing import Dict, Optional
@@ -46,7 +47,8 @@ class ModelTrainer:
         self.num_epochs = cfg.task.epoch
         self.mAPs_dict = defaultdict(list)
 
-        os.makedirs(os.path.join(self.progress.save_path, "weights"), exist_ok=True)
+        self.weights_dir = self.progress.save_path / "weights"
+        self.weights_dir.mkdir(exist_ok=True)
 
         if not progress.quite_mode:
             log_model_structure(model.model)
@@ -102,7 +104,7 @@ class ModelTrainer:
 
     def save_checkpoint(self, epoch_idx: int, file_name: Optional[str] = None):
         file_name = file_name or f"E{epoch_idx:03d}.pt"
-        file_path = os.path.join(self.progress.save_path, "weights", file_name)
+        file_path = self.weights_dir / file_name
 
         checkpoint = {
             "epoch": epoch_idx,
@@ -152,7 +154,7 @@ class ModelTester:
         self.progress = progress
 
         self.post_proccess = PostProccess(vec2box, cfg.task.nms)
-        self.save_path = os.path.join(progress.save_path, "images")
+        self.save_path = progress.save_path / "images"
         os.makedirs(self.save_path, exist_ok=True)
         self.save_predict = getattr(cfg.task, "save_predict", None)
         self.idx2label = cfg.class_list
@@ -187,7 +189,7 @@ class ModelTester:
             if not self.save_predict:
                 continue
             if self.save_predict != False:
-                save_image_path = os.path.join(self.save_path, f"frame{idx:03d}.png")
+                save_image_path = self.save_path / f"frame{idx:03d}.png"
                 img.save(save_image_path)
                 logger.info(f"💾 Saved visualize image at {save_image_path}")
 
@@ -215,12 +217,11 @@ class ModelValidator:
         self.progress = progress
 
         self.post_proccess = PostProccess(vec2box, validation_cfg.nms)
-        self.json_path = os.path.join(self.progress.save_path, f"predict.json")
+        self.json_path = self.progress.save_path / "predict.json"
 
-        sys.stdout = open(os.devnull, "w")
-        # TODO: load with config file
-        self.coco_gt = COCO("data/coco/annotations/instances_val2017.json")
-        sys.stdout = sys.__stdout__
+        with contextlib.redirect_stdout(io.StringIO()):
+            # TODO: load with config file
+            self.coco_gt = COCO("data/coco/annotations/instances_val2017.json")
 
     def solve(self, dataloader, epoch_idx=-1):
         # logger.info("🧪 Start Validation!")

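The contextlib.redirect_stdout(io.StringIO()) block is a scoped, exception-safe replacement for swapping sys.stdout by hand: stdout is restored automatically even if COCO(...) raises. A minimal sketch of the pattern, wrapping a hypothetical noisy callable:

import contextlib
import io

def call_quietly(fn, *args, **kwargs):
    # Capture anything fn prints (e.g. pycocotools' annotation-loading chatter)
    # and restore sys.stdout when the block exits, normally or with an exception.
    with contextlib.redirect_stdout(io.StringIO()):
        return fn(*args, **kwargs)
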
yolo/utils/bounding_box_utils.py CHANGED
@@ -1,5 +1,5 @@
 import math
-from typing import List, Tuple
+from typing import Dict, List, Tuple
 
 import torch
 import torch.nn.functional as F
@@ -333,7 +333,7 @@ def bbox_nms(cls_dist: Tensor, bbox: Tensor, nms_cfg: NMSConfig):
     return predicts_nms
 
 
-def calculate_map(predictions, ground_truths, iou_thresholds=arange(0.5, 1, 0.05)):
+def calculate_map(predictions, ground_truths, iou_thresholds=arange(0.5, 1, 0.05)) -> Dict[str, Tensor]:
     # TODO: Refactor this block, Flexible for calculate different mAP condition?
     device = predictions.device
     n_preds = predictions.size(0)
@@ -375,5 +375,8 @@ def calculate_map(predictions, ground_truths, iou_thresholds=arange(0.5, 1, 0.05
 
         aps.append(ap)
 
-    mean_ap = torch.mean(torch.stack(aps))
-    return mean_ap, aps[0]
+    mAP = {
+        "mAP.5": torch.mean(torch.stack(aps)),
+        "mAP.5:.95": aps[0],
+    }
+    return mAP

yolo/utils/dataset_utils.py CHANGED
@@ -1,7 +1,7 @@
 import json
 import os
 from itertools import chain
-from os import path
+from pathlib import Path
 from typing import Any, Dict, List, Optional, Tuple
 
 import numpy as np
@@ -10,25 +10,25 @@ from loguru import logger
 from yolo.tools.data_conversion import discretize_categories
 
 
-def locate_label_paths(dataset_path: str, phase_name: str):
+def locate_label_paths(dataset_path: Path, phase_name: Path) -> Tuple[Path, Path]:
     """
     Find the path to label files for a specified dataset and phase(e.g. training).
 
     Args:
-        dataset_path (str): The path to the root directory of the dataset.
-        phase_name (str): The name of the phase for which labels are being searched (e.g., "train", "val", "test").
+        dataset_path (Path): The path to the root directory of the dataset.
+        phase_name (Path): The name of the phase for which labels are being searched (e.g., "train", "val", "test").
 
     Returns:
-        Tuple[str, str]: A tuple containing the path to the labels file and the file format ("json" or "txt").
+        Tuple[Path, Path]: A tuple containing the path to the labels file and the file format ("json" or "txt").
     """
-    json_labels_path = path.join(dataset_path, "annotations", f"instances_{phase_name}.json")
+    json_labels_path = dataset_path / "annotations" / f"instances_{phase_name}.json"
 
-    txt_labels_path = path.join(dataset_path, "labels", phase_name)
+    txt_labels_path = dataset_path / "labels" / phase_name
 
-    if path.isfile(json_labels_path):
+    if json_labels_path.is_file():
         return json_labels_path, "json"
 
-    elif path.isdir(txt_labels_path):
+    elif txt_labels_path.is_dir():
         txt_files = [f for f in os.listdir(txt_labels_path) if f.endswith(".txt")]
         if txt_files:
             return txt_labels_path, "txt"
@@ -52,7 +52,7 @@ def create_image_metadata(labels_path: str) -> Tuple[Dict[str, List], Dict[str,
         labels_data = json.load(file)
     id_to_idx = discretize_categories(labels_data.get("categories", [])) if "categories" in labels_data else None
     annotations_index = organize_annotations_by_image(labels_data, id_to_idx)  # check lookup is a good name?
-    image_info_dict = {path.splitext(img["file_name"])[0]: img for img in labels_data["images"]}
+    image_info_dict = {Path(img["file_name"]).stem: img for img in labels_data["images"]}
     return annotations_index, image_info_dict
 
 

yolo/utils/deploy_utils.py CHANGED
@@ -1,4 +1,4 @@
-import os
+from pathlib import Path
 
 import torch
 from loguru import logger
@@ -14,8 +14,8 @@ class FastModelLoader:
         self.compiler = cfg.task.fast_inference
         self._validate_compiler()
         if cfg.weight == True:
-            cfg.weight = os.path.join("weights", f"{cfg.model.name}.pt")
-        self.model_path = f"{os.path.splitext(cfg.weight)[0]}.{self.compiler}"
+            cfg.weight = Path("weights") / f"{cfg.model.name}.pt"
+        self.model_path = f"{Path(cfg.weight).stem}.{self.compiler}"
 
     def _validate_compiler(self):
         if self.compiler not in ["onnx", "trt", "deploy"]:

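Path.stem and os.path.splitext differ in one detail: splitext keeps the parent directory while stem drops it, so the two model_path expressions above are not byte-for-byte equivalent when cfg.weight includes a directory. Path.with_suffix is the closest pathlib counterpart; a minimal comparison with an illustrative weights/v9-c.pt path:

import os
from pathlib import Path

weight = Path("weights/v9-c.pt")
os.path.splitext(weight)[0] + ".onnx"   # 'weights/v9-c.onnx'  (directory kept)
f"{weight.stem}.onnx"                   # 'v9-c.onnx'          (directory dropped)
weight.with_suffix(".onnx")             # Path('weights/v9-c.onnx')
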
yolo/utils/logging_utils.py CHANGED
@@ -14,8 +14,10 @@ Example:
 import os
 import sys
 from collections import deque
+from pathlib import Path
 from typing import Any, Dict, List
 
+import numpy as np
 import wandb
 import wandb.errors.term
 from loguru import logger
@@ -64,6 +66,7 @@ class ProgressLogger(Progress):
         self.ap_table = Table()
         # TODO: load maxlen by config files
         self.ap_past_list = deque(maxlen=5)
+        self.last_result = 0
         super().__init__(*args, *progress_bar, **kwargs)
 
         self.use_wandb = cfg.use_wandb
@@ -121,17 +124,20 @@ class ProgressLogger(Progress):
         self.batch_task = self.add_task("[green]Run pycocotools", total=1)
 
     def finish_pycocotools(self, result, epoch_idx=-1):
-        ap_table, ap_main = make_ap_table(result, self.ap_past_list, epoch_idx)
+        ap_table, ap_main = make_ap_table(result, self.ap_past_list, self.last_result, epoch_idx)
+        self.last_result = np.maximum(result, self.last_result)
         self.ap_past_list.append((epoch_idx, ap_main))
         self.ap_table = ap_table
 
         if self.use_wandb:
-            self.wandb.log({"PyCOCO/AP @ .5:.95": ap_main[1], "PyCOCO/AP @ .5": ap_main[3]})
+            self.wandb.log({"PyCOCO/AP @ .5:.95": ap_main[2], "PyCOCO/AP @ .5": ap_main[5]})
         self.update(self.batch_task, advance=1)
         self.refresh()
         self.remove_task(self.batch_task)
 
     def finish_train(self):
+        self.remove_task(self.task_epoch)
+        self.stop()
         self.wandb.finish()
 
 
@@ -167,23 +173,23 @@ def log_model_structure(model: List[YOLOLayer]):
     console.print(table)
 
 
-def validate_log_directory(cfg: Config, exp_name: str):
-    base_path = os.path.join(cfg.out_path, cfg.task.task)
-    save_path = os.path.join(base_path, exp_name)
+def validate_log_directory(cfg: Config, exp_name: str) -> Path:
+    base_path = Path(cfg.out_path, cfg.task.task)
+    save_path = base_path / exp_name
 
     if not cfg.exist_ok:
         index = 1
         old_exp_name = exp_name
-        while os.path.isdir(save_path):
+        while save_path.is_dir():
             exp_name = f"{old_exp_name}{index}"
-            save_path = os.path.join(base_path, exp_name)
+            save_path = base_path / exp_name
             index += 1
         if index > 1:
            logger.opt(colors=True).warning(
                f"🔀 Experiment directory exists! Changed <red>{old_exp_name}</> to <green>{exp_name}</>"
            )
 
-    os.makedirs(save_path, exist_ok=True)
+    save_path.mkdir(exist_ok=True)
     logger.opt(colors=True).info(f"📄 Created log folder: <u><fg #808080>{save_path}</></>")
-    logger.add(os.path.join(save_path, "output.log"), mode="w", backtrace=True, diagnose=True)
+    logger.add(save_path / "output.log", mode="w", backtrace=True, diagnose=True)
     return save_path

yolo/utils/solver_utils.py CHANGED
@@ -1,6 +1,7 @@
 import contextlib
 import io
 
+import numpy as np
 from pycocotools.coco import COCO
 from pycocotools.cocoeval import COCOeval
 from rich.table import Table
@@ -16,7 +17,7 @@ def calculate_ap(coco_gt: COCO, pd_path):
     return coco_eval.stats
 
 
-def make_ap_table(score, past_result=[], epoch=-1):
+def make_ap_table(score, past_result=[], last_score=None, epoch=-1):
     ap_table = Table()
     ap_table.add_column("Epoch", justify="center", style="white", width=5)
     ap_table.add_column("Avg. Precision", justify="left", style="cyan")
@@ -24,22 +25,24 @@ def make_ap_table(score, past_result=[], epoch=-1):
     ap_table.add_column("Avg. Recall", justify="left", style="cyan")
     ap_table.add_column("", justify="right", style="green", width=5)
 
-    for eps, (ap_name1, ap_value1, ap_name2, ap_value2) in past_result:
-        ap_table.add_row(f"{eps: 3d}", ap_name1, f"{ap_value1:.2f}", ap_name2, f"{ap_value2:.2f}")
+    for eps, (ap_name1, ap_color1, ap_value1, ap_name2, ap_color2, ap_value2) in past_result:
+        ap_table.add_row(f"{eps: 3d}", ap_name1, f"{ap_color1}{ap_value1:.2f}", ap_name2, f"{ap_color2}{ap_value2:.2f}")
     if past_result:
         ap_table.add_row()
 
-    this_ap = ("AP @ .5:.95", score[0], "AP @ .5", score[1])
+    color = np.where(last_score <= score, "[green]", "[red]")
+
+    this_ap = ("AP @ .5:.95", color[0], score[0], "AP @ .5", color[1], score[1])
     metrics = [
-        ("AP @ .5:.95", score[0], "AR maxDets 1", score[6]),
-        ("AP @ .5", score[1], "AR maxDets 10", score[7]),
-        ("AP @ .75", score[2], "AR maxDets 100", score[8]),
-        ("AP (small)", score[3], "AR (small)", score[9]),
-        ("AP (medium)", score[4], "AR (medium)", score[10]),
-        ("AP (large)", score[5], "AR (large)", score[11]),
+        ("AP @ .5:.95", color[0], score[0], "AR maxDets 1", color[6], score[6]),
+        ("AP @ .5", color[1], score[1], "AR maxDets 10", color[7], score[7]),
+        ("AP @ .75", color[2], score[2], "AR maxDets 100", color[8], score[8]),
+        ("AP (small)", color[3], score[3], "AR (small)", color[9], score[9]),
+        ("AP (medium)", color[4], score[4], "AR (medium)", color[10], score[10]),
+        ("AP (large)", color[5], score[5], "AR (large)", color[11], score[11]),
     ]
 
-    for ap_name, ap_value, ar_name, ar_value in metrics:
-        ap_table.add_row(f"{epoch: 3d}", ap_name, f"{ap_value:.2f}", ar_name, f"{ar_value:.2f}")
+    for ap_name, ap_color, ap_value, ar_name, ar_color, ar_value in metrics:
+        ap_table.add_row(f"{epoch: 3d}", ap_name, f"{ap_color}{ap_value:.2f}", ar_name, f"{ar_color}{ar_value:.2f}")
 
     return ap_table, this_ap
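
The color array comes from an element-wise np.where over the COCO stats vector: each AP/AR cell is tagged [green] when it matches or beats the best value seen so far (tracked as last_result in ProgressLogger) and [red] otherwise, and rich interprets the tag as console markup. A minimal sketch of that coloring step, with illustrative scores:

import numpy as np

last_score = np.array([0.40, 0.60])
score = np.array([0.45, 0.55])
color = np.where(last_score <= score, "[green]", "[red]")   # array(['[green]', '[red]'])
cell = f"{color[0]}{score[0]:.2f}"                          # "[green]0.45", rendered as a green cell by rich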