henry000 committed on
Commit 802cb12 · Parent(s): 3d0f0de

πŸ› [Fix] display emoji bugs, change it to shortcode

yolo/model/yolo.py CHANGED
@@ -32,10 +32,10 @@ class YOLO(nn.Module):
     def build_model(self, model_arch: Dict[str, List[Dict[str, Dict[str, Dict]]]]):
         self.layer_index = {}
         output_dim, layer_idx = [3], 1
-        logger.info(f"🚜 Building YOLO")
+        logger.info(f":tractor: Building YOLO")
         for arch_name in model_arch:
             if model_arch[arch_name]:
-                logger.info(f" 🏗️ Building {arch_name}")
+                logger.info(f" :building_construction: Building {arch_name}")
             for layer_idx, layer_spec in enumerate(model_arch[arch_name], start=layer_idx):
                 layer_type, layer_info = next(iter(layer_spec.items()))
                 layer_args = layer_info.get("args", {})
@@ -123,7 +123,7 @@ class YOLO(nn.Module):
             weights: A OrderedDict containing the new weights.
         """
         if isinstance(weights, Path):
-            weights = torch.load(weights, map_location=torch.device("cpu"))
+            weights = torch.load(weights, map_location=torch.device("cpu"), weights_only=False)
         if "model_state_dict" in weights:
             weights = weights["model_state_dict"]
 
@@ -144,7 +144,7 @@ class YOLO(nn.Module):
 
         for error_name, error_set in error_dict.items():
             for weight_name in error_set:
-                logger.warning(f"⚠️ Weight {error_name} for key: {'.'.join(weight_name)}")
+                logger.warning(f":warning: Weight {error_name} for key: {'.'.join(weight_name)}")
 
         self.model.load_state_dict(model_state_dict)
 
@@ -171,7 +171,7 @@ def create_model(model_cfg: ModelConfig, weight_path: Union[bool, Path] = True,
             prepare_weight(weight_path=weight_path)
         if weight_path.exists():
             model.save_load_weights(weight_path)
-            logger.info("✅ Success load model & weight")
+            logger.info(":white_check_mark: Success load model & weight")
         else:
-            logger.info("✅ Success load model")
+            logger.info(":white_check_mark: Success load model")
     return model
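
A note on the weights_only=False argument added in the second hunk: newer PyTorch releases (2.6 and later) default torch.load to weights_only=True, which refuses to unpickle anything other than plain tensor data, so checkpoints that bundle extra objects alongside the state dict would fail to load. A minimal sketch of the same loading pattern, assuming a trusted checkpoint file (the filename here is hypothetical, not part of this commit):

import torch

# weights_only=False allows arbitrary pickled objects to be deserialized,
# so only pass it for checkpoint files you trust.
weights = torch.load("v9-c.pt", map_location=torch.device("cpu"), weights_only=False)
if "model_state_dict" in weights:
    weights = weights["model_state_dict"]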
yolo/tools/data_loader.py CHANGED
@@ -48,12 +48,12 @@ class YoloDataset(Dataset):
         cache_path = dataset_path / f"{phase_name}.cache"
 
         if not cache_path.exists():
-            logger.info(f"🏭 Generating {phase_name} cache")
+            logger.info(f":factory: Generating {phase_name} cache")
             data = self.filter_data(dataset_path, phase_name)
             torch.save(data, cache_path)
         else:
             data = torch.load(cache_path, weights_only=False)
-            logger.info(f"📦 Loaded {phase_name} cache")
+            logger.info(f":package: Loaded {phase_name} cache")
         return data
 
     def filter_data(self, dataset_path: Path, phase_name: str) -> list:
yolo/tools/dataset_preparation.py CHANGED
@@ -30,7 +30,7 @@ def download_file(url, destination: Path):
             for data in response.iter_content(chunk_size=1024 * 1024):  # 1 MB chunks
                 file.write(data)
                 progress.update(task, advance=len(data))
-    logger.info("✅ Download completed.")
+    logger.info(":white_check_mark: Download completed.")
 
 
 def unzip_file(source: Path, destination: Path):
@@ -71,7 +71,7 @@ def prepare_dataset(dataset_cfg: DatasetConfig, task: str):
 
         final_place.mkdir(parents=True, exist_ok=True)
         if check_files(final_place, dataset_args.get("file_num")):
-            logger.info(f"✅ Dataset {dataset_type: <12} already verified.")
+            logger.info(f":white_check_mark: Dataset {dataset_type: <12} already verified.")
             continue
 
         if not local_zip_path.exists():
yolo/tools/drawer.py CHANGED
@@ -121,6 +121,6 @@ def draw_model(*, model_cfg: ModelConfig = None, model: YOLO = None, v7_base=Fal
             dot.edge(str(idx), str(jdx))
     try:
         dot.render("Model-arch", format="png", cleanup=True)
-        logger.info("🎨 Drawing Model Architecture at Model-arch.png")
+        logger.info(":artist_palette: Drawing Model Architecture at Model-arch.png")
     except:
-        logger.warning("⚠️ Could not find graphviz backend, continue without drawing the model architecture")
+        logger.warning(":warning: Could not find graphviz backend, continue without drawing the model architecture")
yolo/tools/loss_functions.py CHANGED
@@ -136,5 +136,5 @@ class DualLoss:
 def create_loss_function(cfg: Config, vec2box) -> DualLoss:
     # TODO: make it flexible, if cfg doesn't contain aux, only use SingleLoss
     loss_function = DualLoss(cfg, vec2box)
-    logger.info("✅ Success load loss function")
+    logger.info(":white_check_mark: Success load loss function")
     return loss_function
yolo/utils/bounding_box_utils.py CHANGED
@@ -270,7 +270,7 @@ class Vec2Box:
         self.device = device
 
         if hasattr(anchor_cfg, "strides"):
-            logger.info(f"🈶 Found stride of model {anchor_cfg.strides}")
+            logger.info(f":japanese_not_free_of_charge_button: Found stride of model {anchor_cfg.strides}")
             self.strides = anchor_cfg.strides
         else:
             logger.info("🧸 Found no stride of model, performed a dummy test for auto-anchor size")
@@ -314,7 +314,7 @@ class Anc2Box:
         self.device = device
 
         if hasattr(anchor_cfg, "strides"):
-            logger.info(f"🈶 Found stride of model {anchor_cfg.strides}")
+            logger.info(f":japanese_not_free_of_charge_button: Found stride of model {anchor_cfg.strides}")
             self.strides = anchor_cfg.strides
         else:
             logger.info("🧸 Found no stride of model, performed a dummy test for auto-anchor size")
yolo/utils/deploy_utils.py CHANGED
@@ -21,10 +21,10 @@ class FastModelLoader:
 
     def _validate_compiler(self):
         if self.compiler not in ["onnx", "trt", "deploy"]:
-            logger.warning(f"⚠️ Compiler '{self.compiler}' is not supported. Using original model.")
+            logger.warning(f":warning: Compiler '{self.compiler}' is not supported. Using original model.")
             self.compiler = None
         if self.cfg.device == "mps" and self.compiler == "trt":
-            logger.warning("🍎 TensorRT does not support MPS devices. Using original model.")
+            logger.warning(":red_apple: TensorRT does not support MPS devices. Using original model.")
             self.compiler = None
 
     def load_model(self, device):
@@ -59,7 +59,7 @@ class FastModelLoader:
         providers = ["CUDAExecutionProvider"]
         try:
             ort_session = InferenceSession(self.model_path, providers=providers)
-            logger.info("🚀 Using ONNX as MODEL frameworks!")
+            logger.info(":rocket: Using ONNX as MODEL frameworks!")
         except Exception as e:
             logger.warning(f"🈳 Error loading ONNX model: {e}")
             ort_session = self._create_onnx_model(providers)
@@ -79,7 +79,7 @@ class FastModelLoader:
             output_names=["output"],
             dynamic_axes={"input": {0: "batch_size"}, "output": {0: "batch_size"}},
         )
-        logger.info(f"📥 ONNX model saved to {self.model_path}")
+        logger.info(f":inbox_tray: ONNX model saved to {self.model_path}")
         return InferenceSession(self.model_path, providers=providers)
 
     def _load_trt_model(self):
@@ -88,7 +88,7 @@ class FastModelLoader:
         try:
             model_trt = TRTModule()
             model_trt.load_state_dict(torch.load(self.model_path))
-            logger.info("🚀 Using TensorRT as MODEL frameworks!")
+            logger.info(":rocket: Using TensorRT as MODEL frameworks!")
         except FileNotFoundError:
             logger.warning(f"🈳 No found model weight at {self.model_path}")
             model_trt = self._create_trt_model()
@@ -102,5 +102,5 @@ class FastModelLoader:
         logger.info(f"♻️ Creating TensorRT model")
         model_trt = torch2trt(model.cuda(), [dummy_input])
         torch.save(model_trt.state_dict(), self.model_path)
-        logger.info(f"📥 TensorRT model saved to {self.model_path}")
+        logger.info(f":inbox_tray: TensorRT model saved to {self.model_path}")
         return model_trt
yolo/utils/logger.py CHANGED
@@ -3,8 +3,8 @@ import logging
 from rich.console import Console
 from rich.logging import RichHandler
 
-logger = logging.getLogger("YOLO_logger")
+logger = logging.getLogger(__name__)
 logger.setLevel(logging.DEBUG)
 logger.propagate = False
 if not logger.hasHandlers():
-    logger.addHandler(RichHandler(console=Console(), show_level=True, show_path=True, show_time=True))
+    logger.addHandler(RichHandler(console=Console(), show_level=True, show_path=True, show_time=True, markup=True))
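
The markup=True flag added above is what makes the shortcode switch effective: with markup enabled, RichHandler parses Rich markup in log messages, and emoji shortcodes such as :rocket: or :white_check_mark: are replaced with the corresponding emoji during rendering rather than printed literally. A minimal standalone sketch, assuming only the rich package (the logger name and message below are placeholders, not part of this commit):

import logging

from rich.console import Console
from rich.logging import RichHandler

logger = logging.getLogger("demo")
logger.setLevel(logging.DEBUG)
# markup=True lets RichHandler interpret Rich markup, including emoji shortcodes.
logger.addHandler(RichHandler(console=Console(), show_level=True, show_path=True, show_time=True, markup=True))

logger.info(":white_check_mark: Success load model")  # rendered with a check-mark emoji

One trade-off: with markup enabled, literal square brackets in a message can be read as markup tags, so messages that interpolate lists or tensor shapes may need escaping with rich.markup.escape.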