henry000 committed
Commit 7f8fc3e · Parent: 7e9db93

✨ [Add] TensorBoard support, finish #52

requirements-dev.txt CHANGED
@@ -3,3 +3,4 @@ pytest
 pytest-cov
 pre-commit
 pycocotools
+tensorboard
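
The new dev dependency backs the lazy torch.utils.tensorboard import added in yolo/utils/logging_utils.py below. A quick environment sanity check (a hypothetical snippet, not part of the commit):

# Hypothetical check: confirm the optional TensorBoard backend resolves
# before turning on the config flag that the logger reads.
try:
    from torch.utils.tensorboard import SummaryWriter  # noqa: F401
except ImportError as err:
    raise SystemExit("TensorBoard missing; run: pip install tensorboard") from err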
yolo/config/config.py CHANGED
@@ -144,7 +144,6 @@ class Config:
     device: Union[str, int, List[int]]
     cpu_num: int
 
-    class_idx_id: List[int]
     image_size: List[int]
 
     out_path: str
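
Note: the logger below reads cfg.use_tensorboard, but this commit only shows the unused class_idx_id field being removed; the new toggle itself is not visible in this hunk. A minimal sketch of the assumed field, in the dataclass style used above (name taken from the logger, placement hypothetical):

from dataclasses import dataclass

# Sketch of the assumed toggle; the real Config carries many more fields.
@dataclass
class Config:
    use_wandb: bool        # existing W&B toggle (name assumed from the logger)
    use_tensorboard: bool  # new flag read by ProgressLogger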
yolo/utils/logging_utils.py CHANGED
@@ -16,7 +16,7 @@ import random
 import sys
 from collections import deque
 from pathlib import Path
-from typing import Any, Dict, Union
+from typing import Any, Dict, List, Optional, Tuple, Union
 
 import numpy as np
 import torch
@@ -93,6 +93,12 @@ class ProgressLogger(Progress):
             project="YOLO", resume="allow", mode="online", dir=self.save_path, id=None, name=exp_name
         )
 
+        self.use_tensorboard = cfg.use_tensorboard
+        if self.use_tensorboard:
+            from torch.utils.tensorboard import SummaryWriter
+
+            self.tb_writer = SummaryWriter(log_dir=self.save_path / "tensorboard")
+
     def get_renderable(self):
         renderable = Group(*self.get_renderables(), self.ap_table)
         return renderable
@@ -108,11 +114,17 @@ class ProgressLogger(Progress):
         if hasattr(self, "task_epoch"):
             self.update(self.task_epoch, description=f"[cyan] Preparing Data")
 
-        if self.use_wandb and optimizer is not None:
+        if optimizer is not None:
             lr_values = [params["lr"] for params in optimizer.param_groups]
             lr_names = ["Learning Rate/bias", "Learning Rate/norm", "Learning Rate/conv"]
-            for lr_name, lr_value in zip(lr_names, lr_values):
-                self.wandb.log({lr_name: lr_value}, step=epoch_idx)
+            if self.use_wandb:
+                for lr_name, lr_value in zip(lr_names, lr_values):
+                    self.wandb.log({lr_name: lr_value}, step=epoch_idx)
+
+            if self.use_tensorboard:
+                for lr_name, lr_value in zip(lr_names, lr_values):
+                    self.tb_writer.add_scalar(lr_name, lr_value, global_step=epoch_idx)
+
         self.batch_task = self.add_task(f"[green] Phase: {task}", total=num_batches)
 
     def one_batch(self, batch_info: Dict[str, Tensor] = None):
@@ -135,6 +147,10 @@ class ProgressLogger(Progress):
         batch_info = {f"{prefix}/{key}": value for key, value in batch_info.items()}
         if self.use_wandb:
             self.wandb.log(batch_info, step=epoch_idx)
+        if self.use_tensorboard:
+            for key, value in batch_info.items():
+                self.tb_writer.add_scalar(key, value, epoch_idx)
+
         self.remove_task(self.batch_task)
 
     def start_pycocotools(self):
@@ -148,6 +164,11 @@ class ProgressLogger(Progress):
 
         if self.use_wandb:
             self.wandb.log({"PyCOCO/AP @ .5:.95": ap_main[2], "PyCOCO/AP @ .5": ap_main[5]})
+        if self.use_tensorboard:
+            # TODO: waiting for torch bug fix, https://github.com/pytorch/pytorch/issues/32651
+            self.tb_writer.add_scalar("PyCOCO/AP @ .5:.95", ap_main[2], epoch_idx)
+            self.tb_writer.add_scalar("PyCOCO/AP @ .5", ap_main[5], epoch_idx)
+
         self.update(self.batch_task, advance=1)
         self.refresh()
         self.remove_task(self.batch_task)
@@ -157,6 +178,8 @@ class ProgressLogger(Progress):
         self.stop()
         if self.use_wandb:
             self.wandb.finish()
+        if self.use_tensorboard:
+            self.tb_writer.close()
 
 
 def custom_wandb_log(string="", level=int, newline=True, repeat=True, prefix=True, silent=False):
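
Taken together, the lifecycle this commit wires up is: create one SummaryWriter per run, call add_scalar per metric per step, and close() on teardown. A minimal standalone sketch of that pattern (paths and values are placeholders, not from the repo):

from pathlib import Path

from torch.utils.tensorboard import SummaryWriter

# Placeholder run directory; the logger uses self.save_path / "tensorboard".
writer = SummaryWriter(log_dir=Path("runs/train") / "tensorboard")
for epoch_idx, lr in enumerate([1e-3, 8e-4, 6e-4]):
    # Mirrors the per-epoch learning-rate logging added above.
    writer.add_scalar("Learning Rate/conv", lr, global_step=epoch_idx)
writer.close()

The resulting event files can then be inspected with: tensorboard --logdir runs/train/tensorboard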