W&B: track batch size after autobatch (#6039)
* track batch size after autobatch
* remove redundant import
* Update __init__.py
* Update __init__.py
Co-authored-by: Glenn Jocher <[email protected]>
- train.py +1 -0
- utils/callbacks.py +1 -1
- utils/loggers/__init__.py +6 -0
train.py
CHANGED
@@ -138,6 +138,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
     # Batch size
     if RANK == -1 and batch_size == -1:  # single-GPU only, estimate best batch size
         batch_size = check_train_batch_size(model, imgsz)
+        loggers.on_params_update({"batch_size": batch_size})
 
     # Optimizer
     nbs = 64  # nominal batch size
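For context, the intent of the added call is to push the batch size that AutoBatch actually selects into the experiment loggers, so the value recorded with the run matches what training really used. Below is a minimal, self-contained sketch of that flow; DummyLoggers and estimate_batch_size are hypothetical stand-ins invented for illustration, not YOLOv5 code.

# Minimal sketch of the new flow (illustration only; DummyLoggers and
# estimate_batch_size are hypothetical stand-ins, not YOLOv5 code).
class DummyLoggers:
    def __init__(self):
        self.params = {}

    def on_params_update(self, params):
        # mirror the real hook: merge updated params into the experiment record
        self.params.update(params)


def estimate_batch_size():
    # stand-in for check_train_batch_size(model, imgsz)
    return 16


loggers = DummyLoggers()
batch_size = -1  # -1 asks AutoBatch to pick a value, as in train.py
if batch_size == -1:
    batch_size = estimate_batch_size()
    loggers.on_params_update({"batch_size": batch_size})  # the line this PR adds

print(loggers.params)  # {'batch_size': 16}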
utils/callbacks.py
CHANGED
@@ -32,7 +32,7 @@ class Callbacks:
         'on_fit_epoch_end': [],  # fit = train + val
         'on_model_save': [],
         'on_train_end': [],
-
+        'on_params_update': [],
         'teardown': [],
     }
 
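The 'on_params_update' entry registers a new hook name in the callback registry so it can be fired like any other training event. A trimmed sketch of that registry pattern follows; the register_action/run method names mirror the real class, but the class below is a simplified illustration, not the actual utils/callbacks.py.

# Simplified sketch of the hook registry pattern (not the full Callbacks class).
class Callbacks:
    def __init__(self):
        self._callbacks = {
            'on_train_end': [],
            'on_params_update': [],  # the hook added by this PR
        }

    def register_action(self, hook, name='', callback=None):
        # attach a callable to a named hook; unknown hooks are rejected
        assert hook in self._callbacks, f"hook '{hook}' not found in callbacks"
        assert callable(callback), "callback is not callable"
        self._callbacks[hook].append({'name': name, 'callback': callback})

    def run(self, hook, *args, **kwargs):
        # fire every callback registered for the hook
        for entry in self._callbacks.get(hook, []):
            entry['callback'](*args, **kwargs)


callbacks = Callbacks()
callbacks.register_action('on_params_update', name='print_params', callback=print)
callbacks.run('on_params_update', {'batch_size': 16})  # prints {'batch_size': 16}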
utils/loggers/__init__.py
CHANGED
@@ -157,3 +157,9 @@ class Loggers():
             else:
                 self.wandb.finish_run()
                 self.wandb = WandbLogger(self.opt)
+
+    def on_params_update(self, params):
+        # Update hyperparams or configs of the experiment
+        # params: A dict containing {param: value} pairs
+        if self.wandb:
+            self.wandb.wandb_run.config.update(params, allow_val_change=True)
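The new Loggers.on_params_update method is a no-op unless a W&B run is active; when one is, wandb's config.update with allow_val_change=True lets a batch_size already present in the run config be overwritten with the autobatched value rather than raising an error. A usage sketch follows with the W&B objects mocked out (MockConfig and MockWandbLogger are invented for illustration); with a real run, self.wandb.wandb_run is the live wandb run object.

# Usage sketch with W&B mocked out (MockConfig and MockWandbLogger are
# illustration-only stand-ins, not wandb or YOLOv5 classes).
class MockConfig(dict):
    def update(self, params, allow_val_change=False):
        # imitate wandb's behaviour: changing an existing key needs allow_val_change=True
        for k, v in params.items():
            if k in self and not allow_val_change:
                raise ValueError(f"key '{k}' already set; pass allow_val_change=True")
            self[k] = v


class MockWandbLogger:
    class _Run:
        def __init__(self):
            self.config = MockConfig()

    def __init__(self):
        self.wandb_run = self._Run()


class Loggers:
    def __init__(self, wandb=None):
        self.wandb = wandb  # None when W&B logging is disabled

    def on_params_update(self, params):
        # same shape as the new method: silently skip without an active W&B run
        if self.wandb:
            self.wandb.wandb_run.config.update(params, allow_val_change=True)


Loggers(wandb=None).on_params_update({'batch_size': 16})  # no W&B run -> no-op
active = Loggers(wandb=MockWandbLogger())
active.on_params_update({'batch_size': 16})  # records the autobatched value
active.on_params_update({'batch_size': 32})  # allowed to overwrite, no error raised
print(active.wandb.wandb_run.config)  # {'batch_size': 32}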