astoken committed
Commit 490f1e7 · 1 Parent(s): 4418809

add save_dir arg to plot_lr_scheduler, default to current dir.


Uncomment plot_lr_scheduler in train() and pass log_dir as save location

Files changed (2):
  1. train.py +1 -1
  2. utils/utils.py +1 -1
train.py CHANGED
@@ -148,7 +148,7 @@ def train(hyp):
     scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
     scheduler.last_epoch = start_epoch - 1  # do not move
     # https://discuss.pytorch.org/t/a-problem-occured-when-resuming-an-optimizer/28822
-    # plot_lr_scheduler(optimizer, scheduler, epochs)
+    plot_lr_scheduler(optimizer, scheduler, epochs, save_dir = log_dir)

     # Initialize distributed training
     if device.type != 'cpu' and torch.cuda.device_count() > 1 and torch.distributed.is_available():
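For context, a minimal usage sketch of the re-enabled call is below. The model, learning-rate lambda, and save_dir value are illustrative placeholders, and the target directory is assumed to exist; only plot_lr_scheduler's new save_dir argument comes from this commit.

import math

import torch.nn as nn
from torch.optim import SGD, lr_scheduler

from utils.utils import plot_lr_scheduler

# Placeholder model and optimizer standing in for train()'s real ones (assumptions, not repo values).
model = nn.Linear(10, 1)
optimizer = SGD(model.parameters(), lr=0.01)

epochs = 300
lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * 0.9 + 0.1  # cosine decay, illustrative only
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)

# With this commit, the figure is written under save_dir instead of the current directory.
plot_lr_scheduler(optimizer, scheduler, epochs, save_dir='runs/exp0')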
utils/utils.py CHANGED
@@ -1005,7 +1005,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max
     return mosaic


-def plot_lr_scheduler(optimizer, scheduler, epochs=300):
+def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir='./'):
     # Plot LR simulating training for full epochs
     optimizer, scheduler = copy(optimizer), copy(scheduler)  # do not modify originals
     y = []
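The hunk above only shows the first lines of the function. A minimal sketch of how a plot_lr_scheduler like this can thread save_dir through to the saved figure follows; the loop body and the 'LR.png' filename are assumptions for illustration, not the repository's confirmed code.

import os
from copy import copy

import matplotlib.pyplot as plt


def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir='./'):
    # Plot LR simulating training for full epochs
    optimizer, scheduler = copy(optimizer), copy(scheduler)  # do not modify originals
    y = []
    for _ in range(epochs):
        scheduler.step()                           # advance one simulated epoch
        y.append(optimizer.param_groups[0]['lr'])  # record the resulting LR

    plt.plot(y, '.-', label='LR')
    plt.xlabel('epoch')
    plt.ylabel('LR')
    plt.tight_layout()
    # The new save_dir argument decides where the figure lands; the default './'
    # keeps the old current-directory behaviour (filename 'LR.png' is an assumption).
    plt.savefig(os.path.join(save_dir, 'LR.png'), dpi=200)
    plt.close()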