Nanobit committed
Commit 3a56cac · unverified · 1 Parent(s): b75c432

Fix batch-size on resume for multi-gpu (#1942)

Files changed (1)
  1. train.py +1 -1
train.py CHANGED
@@ -477,7 +477,7 @@ if __name__ == '__main__':
         apriori = opt.global_rank, opt.local_rank
         with open(Path(ckpt).parent.parent / 'opt.yaml') as f:
             opt = argparse.Namespace(**yaml.load(f, Loader=yaml.FullLoader))  # replace
-        opt.cfg, opt.weights, opt.resume, opt.global_rank, opt.local_rank = '', ckpt, True, *apriori  # reinstate
+        opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = '', ckpt, True, opt.total_batch_size, *apriori  # reinstate
         logger.info('Resuming training from %s' % ckpt)
     else:
         # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml')
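
In multi-GPU (DDP) runs, train.py derives the per-process opt.batch_size by dividing opt.total_batch_size by the world size, and opt.yaml stores that already-divided value. Without this change, resuming reloads the divided value and divides it again, so the batch size shrinks on every resume; reinstating opt.batch_size from opt.total_batch_size restores the intended split. Below is a minimal sketch of the effect; the ddp_setup() helper and the saved-opt round trip are illustrative assumptions mirroring the surrounding train.py logic, not code from this commit.

# Sketch of the double-division issue fixed by the added reinstatement.
import argparse

def ddp_setup(opt, world_size=2):
    # train.py splits the total batch across DDP processes after the resume block
    opt.total_batch_size = opt.batch_size
    opt.batch_size = opt.total_batch_size // world_size
    return opt

# First run: --batch-size 64 on 2 GPUs -> opt.yaml stores batch_size=32, total_batch_size=64
first = ddp_setup(argparse.Namespace(batch_size=64), world_size=2)
saved = vars(first).copy()  # stand-in for the reloaded opt.yaml

# Resume WITHOUT the fix: the already-divided value is divided again
old = ddp_setup(argparse.Namespace(**saved), world_size=2)
print(old.batch_size)  # 16 -- shrinks on every resume

# Resume WITH the fix: batch_size is reinstated from total_batch_size first
new = argparse.Namespace(**saved)
new.batch_size = new.total_batch_size  # the line added in this commit, in effect
new = ddp_setup(new, world_size=2)
print(new.batch_size)  # 32 -- per-GPU batch matches the original run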