HoneyTian committed on
Commit
1a4d07a
·
1 Parent(s): 6c8bea2
examples/mpnet_aishell/step_2_train_model.py CHANGED
@@ -204,11 +204,11 @@ def main():
204
  logger.info(f"load state dict for optim_g.")
205
  with open(optim_g_pth.as_posix(), "rb") as f:
206
  state_dict = torch.load(f, map_location="cpu", weights_only=True)
207
- optim_g.load_state_dict(state_dict, strict=True)
208
  logger.info(f"load state dict for optim_d.")
209
  with open(optim_d_pth.as_posix(), "rb") as f:
210
  state_dict = torch.load(f, map_location="cpu", weights_only=True)
211
- optim_d.load_state_dict(state_dict, strict=True)
212
 
213
  scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=config.lr_decay, last_epoch=last_epoch)
214
  scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=config.lr_decay, last_epoch=last_epoch)
 
204
  logger.info(f"load state dict for optim_g.")
205
  with open(optim_g_pth.as_posix(), "rb") as f:
206
  state_dict = torch.load(f, map_location="cpu", weights_only=True)
207
+ optim_g.load_state_dict(state_dict)
208
  logger.info(f"load state dict for optim_d.")
209
  with open(optim_d_pth.as_posix(), "rb") as f:
210
  state_dict = torch.load(f, map_location="cpu", weights_only=True)
211
+ optim_d.load_state_dict(state_dict)
212
 
213
  scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=config.lr_decay, last_epoch=last_epoch)
214
  scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=config.lr_decay, last_epoch=last_epoch)