wasmdashai committed on
Commit
4e77db4
·
verified ·
1 Parent(s): b93784b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +77 -77
app.py CHANGED
@@ -407,47 +407,47 @@ class TrinerModelVITS:
407
  self.len_dataset=len(self.DataSets['train'])
408
  self.load_model()
409
  self.init_wandb()
410
- # self.training_args=load_training_args(self.path_training_args)
411
- # training_args= self.training_args
412
- # scaler = GradScaler(enabled=True)
413
- # for disc in self.model.discriminator.discriminators:
414
- # disc.apply_weight_norm()
415
- # self.model.decoder.apply_weight_norm()
416
- # # torch.nn.utils.weight_norm(self.decoder.conv_pre)
417
- # # torch.nn.utils.weight_norm(self.decoder.conv_post)
418
- # for flow in self.model.flow.flows:
419
- # torch.nn.utils.weight_norm(flow.conv_pre)
420
- # torch.nn.utils.weight_norm(flow.conv_post)
421
 
422
- # discriminator = self.model.discriminator
423
- # self.model.discriminator = None
424
 
425
- # optimizer = torch.optim.AdamW(
426
- # self.model.parameters(),
427
- # 2e-4,
428
- # betas=[0.8, 0.99],
429
- # # eps=training_args.adam_epsilon,
430
- # )
431
 
432
- # # Hack to be able to train on multiple device
433
- # disc_optimizer = torch.optim.AdamW(
434
- # discriminator.parameters(),
435
- # 2e-4,
436
- # betas=[0.8, 0.99],
437
- # # eps=training_args.adam_epsilon,
438
- # )
439
- # lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(
440
- # optimizer,gamma=0.999875, last_epoch=-1
441
- # )
442
- # disc_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(
443
- # disc_optimizer, gamma=0.999875,last_epoch=-1
444
- # )
445
- # self.models=(self.model,discriminator)
446
- # self.optimizers=(optimizer,disc_optimizer,scaler)
447
- # self.lr_schedulers=(lr_scheduler,disc_lr_scheduler)
448
- # self.tools=load_tools()
449
- # self.stute_mode=True
450
- # print(self.lr_schedulers)
451
 
452
 
453
 
@@ -502,49 +502,49 @@ class TrinerModelVITS:
502
  training_args.num_train_epochs=4
503
  training_args.eval_steps=1000
504
 
505
- set_seed(training_args.seed)
506
- scaler = GradScaler(enabled=training_args.fp16)
507
 
508
 
509
- # Initialize optimizer, lr_scheduler
510
- for disc in self.model.discriminator.discriminators:
511
- disc.apply_weight_norm()
512
- self.model.decoder.apply_weight_norm()
513
- # torch.nn.utils.weight_norm(self.decoder.conv_pre)
514
- # torch.nn.utils.weight_norm(self.decoder.conv_post)
515
- for flow in self.model.flow.flows:
516
- torch.nn.utils.weight_norm(flow.conv_pre)
517
- torch.nn.utils.weight_norm(flow.conv_post)
518
 
519
- discriminator = self.model.discriminator
520
- self.model.discriminator = None
521
 
522
- optimizer = torch.optim.AdamW(
523
- self.model.parameters(),
524
- training_args.learning_rate,
525
- betas=[training_args.adam_beta1, training_args.adam_beta2],
526
- eps=training_args.adam_epsilon,
527
- )
528
 
529
- # Hack to be able to train on multiple device
530
- disc_optimizer = torch.optim.AdamW(
531
- discriminator.parameters(),
532
- training_args.d_learning_rate,
533
- betas=[training_args.d_adam_beta1, training_args.d_adam_beta2],
534
- eps=training_args.adam_epsilon,
535
- )
536
- lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(
537
- optimizer, gamma=training_args.lr_decay, last_epoch=-1
538
- )
539
- disc_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(
540
- disc_optimizer, gamma=training_args.lr_decay, last_epoch=-1
541
- )
542
- self.models=(self.model,discriminator)
543
- self.optimizers=(optimizer,disc_optimizer,scaler)
544
- self.lr_schedulers=(lr_scheduler,disc_lr_scheduler)
545
- self.tools=load_tools()
546
- self.stute_mode=True
547
- print(self.lr_schedulers)
548
 
549
 
550
 
@@ -633,7 +633,7 @@ pro=TrinerModelVITS(dir_model=dir_model,
633
  device=device
634
  )
635
 
636
- @spaces.GPU(duration=120)
637
  def run_train_epoch(num):
638
  if num >0:
639
  pro.init_training()
 
407
  self.len_dataset=len(self.DataSets['train'])
408
  self.load_model()
409
  self.init_wandb()
410
+ self.training_args=load_training_args(self.path_training_args)
411
+ training_args= self.training_args
412
+ scaler = GradScaler(enabled=True)
413
+ for disc in self.model.discriminator.discriminators:
414
+ disc.apply_weight_norm()
415
+ self.model.decoder.apply_weight_norm()
416
+ # torch.nn.utils.weight_norm(self.decoder.conv_pre)
417
+ # torch.nn.utils.weight_norm(self.decoder.conv_post)
418
+ for flow in self.model.flow.flows:
419
+ torch.nn.utils.weight_norm(flow.conv_pre)
420
+ torch.nn.utils.weight_norm(flow.conv_post)
421
 
422
+ discriminator = self.model.discriminator
423
+ self.model.discriminator = None
424
 
425
+ optimizer = torch.optim.AdamW(
426
+ self.model.parameters(),
427
+ 2e-4,
428
+ betas=[0.8, 0.99],
429
+ # eps=training_args.adam_epsilon,
430
+ )
431
 
432
+ # Hack to be able to train on multiple device
433
+ disc_optimizer = torch.optim.AdamW(
434
+ discriminator.parameters(),
435
+ 2e-4,
436
+ betas=[0.8, 0.99],
437
+ # eps=training_args.adam_epsilon,
438
+ )
439
+ lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(
440
+ optimizer,gamma=0.999875, last_epoch=-1
441
+ )
442
+ disc_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(
443
+ disc_optimizer, gamma=0.999875,last_epoch=-1
444
+ )
445
+ self.models=(self.model,discriminator)
446
+ self.optimizers=(optimizer,disc_optimizer,scaler)
447
+ self.lr_schedulers=(lr_scheduler,disc_lr_scheduler)
448
+ self.tools=load_tools()
449
+ self.stute_mode=True
450
+ print(self.lr_schedulers)
451
 
452
 
453
 
 
502
  training_args.num_train_epochs=4
503
  training_args.eval_steps=1000
504
 
505
+ # set_seed(training_args.seed)
506
+ # scaler = GradScaler(enabled=training_args.fp16)
507
 
508
 
509
+ # # Initialize optimizer, lr_scheduler
510
+ # for disc in self.model.discriminator.discriminators:
511
+ # disc.apply_weight_norm()
512
+ # self.model.decoder.apply_weight_norm()
513
+ # # torch.nn.utils.weight_norm(self.decoder.conv_pre)
514
+ # # torch.nn.utils.weight_norm(self.decoder.conv_post)
515
+ # for flow in self.model.flow.flows:
516
+ # torch.nn.utils.weight_norm(flow.conv_pre)
517
+ # torch.nn.utils.weight_norm(flow.conv_post)
518
 
519
+ # discriminator = self.model.discriminator
520
+ # self.model.discriminator = None
521
 
522
+ # optimizer = torch.optim.AdamW(
523
+ # self.model.parameters(),
524
+ # training_args.learning_rate,
525
+ # betas=[training_args.adam_beta1, training_args.adam_beta2],
526
+ # eps=training_args.adam_epsilon,
527
+ # )
528
 
529
+ # # Hack to be able to train on multiple device
530
+ # disc_optimizer = torch.optim.AdamW(
531
+ # discriminator.parameters(),
532
+ # training_args.d_learning_rate,
533
+ # betas=[training_args.d_adam_beta1, training_args.d_adam_beta2],
534
+ # eps=training_args.adam_epsilon,
535
+ # )
536
+ # lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(
537
+ # optimizer, gamma=training_args.lr_decay, last_epoch=-1
538
+ # )
539
+ # disc_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(
540
+ # disc_optimizer, gamma=training_args.lr_decay, last_epoch=-1
541
+ # )
542
+ # self.models=(self.model,discriminator)
543
+ # self.optimizers=(optimizer,disc_optimizer,scaler)
544
+ # self.lr_schedulers=(lr_scheduler,disc_lr_scheduler)
545
+ # self.tools=load_tools()
546
+ # self.stute_mode=True
547
+ # print(self.lr_schedulers)
548
 
549
 
550
 
 
633
  device=device
634
  )
635
 
636
+ @spaces.GPU(duration=5)
637
  def run_train_epoch(num):
638
  if num >0:
639
  pro.init_training()