wasmdashai committed
Commit 1e1ef94 · verified · 1 Parent(s): a3f4d62

Update app.py

Files changed (1):
  1. app.py +25 -23
app.py CHANGED
@@ -565,29 +565,8 @@ dir_model='wasmdashai/vits-ar-huba-fine'
 
 global_step=0
 wandb.login(key= "782b6a6e82bbb5a5348de0d3c7d40d1e76351e79")
-parser = HfArgumentParser((ModelArguments, DataTrainingArguments, VITSTrainingArguments))
-json_file = os.path.abspath('VitsModelSplit/finetune_config_ara.json')
-model_args, data_args, training_args = parser.parse_json_file(json_file = json_file)
-print('start')
-sgl=get_state_grad_loss(mel=True,
-                        # generator=False,
-                        # discriminator=False,
-                        duration=False)
-
-print(training_args)
-training_args.num_train_epochs=1000
-training_args.fp16=True
-training_args.eval_steps=300
-training_args.weight_kl=1
-training_args.d_learning_rate=2e-4
-training_args.learning_rate=2e-4
-training_args.weight_mel=45
-training_args.num_train_epochs=4
-training_args.eval_steps=1000
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-print(device)
 
-b=int(id)
+
 ctrain_datasets,eval_dataset,full_generation_dataset=get_data_loader(train_dataset_dirs = train_dataset_dirs,
                                                                      eval_dataset_dir = os.path.join(dataset_dir,'eval'),
                                                                      full_generation_dir = os.path.join(dataset_dir,'full_generation'),
@@ -602,13 +581,36 @@ print('loadeed')
 @spaces.GPU
 def greet(text,id):
     global GK
+    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, VITSTrainingArguments))
+    json_file = os.path.abspath('VitsModelSplit/finetune_config_ara.json')
+    model_args, data_args, training_args = parser.parse_json_file(json_file = json_file)
+    print('start')
+    sgl=get_state_grad_loss(mel=True,
+                            # generator=False,
+                            # discriminator=False,
+                            duration=False)
+
+    print(training_args)
+    training_args.num_train_epochs=1000
+    training_args.fp16=True
+    training_args.eval_steps=300
+    training_args.weight_kl=1
+    training_args.d_learning_rate=2e-4
+    training_args.learning_rate=2e-4
+    training_args.weight_mel=45
+    training_args.num_train_epochs=4
+    training_args.eval_steps=1000
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    print(device)
+
 
     for i in range(10000):
        # model.train(True)
        print(f'clcye epochs ={i}')
        yield f'clcye epochs ={i}'
 
-       # model.setMfA(monotonic_align.maximum_path)
+       model=VitsModel.from_pretrained(dir_model,token=token).to("cuda")
+
        #dir_model_save=dir_model+'/vend'
 
 
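For context, the block this commit relocates into greet() follows the standard transformers HfArgumentParser pattern: parse a JSON config into argument dataclasses, then override individual fields in code. Below is a minimal, self-contained sketch of that pattern; TrainingArgs and finetune_config.json are hypothetical stand-ins for the repo's ModelArguments / DataTrainingArguments / VITSTrainingArguments and VitsModelSplit/finetune_config_ara.json, which are defined outside this diff.

    # Sketch of the parse-then-override pattern used in the relocated block.
    # TrainingArgs and finetune_config.json are illustrative stand-ins only.
    import os
    from dataclasses import dataclass, field

    import torch
    from transformers import HfArgumentParser


    @dataclass
    class TrainingArgs:
        learning_rate: float = field(default=2e-4)
        num_train_epochs: int = field(default=4)
        eval_steps: int = field(default=1000)
        fp16: bool = field(default=False)


    # parse_json_file returns one populated instance per dataclass type, as a tuple.
    parser = HfArgumentParser(TrainingArgs)
    (training_args,) = parser.parse_json_file(json_file=os.path.abspath("finetune_config.json"))

    # Later assignments win: in the diff, num_train_epochs is set to 1000 and then 4,
    # and eval_steps to 300 and then 1000, so the later values are the ones that apply.
    training_args.num_train_epochs = 4
    training_args.eval_steps = 1000
    training_args.fp16 = True

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(training_args, device)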