wasmdashai committed
Commit a3f4d62 · verified · 1 Parent(s): 7fd4bd3

Update app.py

Files changed (1):
  1. app.py +37 -33
app.py CHANGED
@@ -565,48 +565,52 @@ dir_model='wasmdashai/vits-ar-huba-fine'
 
 global_step=0
 wandb.login(key= "782b6a6e82bbb5a5348de0d3c7d40d1e76351e79")
+parser = HfArgumentParser((ModelArguments, DataTrainingArguments, VITSTrainingArguments))
+json_file = os.path.abspath('VitsModelSplit/finetune_config_ara.json')
+model_args, data_args, training_args = parser.parse_json_file(json_file = json_file)
+print('start')
+sgl=get_state_grad_loss(mel=True,
+                        # generator=False,
+                        # discriminator=False,
+                        duration=False)
+
+print(training_args)
+training_args.num_train_epochs=1000
+training_args.fp16=True
+training_args.eval_steps=300
+training_args.weight_kl=1
+training_args.d_learning_rate=2e-4
+training_args.learning_rate=2e-4
+training_args.weight_mel=45
+training_args.num_train_epochs=4
+training_args.eval_steps=1000
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+print(device)
+
+b=int(id)
+ctrain_datasets,eval_dataset,full_generation_dataset=get_data_loader(train_dataset_dirs = train_dataset_dirs,
+            eval_dataset_dir = os.path.join(dataset_dir,'eval'),
+            full_generation_dir = os.path.join(dataset_dir,'full_generation'),
+            device="cuda")
+
+print('load Data')
+wandb.init(project= 'AZ')
+
+print('wandb')
+model=VitsModel.from_pretrained(dir_model,token=token).to("cuda")
+print('loadeed')
 @spaces.GPU
 def greet(text,id):
     global GK
-    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, VITSTrainingArguments))
-    json_file = os.path.abspath('VitsModelSplit/finetune_config_ara.json')
-    model_args, data_args, training_args = parser.parse_json_file(json_file = json_file)
-    print('start')
-    sgl=get_state_grad_loss(mel=True,
-                            # generator=False,
-                            # discriminator=False,
-                            duration=False)
-
-    print(training_args)
-    training_args.num_train_epochs=1000
-    training_args.fp16=True
-    training_args.eval_steps=300
-    training_args.weight_kl=1
-    training_args.d_learning_rate=2e-4
-    training_args.learning_rate=2e-4
-    training_args.weight_mel=45
-    training_args.num_train_epochs=4
-    training_args.eval_steps=1000
-    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-    print(device)
-
-    b=int(id)
-    ctrain_datasets,eval_dataset,full_generation_dataset=get_data_loader(train_dataset_dirs = train_dataset_dirs,
-                eval_dataset_dir = os.path.join(dataset_dir,'eval'),
-                full_generation_dir = os.path.join(dataset_dir,'full_generation'),
-                device=device)
-
-    print('load Data')
-    wandb.init(project= 'AZ')
-    print('wandb')
+
     for i in range(10000):
         # model.train(True)
         print(f'clcye epochs ={i}')
         yield f'clcye epochs ={i}'
-        model=VitsModel.from_pretrained(dir_model,token=token).to(device)
+
         # model.setMfA(monotonic_align.maximum_path)
         #dir_model_save=dir_model+'/vend'
-        print('loadeed')
+
 
         trainer_to_cuda(model,
                   ctrain_datasets = ctrain_datasets,
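
The net effect of this commit is to hoist the one-time setup (argument parsing, gradient-loss flags, dataset loading, wandb init, and the VitsModel load) out of the greet handler and up to module level, so it runs once when the Space starts instead of on every call and every loop iteration. The sketch below illustrates that pattern only; it is not code from this repository, and load_model / load_datasets are hypothetical placeholders standing in for the app's VitsModel.from_pretrained and get_data_loader helpers.

# Minimal sketch (assumed pattern, not the app's actual code): heavy setup at
# module level, lightweight per-request generator that reuses the shared objects.
import torch

def load_model(device: str) -> torch.nn.Module:
    # Placeholder for VitsModel.from_pretrained(dir_model, token=token).to(device)
    return torch.nn.Linear(4, 4).to(device)

def load_datasets() -> list:
    # Placeholder for get_data_loader(train_dataset_dirs=..., device=...)
    return ["batch-0", "batch-1"]

# One-time setup: runs once at import, like the added module-level block above.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
MODEL = load_model(DEVICE)
DATASETS = load_datasets()

def greet(text, id):
    # Per-request handler: only the loop runs here; the model and datasets
    # created above are shared across calls.
    for i in range(3):
        yield f"cycle epochs = {i}"

With this layout each call to greet only drives the loop, while the parsed training_args, datasets, and model are reused across calls; the trade-off is that everything created at module level must be constructible at import time, without access to the handler's arguments.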