Spaces: Running on Zero
Update app.py: move config parsing and training-arg overrides inside the @spaces.GPU entry point
app.py CHANGED
@@ -37,26 +37,9 @@ from VitsModelSplit.dataset_features_collector import FeaturesCollectionDataset
 from torch.cuda.amp import autocast, GradScaler
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
-
-# model= VitsModel.from_pretrained("/content/drive/MyDrive/vitsM/TO/sp3/core/vend").to("cuda")
-# model=VitsModel.from_pretrained("/content/drive/MyDrive/vitsM/heppa/EndCore3/v0").to("cuda")
-# model.discriminator=model1.discriminator
-# model.duration_predictor=model1.duration_predictor
-
-# model.setMfA(monotonic_align.maximum_path)
-# tokenizer = AutoTokenizer.from_pretrained("facebook/mms-tts-ara",cache_dir="./")
+
 feature_extractor = VitsFeatureExtractor()
-
-json_file = os.path.abspath('VitsModelSplit/finetune_config_ara.json')
-model_args, data_args, training_args = parser.parse_json_file(json_file = json_file)
-sgl=get_state_grad_loss(mel=True,
-              # generator=False,
-              # discriminator=False,
-              duration=False)
-
-training_args.num_train_epochs=1000
-training_args.fp16=True
-training_args.eval_steps=300
+
 # sgl=get_state_grad_loss(k1=True,#generator=False,
 # discriminator=False,
 # duration=False
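For context, the block removed above parsed the fine-tuning config at import time using the standard transformers pattern: HfArgumentParser populates several dataclasses from a single JSON file. A minimal sketch of that API, with toy dataclasses standing in for the app's ModelArguments, DataTrainingArguments and VITSTrainingArguments (the config path and field names here are illustrative, not the app's):

from dataclasses import dataclass
from transformers import HfArgumentParser

# Toy stand-ins for the app's argument dataclasses.
@dataclass
class ModelArgs:
    model_name_or_path: str = "facebook/mms-tts-ara"

@dataclass
class TrainArgs:
    learning_rate: float = 2e-4
    num_train_epochs: int = 4

parser = HfArgumentParser((ModelArgs, TrainArgs))
# parse_json_file reads one JSON object and returns one populated
# instance per dataclass, in the order they were passed above.
model_args, train_args = parser.parse_json_file(json_file="finetune_config_ara.json")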
@@ -573,24 +556,37 @@ train_dataset_dirs=[
 
 
 
-
-training_args.d_learning_rate=2e-4
-training_args.learning_rate=2e-4
-training_args.weight_mel=45
-training_args.num_train_epochs=4
-training_args.eval_steps=1000
-global_step=0
+
 dir_model='wasmdashai/vits-ar-huba-fine'
 
 
 
 wandb.login(key= "782b6a6e82bbb5a5348de0d3c7d40d1e76351e79")
 wandb.init(project= 'AZ',config = training_args.to_dict())
-
+global_step=0
 
 @spaces.GPU
 def greet(text,id):
     global GK
+    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, VITSTrainingArguments))
+    json_file = os.path.abspath('VitsModelSplit/finetune_config_ara.json')
+    model_args, data_args, training_args = parser.parse_json_file(json_file = json_file)
+    sgl=get_state_grad_loss(mel=True,
+                  # generator=False,
+                  # discriminator=False,
+                  duration=False)
+
+
+    training_args.num_train_epochs=1000
+    training_args.fp16=True
+    training_args.eval_steps=300
+    training_args.weight_kl=1
+    training_args.d_learning_rate=2e-4
+    training_args.learning_rate=2e-4
+    training_args.weight_mel=45
+    training_args.num_train_epochs=4
+    training_args.eval_steps=1000
+
     b=int(id)
     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
     ctrain_datasets,eval_dataset,full_generation_dataset=get_data_loader(train_dataset_dirs = train_dataset_dirs,
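Taken together, the two hunks defer all of this setup: argument parsing and every training_args override now run inside greet rather than at import time, and global_step moves to module level so it persists between calls. Note the new block still assigns num_train_epochs and eval_steps twice (1000 then 4, and 300 then 1000); the later assignments win, so the effective values are 4 epochs and eval_steps=1000. Deferring device-dependent work into the decorated function is the usual shape of a ZeroGPU Space, since the GPU is attached only while a @spaces.GPU function is executing. A minimal sketch of that pattern (greet here is a stand-in entry point, not the app's full training function):

import spaces
import torch

@spaces.GPU
def greet(text, id):
    # On ZeroGPU hardware a CUDA device exists only inside this call;
    # at module import time the Space runs on CPU, so heavy GPU setup
    # belongs here rather than at the top of the file.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return f"running on {device}"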
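Both versions keep the torch.cuda.amp import at line 37, and the change sets training_args.fp16=True, i.e. mixed-precision training. For reference, the canonical autocast/GradScaler update step looks like this (a sketch only; the model, optimizer and loss are placeholders, not the app's actual VITS training loop):

import torch
from torch.cuda.amp import autocast, GradScaler

model = torch.nn.Linear(80, 80).cuda()            # placeholder model
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-4)
scaler = GradScaler()

def train_step(batch):
    optimizer.zero_grad(set_to_none=True)
    with autocast():                              # run the forward pass in fp16
        loss = model(batch).pow(2).mean()         # placeholder loss
    scaler.scale(loss).backward()                 # scale to avoid fp16 underflow
    scaler.step(optimizer)                        # unscales grads, then steps
    scaler.update()                               # adapt the loss scale
    return loss.item()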