archivartaunik committed
Commit 09ba4d1
1 Parent(s): 7252b33

Initial upload of GPT_XTTS_V2 model files

best_model_14510.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b6a9a504f1d009969a2576f95f13dc51e5a97b4dbc4224163b2014d8c7ef3c89
+ size 5780141753
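The three lines above are a Git LFS pointer, not the checkpoint itself: the actual 5.78 GB weights live in LFS storage under the listed sha256 OID. One way to fetch the real file is huggingface_hub; a minimal sketch, where the repo id is an assumption inferred from the commit author and title rather than stated on this page:

# Sketch: resolve a Git LFS pointer to the actual file via the Hugging Face Hub.
# Assumes `pip install huggingface_hub`; REPO_ID is hypothetical -- substitute the real repo id.
from huggingface_hub import hf_hub_download

REPO_ID = "archivartaunik/GPT_XTTS_V2"  # assumption, not confirmed by this page

# Downloads (and caches) the LFS-backed checkpoint; returns the local path.
ckpt_path = hf_hub_download(repo_id=REPO_ID, filename="best_model_14510.pth")
print(ckpt_path)

hf_hub_download caches under the Hugging Face cache directory by default, so repeated calls do not re-download the 5.78 GB file.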
config.json ADDED
@@ -0,0 +1,194 @@
+ {
+     "output_path": "/checkpoints/",
+     "logger_uri": null,
+     "run_name": "GPT_XTTS_FT",
+     "project_name": "XTTS_trainer",
+     "run_description": [
+         "\n GPT XTTS training\n "
+     ],
+     "print_step": 50,
+     "plot_step": 100,
+     "model_param_stats": false,
+     "wandb_entity": null,
+     "dashboard_logger": "tensorboard",
+     "save_on_interrupt": true,
+     "log_model_step": 100,
+     "save_step": 20000,
+     "save_n_checkpoints": 1,
+     "save_checkpoints": true,
+     "save_all_best": false,
+     "save_best_after": 10000,
+     "target_loss": null,
+     "print_eval": false,
+     "test_delay_epochs": 0,
+     "run_eval": true,
+     "run_eval_steps": null,
+     "distributed_backend": "nccl",
+     "distributed_url": "tcp://localhost:54321",
+     "mixed_precision": false,
+     "precision": "fp16",
+     "epochs": 5,
+     "batch_size": 16,
+     "eval_batch_size": 16,
+     "grad_clip": 0.0,
+     "scheduler_after_epoch": true,
+     "lr": 5e-06,
+     "optimizer": "AdamW",
+     "optimizer_params": {
+         "betas": [
+             0.9,
+             0.96
+         ],
+         "eps": 1e-08,
+         "weight_decay": 0.01
+     },
+     "lr_scheduler": "MultiStepLR",
+     "lr_scheduler_params": {
+         "milestones": [
+             60000,
+             120000,
+             180000
+         ],
+         "gamma": 0.5,
+         "last_epoch": -1
+     },
+     "use_grad_scaler": false,
+     "allow_tf32": false,
+     "cudnn_enable": true,
+     "cudnn_deterministic": false,
+     "cudnn_benchmark": false,
+     "training_seed": 54321,
+     "model": "xtts",
+     "num_loader_workers": 4,
+     "num_eval_loader_workers": 0,
+     "use_noise_augment": false,
+     "audio": {
+         "sample_rate": 22050,
+         "output_sample_rate": 24000,
+         "dvae_sample_rate": 22050
+     },
+     "use_phonemes": false,
+     "phonemizer": null,
+     "phoneme_language": null,
+     "compute_input_seq_cache": false,
+     "text_cleaner": null,
+     "enable_eos_bos_chars": false,
+     "test_sentences_file": "",
+     "phoneme_cache_path": null,
+     "characters": null,
+     "add_blank": false,
+     "batch_group_size": 0,
+     "loss_masking": null,
+     "min_audio_len": 1,
+     "max_audio_len": Infinity,
+     "min_text_len": 1,
+     "max_text_len": Infinity,
+     "compute_f0": false,
+     "compute_energy": false,
+     "compute_linear_spec": false,
+     "precompute_num_workers": 0,
+     "start_by_longest": false,
+     "shuffle": false,
+     "drop_last": false,
+     "datasets": [
+         {
+             "formatter": "",
+             "dataset_name": "",
+             "path": "",
+             "meta_file_train": "",
+             "ignored_speakers": null,
+             "language": "",
+             "phonemizer": "",
+             "meta_file_val": "",
+             "meta_file_attn_mask": ""
+         }
+     ],
+     "test_sentences": [],
+     "eval_split_max_size": 256,
+     "eval_split_size": 0.01,
+     "use_speaker_weighted_sampler": false,
+     "speaker_weighted_sampler_alpha": 1.0,
+     "use_language_weighted_sampler": false,
+     "language_weighted_sampler_alpha": 1.0,
+     "use_length_weighted_sampler": false,
+     "length_weighted_sampler_alpha": 1.0,
+     "model_args": {
+         "gpt_batch_size": 1,
+         "enable_redaction": false,
+         "kv_cache": true,
+         "gpt_checkpoint": "",
+         "clvp_checkpoint": null,
+         "decoder_checkpoint": null,
+         "num_chars": 255,
+         "tokenizer_file": "/checkpoints/XTTS_v2.0_original_model_files/vocab.json",
+         "gpt_max_audio_tokens": 605,
+         "gpt_max_text_tokens": 402,
+         "gpt_max_prompt_tokens": 70,
+         "gpt_layers": 30,
+         "gpt_n_model_channels": 1024,
+         "gpt_n_heads": 16,
+         "gpt_number_text_tokens": 13685,
+         "gpt_start_text_token": 261,
+         "gpt_stop_text_token": 0,
+         "gpt_num_audio_tokens": 1026,
+         "gpt_start_audio_token": 1024,
+         "gpt_stop_audio_token": 1025,
+         "gpt_code_stride_len": 1024,
+         "gpt_use_masking_gt_prompt_approach": true,
+         "gpt_use_perceiver_resampler": true,
+         "input_sample_rate": 22050,
+         "output_sample_rate": 24000,
+         "output_hop_length": 256,
+         "decoder_input_dim": 1024,
+         "d_vector_dim": 512,
+         "cond_d_vector_in_each_upsampling_layer": true,
+         "duration_const": 102400,
+         "min_conditioning_length": 88200,
+         "max_conditioning_length": 264600,
+         "gpt_loss_text_ce_weight": 0.01,
+         "gpt_loss_mel_ce_weight": 1.0,
+         "debug_loading_failures": false,
+         "max_wav_length": 5000750,
+         "max_text_length": 500,
+         "mel_norm_file": "/checkpoints/XTTS_v2.0_original_model_files/mel_stats.pth",
+         "dvae_checkpoint": "/checkpoints/XTTS_v2.0_original_model_files/dvae.pth",
+         "xtts_checkpoint": "/checkpoints/XTTS_v2.0_original_model_files/model.pth",
+         "vocoder": ""
+     },
+     "model_dir": null,
+     "languages": [
+         "en",
+         "es",
+         "fr",
+         "de",
+         "it",
+         "pt",
+         "pl",
+         "tr",
+         "ru",
+         "nl",
+         "cs",
+         "ar",
+         "zh-cn",
+         "hu",
+         "ko",
+         "ja",
+         "hi",
+         "be"
+     ],
+     "temperature": 0.75,
+     "length_penalty": 1.0,
+     "repetition_penalty": 5.0,
+     "top_k": 50,
+     "top_p": 0.85,
+     "num_gpt_outputs": 1,
+     "gpt_cond_len": 30,
+     "gpt_cond_chunk_len": 4,
+     "max_ref_len": 30,
+     "sound_norm_refs": false,
+     "optimizer_wd_only_on_weights": true,
+     "weighted_loss_attrs": null,
+     "weighted_loss_multipliers": null,
+     "github_branch": "inside_docker"
+ }
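The config above is the trainer config dumped by the run: 5 epochs at batch size 16, AdamW at lr 5e-06 with MultiStepLR halving at 60k/120k/180k steps, a 13685-token text vocabulary, and a languages list that adds "be" (Belarusian) to the stock XTTS set. To synthesize with the fine-tuned checkpoint, the usual Coqui TTS loading path applies; a minimal sketch, assuming the commit's files sit in the working directory, that coqpit skips the trainer-only keys when this file is read into XttsConfig (it ignores fields it does not define), and with the reference clip name as a placeholder:

# Sketch: load the fine-tuned checkpoint for inference with Coqui TTS.
# Assumes `pip install TTS soundfile`; "speaker_ref.wav" is a placeholder.
import soundfile as sf
from TTS.tts.configs.xtts_config import XttsConfig
from TTS.tts.models.xtts import Xtts

config = XttsConfig()
config.load_json("config.json")  # the trainer config from this commit; trainer-only keys are ignored

model = Xtts.init_from_config(config)
model.load_checkpoint(
    config,
    checkpoint_path="model.pth",  # the fine-tuned weights from this commit
    vocab_path="vocab.json",      # the extended tokenizer from this commit
    use_deepspeed=False,
)
model.cuda()

out = model.synthesize(
    "Прывітанне, свет!",            # Belarusian sample text ("Hello, world!")
    config,
    speaker_wav="speaker_ref.wav",  # placeholder reference clip
    language="be",
)
sf.write("output.wav", out["wav"], 24000)  # output_sample_rate from the config

The vocab.json from this commit must be paired with the checkpoint: the run's gpt_number_text_tokens (13685) differs from the stock XTTS-v2 vocabulary, so mixing in the original vocab file would mismatch the text-embedding shape.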
dvae.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b29bc227d410d4991e0a8c09b858f77415013eeb9fba9650258e96095557d97a
+ size 210514388
events.out.tfevents.1734889439.9cc9878929c5.1257.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:19e2f392eb35eb10849cf470ded95ce91c4cd9251ac48434ec49d50752148073
+ size 74527
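This is the TensorBoard event log written during the run (the config sets dashboard_logger to "tensorboard"). It can be browsed with `tensorboard --logdir .`, or read programmatically; a minimal sketch using TensorBoard's event reader, where the scalar tag names are not known from this page and are therefore listed rather than assumed:

# Sketch: inspect the training curves stored in the tfevents file.
# Assumes `pip install tensorboard`.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator("events.out.tfevents.1734889439.9cc9878929c5.1257.0")
ea.Reload()  # parse the event file

print(ea.Tags()["scalars"])       # list the scalar tags the run actually logged
for tag in ea.Tags()["scalars"]:
    events = ea.Scalars(tag)      # list of (wall_time, step, value) records
    print(tag, events[-1].step, events[-1].value)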
mel_stats.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1f69422a8a8f344c4fca2f0c6b8d41d2151d6615b7321e48e6bb15ae949b119c
+ size 1067
model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b6a9a504f1d009969a2576f95f13dc51e5a97b4dbc4224163b2014d8c7ef3c89
+ size 5780141753
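Note that model.pth has the same OID and size as best_model_14510.pth above, so the two entries are byte-identical copies of one 5.78 GB checkpoint. After pulling the files from LFS, the pointer's sha256 makes a cheap integrity check; a sketch that streams the file in chunks rather than loading it whole:

# Sketch: verify a downloaded LFS file against the OID recorded in its pointer.
import hashlib

EXPECTED = "b6a9a504f1d009969a2576f95f13dc51e5a97b4dbc4224163b2014d8c7ef3c89"

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file through sha256 in 1 MiB chunks."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

assert sha256_of("model.pth") == EXPECTED
assert sha256_of("best_model_14510.pth") == EXPECTED  # same LFS object as model.pth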
train_gpt_xtts.py ADDED
@@ -0,0 +1,238 @@
+ import os
+ import gc
+
+ from trainer import Trainer, TrainerArgs
+
+ from TTS.config.shared_configs import BaseDatasetConfig
+ from TTS.tts.datasets import load_tts_samples
+ from TTS.tts.layers.xtts.trainer.gpt_trainer import GPTArgs, GPTTrainer, GPTTrainerConfig, XttsAudioConfig
+ from TTS.utils.manage import ModelManager
+
+ from dataclasses import dataclass, field
+ from typing import Optional
+ from transformers import HfArgumentParser
+
+ import argparse
+
+ def create_xtts_trainer_parser():
+     parser = argparse.ArgumentParser(description="Arguments for XTTS Trainer")
+
+     parser.add_argument("--output_path", type=str, required=True,
+                         help="Path where checkpoints and the downloaded pretrained model files are saved")
+     parser.add_argument("--metadatas", nargs='+', type=str, required=True,
+                         help="train_csv_path,eval_csv_path,language")
+     parser.add_argument("--num_epochs", type=int, default=1,
+                         help="Number of epochs")
+     parser.add_argument("--batch_size", type=int, default=1,
+                         help="Mini batch size")
+     parser.add_argument("--grad_acumm", type=int, default=1,
+                         help="Grad accumulation steps")
+     parser.add_argument("--max_audio_length", type=int, default=255995,
+                         help="Max audio length")
+     parser.add_argument("--max_text_length", type=int, default=200,
+                         help="Max text length")
+     parser.add_argument("--weight_decay", type=float, default=1e-2,
+                         help="Weight decay")
+     parser.add_argument("--lr", type=float, default=5e-6,
+                         help="Learning rate")
+     parser.add_argument("--save_step", type=int, default=5000,
+                         help="Save step")
+
+     return parser
+
+
+
+ def train_gpt(metadatas, num_epochs, batch_size, grad_acumm, output_path, max_audio_length, max_text_length, lr, weight_decay, save_step):
+     # Logging parameters
+     RUN_NAME = "GPT_XTTS_FT"
+     PROJECT_NAME = "XTTS_trainer"
+     DASHBOARD_LOGGER = "tensorboard"
+     LOGGER_URI = None
+
+     # Set here the path where the checkpoints will be saved. Default: ./run/training/
+     # OUT_PATH = os.path.join(output_path, "run", "training")
+     OUT_PATH = output_path
+
+     # Training Parameters
+     OPTIMIZER_WD_ONLY_ON_WEIGHTS = True  # set to False for multi-GPU training
+     START_WITH_EVAL = False  # if True, training will start with an evaluation pass
+     BATCH_SIZE = batch_size  # set here the batch size
+     GRAD_ACUMM_STEPS = grad_acumm  # set here the grad accumulation steps
+
+
+     # Define here the datasets that you want to use for fine-tuning
+     DATASETS_CONFIG_LIST = []
+     for metadata in metadatas:
+         train_csv, eval_csv, language = metadata.split(",")
+         print(train_csv, eval_csv, language)
+
+         config_dataset = BaseDatasetConfig(
+             formatter="coqui",
+             dataset_name="ft_dataset",
+             path=os.path.dirname(train_csv),
+             meta_file_train=os.path.basename(train_csv),
+             meta_file_val=os.path.basename(eval_csv),
+             language=language,
+         )
+
+         DATASETS_CONFIG_LIST.append(config_dataset)
+
+     # Define the path where XTTS v2.0.1 files will be downloaded
+     CHECKPOINTS_OUT_PATH = os.path.join(OUT_PATH, "XTTS_v2.0_original_model_files/")
+     os.makedirs(CHECKPOINTS_OUT_PATH, exist_ok=True)
+
+     # DVAE files
+     DVAE_CHECKPOINT_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/dvae.pth"
+     MEL_NORM_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/mel_stats.pth"
+
+     # Set the path to the downloaded files
+     DVAE_CHECKPOINT = os.path.join(CHECKPOINTS_OUT_PATH, os.path.basename(DVAE_CHECKPOINT_LINK))
+     MEL_NORM_FILE = os.path.join(CHECKPOINTS_OUT_PATH, os.path.basename(MEL_NORM_LINK))
+
+     # download DVAE files if needed
+     if not os.path.isfile(DVAE_CHECKPOINT) or not os.path.isfile(MEL_NORM_FILE):
+         print(" > Downloading DVAE files!")
+         ModelManager._download_model_files([MEL_NORM_LINK, DVAE_CHECKPOINT_LINK], CHECKPOINTS_OUT_PATH, progress_bar=True)
+
+
+     # Download XTTS v2.0 checkpoint if needed
+     TOKENIZER_FILE_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/vocab.json"
+     XTTS_CHECKPOINT_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/model.pth"
+     XTTS_CONFIG_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/config.json"
+
+     # XTTS transfer learning parameters: you need to provide the paths of the XTTS model checkpoint that you want to fine-tune
+     TOKENIZER_FILE = os.path.join(CHECKPOINTS_OUT_PATH, os.path.basename(TOKENIZER_FILE_LINK))  # vocab.json file
+     XTTS_CHECKPOINT = os.path.join(CHECKPOINTS_OUT_PATH, os.path.basename(XTTS_CHECKPOINT_LINK))  # model.pth file
+     XTTS_CONFIG_FILE = os.path.join(CHECKPOINTS_OUT_PATH, os.path.basename(XTTS_CONFIG_LINK))  # config.json file
+
+     # download XTTS v2.0 files if needed
+     if not os.path.isfile(TOKENIZER_FILE):
+         print(" > Downloading XTTS v2.0 tokenizer!")
+         ModelManager._download_model_files(
+             [TOKENIZER_FILE_LINK], CHECKPOINTS_OUT_PATH, progress_bar=True
+         )
+     if not os.path.isfile(XTTS_CHECKPOINT):
+         print(" > Downloading XTTS v2.0 checkpoint!")
+         ModelManager._download_model_files(
+             [XTTS_CHECKPOINT_LINK], CHECKPOINTS_OUT_PATH, progress_bar=True
+         )
+     if not os.path.isfile(XTTS_CONFIG_FILE):
+         print(" > Downloading XTTS v2.0 config!")
+         ModelManager._download_model_files(
+             [XTTS_CONFIG_LINK], CHECKPOINTS_OUT_PATH, progress_bar=True
+         )
+
+     # init args and config
+     model_args = GPTArgs(
+         max_conditioning_length=264600,  # 12 secs
+         min_conditioning_length=88200,  # 4 secs
+         debug_loading_failures=False,
+         max_wav_length=max_audio_length,  # default 255995 samples is ~11.6 seconds at 22050 Hz
+         max_text_length=max_text_length,
+         mel_norm_file=MEL_NORM_FILE,
+         dvae_checkpoint=DVAE_CHECKPOINT,
+         xtts_checkpoint=XTTS_CHECKPOINT,  # checkpoint path of the model that you want to fine-tune
+         tokenizer_file=TOKENIZER_FILE,
+         gpt_num_audio_tokens=1026,
+         gpt_start_audio_token=1024,
+         gpt_stop_audio_token=1025,
+         gpt_use_masking_gt_prompt_approach=True,
+         gpt_use_perceiver_resampler=True,
+     )
+     # define audio config
+     audio_config = XttsAudioConfig(sample_rate=22050, dvae_sample_rate=22050, output_sample_rate=24000)
+     # training parameters config
+
+     config = GPTTrainerConfig()
+
+     config.load_json(XTTS_CONFIG_FILE)
+
+     config.epochs = num_epochs
+     config.output_path = OUT_PATH
+     config.model_args = model_args
+     config.run_name = RUN_NAME
+     config.project_name = PROJECT_NAME
+     config.run_description = """
+         GPT XTTS training
+         """  # trailing comma removed: it turned run_description into a tuple (visible in the uploaded config.json, where run_description was serialized as a list)
+     config.dashboard_logger = DASHBOARD_LOGGER
+     config.logger_uri = LOGGER_URI
+     config.audio = audio_config
+     config.batch_size = BATCH_SIZE
+     config.num_loader_workers = 4
+     config.eval_split_max_size = 256
+     config.print_step = 50
+     config.plot_step = 100
+     config.log_model_step = 100
+     config.save_step = save_step
+     config.save_n_checkpoints = 1
+     config.save_checkpoints = True
+     config.print_eval = False
+     config.optimizer = "AdamW"
+     config.optimizer_wd_only_on_weights = OPTIMIZER_WD_ONLY_ON_WEIGHTS
+     config.optimizer_params = {"betas": [0.9, 0.96], "eps": 1e-8, "weight_decay": weight_decay}
+     config.lr = lr
+     config.lr_scheduler = "MultiStepLR"
+     # halve the learning rate at 3x, 6x and 9x save_step (60k/120k/180k steps for save_step=20000)
+     config.lr_scheduler_params = {"milestones": [
+         save_step * 3, save_step * 3 * 2, save_step * 3 * 3], "gamma": 0.5, "last_epoch": -1}
+     config.test_sentences = []
+
+     # init the model from config
+     model = GPTTrainer.init_from_config(config)
+
+     # load training samples
+     train_samples, eval_samples = load_tts_samples(
+         DATASETS_CONFIG_LIST,
+         eval_split=True,
+         eval_split_max_size=config.eval_split_max_size,
+         eval_split_size=config.eval_split_size,
+     )
+
+     # init the trainer and 🚀
+     trainer = Trainer(
+         TrainerArgs(
+             restore_path=None,  # the XTTS checkpoint is restored via the xtts_checkpoint key, so there is no need to restore it with Trainer's restore_path parameter
+             skip_train_epoch=False,
+             start_with_eval=START_WITH_EVAL,
+             grad_accum_steps=GRAD_ACUMM_STEPS
+         ),
+         config,
+         # output_path=os.path.join(output_path, "run", "training"),
+         output_path=os.path.join(output_path),
+         model=model,
+         train_samples=train_samples,
+         eval_samples=eval_samples,
+     )
+     trainer.fit()
+
+     # get the longest-text audio file to use as speaker reference
+     # (note: speaker_ref is computed here but not used or returned by this script)
+     samples_len = [len(item["text"].split(" ")) for item in train_samples]
+     longest_text_idx = samples_len.index(max(samples_len))
+     speaker_ref = train_samples[longest_text_idx]["audio_file"]
+
+     trainer_out_path = trainer.output_path
+
+     # deallocate VRAM and RAM
+     del model, trainer, train_samples, eval_samples
+     gc.collect()
+
+     return trainer_out_path
+
+ if __name__ == "__main__":
+     parser = create_xtts_trainer_parser()
+     args = parser.parse_args()
+
+     trainer_out_path = train_gpt(
+         metadatas=args.metadatas,
+         output_path=args.output_path,
+         num_epochs=args.num_epochs,
+         batch_size=args.batch_size,
+         grad_acumm=args.grad_acumm,
+         weight_decay=args.weight_decay,
+         lr=args.lr,
+         max_text_length=args.max_text_length,
+         max_audio_length=args.max_audio_length,
+         save_step=args.save_step
+     )
+
+     print(f"Checkpoint saved in dir: {trainer_out_path}")
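For reference, this is how the script might be driven to reproduce a run like the one recorded in config.json; a minimal sketch calling train_gpt() directly, where the CSV paths, output directory and grad_acumm value are placeholders, and the remaining values mirror the committed config (epochs=5, batch_size=16, lr=5e-06, save_step=20000):

# Sketch: drive train_gpt() directly instead of via the CLI.
# Paths are placeholders; numeric values mirror the committed config.json.
from train_gpt_xtts import train_gpt

out_dir = train_gpt(
    metadatas=["/data/be/train.csv,/data/be/eval.csv,be"],  # train_csv,eval_csv,language
    output_path="/checkpoints/",
    num_epochs=5,
    batch_size=16,
    grad_acumm=1,             # placeholder; effective batch = batch_size * grad_acumm
    max_audio_length=255995,  # samples at 22050 Hz, ~11.6 s
    max_text_length=200,
    lr=5e-6,
    weight_decay=1e-2,
    save_step=20000,
)
print(out_dir)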
trainer_0_log.txt ADDED
The diff for this file is too large to render.
vocab.json ADDED
The diff for this file is too large to render.