Sync from GitHub repo
This Space is synced from the GitHub repo: https://github.com/SWivid/F5-TTS. Please submit contributions to the Space there, i.e. through the GitHub repo.
src/f5_tts/configs/F5TTS_Small.yaml
CHANGED
@@ -10,7 +10,7 @@ datasets:
   num_workers: 16
 
 optim:
-  epochs: 11
+  epochs: 11 # only suitable for Emilia, if you want to train it on LibriTTS, set epoch 686
   learning_rate: 7.5e-5
   num_warmup_updates: 20000 # warmup updates
   grad_accumulation_steps: 1 # note: updates = steps / grad_accumulation_steps
@@ -49,4 +49,4 @@ ckpts:
   save_per_updates: 50000 # save checkpoint per updates
   keep_last_n_checkpoints: -1 # -1 to keep all, 0 to not save intermediate, > 0 to keep last N checkpoints
   last_per_updates: 5000 # save last checkpoint per updates
-  save_dir: ckpts/${model.name}_${model.mel_spec.mel_spec_type}_${model.tokenizer}_${datasets.name}
+  save_dir: ckpts/${model.name}_${model.mel_spec.mel_spec_type}_${model.tokenizer}_${datasets.name}
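As a reading aid, here is a minimal sketch (not code from the F5-TTS repo; the function names and the per-epoch update counts are made up for illustration) of how the knobs touched or shown in this hunk interact: optimizer updates versus raw steps, the keep_last_n_checkpoints retention rule, and why a much smaller dataset such as LibriTTS needs many more epochs than Emilia to reach a comparable total number of updates.

```python
# Illustrative sketch only -- not F5-TTS training code.

def num_updates(steps: int, grad_accumulation_steps: int) -> int:
    # From the config comment: updates = steps / grad_accumulation_steps
    return steps // grad_accumulation_steps

def epochs_for_target_updates(target_updates: int, updates_per_epoch: int) -> int:
    # A smaller dataset yields fewer updates per epoch, so more epochs are
    # needed to reach the same update budget (cf. 11 epochs for Emilia vs.
    # the suggested 686 for LibriTTS). Ceil division.
    return -(-target_updates // updates_per_epoch)

def checkpoints_to_keep(saved: list[str], keep_last_n_checkpoints: int) -> list[str]:
    # Mirrors the comment on keep_last_n_checkpoints:
    #   -1 -> keep all, 0 -> keep no intermediate checkpoints, N > 0 -> keep last N
    if keep_last_n_checkpoints == -1:
        return saved
    if keep_last_n_checkpoints == 0:
        return []
    return saved[-keep_last_n_checkpoints:]

if __name__ == "__main__":
    grad_accumulation_steps = 1   # from the config
    save_per_updates = 50_000     # from the config
    print(num_updates(steps=100_000, grad_accumulation_steps=grad_accumulation_steps))

    # Hypothetical per-epoch update counts, only to show the direction of the scaling:
    print(epochs_for_target_updates(target_updates=1_000_000, updates_per_epoch=100_000))  # few epochs
    print(epochs_for_target_updates(target_updates=1_000_000, updates_per_epoch=1_500))    # many epochs

    saved = [f"model_{u}.pt" for u in range(save_per_updates, 300_001, save_per_updates)]
    print(checkpoints_to_keep(saved, keep_last_n_checkpoints=-1))  # keep all
    print(checkpoints_to_keep(saved, keep_last_n_checkpoints=2))   # keep last 2
```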