|
--- |
|
tags: |
|
- espnet |
|
- audio |
|
- text-to-speech |
|
language: is
|
datasets: |
|
- talromur |
|
license: cc-by-4.0 |
|
--- |
|
|
|
## ESPnet2 TTS model |
|
|
|
### `language-and-voice-lab/talromur_d_loudnorm_xvector_finetune_fastspeech2` |
|
|
|
This model was trained by G-Thor using the talromur recipe in [espnet](https://github.com/espnet/espnet/).
|
|
|
### Demo: How to use in ESPnet2 |
|
|
|
Follow the [ESPnet installation instructions](https://espnet.github.io/espnet/installation.html) |
|
if you haven't done that already. |
|
|
|
```bash |
|
cd espnet |
|
git checkout d0047402e830a3c53e8b590064af4bf70415fb3b |
|
pip install -e . |
|
cd egs2/talromur/tts1 |
|
./run.sh --skip_data_prep false --skip_train true --download_model language-and-voice-lab/talromur_d_loudnorm_xvector_finetune_fastspeech2 |
|
``` |
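
This checkpoint can also be loaded directly from Python through ESPnet's `Text2Speech` inference API. Because it is an x-vector-conditioned FastSpeech2 model (`spk_embed_dim: 512` in the config below) trained with `token_type: phn` and `g2p: null`, inference requires a 512-dimensional speaker embedding passed as `spembs`, and the input is expected to be a space-separated phone string drawn from the token list. The snippet below is a minimal sketch, assuming `espnet_model_zoo` and `kaldiio` are installed; the scp path, utterance ID, and phone string are illustrative placeholders rather than part of this release.

```python
# Minimal inference sketch (assumes espnet_model_zoo and kaldiio are installed).
import kaldiio
from espnet2.bin.tts_inference import Text2Speech

# Download and load the model from the Hugging Face Hub.
text2speech = Text2Speech.from_pretrained(
    "language-and-voice-lab/talromur_d_loudnorm_xvector_finetune_fastspeech2"
)

# Load a 512-dim speaker x-vector from a kaldi-style scp/ark pair,
# matching the dump/xvector/*/xvector.scp layout used during training.
xvectors = kaldiio.load_scp("dump/xvector/train_d/xvector.scp")  # placeholder path
spembs = xvectors["some_utterance_id"]                           # placeholder key

# g2p is null and token_type is phn, so the input is a space-separated
# phone string from the model's token list (this string is only illustrative).
output = text2speech("h a l ou", spembs=spembs)
mel = output["feat_gen"]  # generated mel-spectrogram features
```

Note that this acoustic model predicts mel-spectrogram features; to obtain audio, pair it with a separately trained neural vocoder (for example via the `vocoder_tag` argument of `Text2Speech.from_pretrained`), in which case the returned dictionary also contains a `wav` entry.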
|
|
|
|
|
|
|
## TTS config |
|
|
|
<details><summary>expand</summary> |
|
|
|
```yaml
|
config: ./conf/tuning/finetune_xvector_fastspeech2.yaml |
|
print_config: false |
|
log_level: INFO |
|
drop_last_iter: false |
|
dry_run: false |
|
iterator_type: sequence |
|
valid_iterator_type: null |
|
output_dir: exp/tts_finetune_d_loudnorm_xvector_fastspeech2 |
|
ngpu: 1 |
|
seed: 0 |
|
num_workers: 1 |
|
num_att_plot: 3 |
|
dist_backend: nccl |
|
dist_init_method: env:// |
|
dist_world_size: null |
|
dist_rank: null |
|
local_rank: 0 |
|
dist_master_addr: null |
|
dist_master_port: null |
|
dist_launcher: null |
|
multiprocessing_distributed: false |
|
unused_parameters: false |
|
sharded_ddp: false |
|
cudnn_enabled: true |
|
cudnn_benchmark: false |
|
cudnn_deterministic: true |
|
collect_stats: false |
|
write_collected_feats: false |
|
max_epoch: 50 |
|
patience: null |
|
val_scheduler_criterion: |
|
- valid |
|
- loss |
|
early_stopping_criterion: |
|
- valid |
|
- loss |
|
- min |
|
best_model_criterion: |
|
- - valid |
|
- loss |
|
- min |
|
- - train |
|
- loss |
|
- min |
|
keep_nbest_models: 5 |
|
nbest_averaging_interval: 0 |
|
grad_clip: 1.0 |
|
grad_clip_type: 2.0 |
|
grad_noise: false |
|
accum_grad: 8 |
|
no_forward_run: false |
|
resume: true |
|
train_dtype: float32 |
|
use_amp: false |
|
log_interval: null |
|
use_matplotlib: true |
|
use_tensorboard: true |
|
create_graph_in_tensorboard: false |
|
use_wandb: false |
|
wandb_project: null |
|
wandb_id: null |
|
wandb_entity: null |
|
wandb_name: null |
|
wandb_model_log_interval: -1 |
|
detect_anomaly: false |
|
use_adapter: false |
|
adapter: lora |
|
save_strategy: all |
|
adapter_conf: {} |
|
pretrain_path: null |
|
init_param: |
|
- /users/home/gunnaro/talromur_1and2_spk_avg_xvector_fastspeech2/exp/tts_xvector_fastspeech2_spk_avg_combined/valid.loss.ave_5best.pth:tts:tts |
|
ignore_init_mismatch: false |
|
freeze_param: [] |
|
num_iters_per_epoch: 800 |
|
batch_size: 20 |
|
valid_batch_size: null |
|
batch_bins: 4500000 |
|
valid_batch_bins: null |
|
train_shape_file: |
|
- exp/tts_stats_d/train/text_shape.phn |
|
- exp/tts_stats_d/train/speech_shape |
|
valid_shape_file: |
|
- exp/tts_stats_d/valid/text_shape.phn |
|
- exp/tts_stats_d/valid/speech_shape |
|
batch_type: numel |
|
valid_batch_type: null |
|
fold_length: |
|
- 150 |
|
- 204800 |
|
sort_in_batch: descending |
|
shuffle_within_batch: false |
|
sort_batch: descending |
|
multiple_iterator: false |
|
chunk_length: 500 |
|
chunk_shift_ratio: 0.5 |
|
num_cache_chunks: 1024 |
|
chunk_excluded_key_prefixes: [] |
|
chunk_default_fs: null |
|
train_data_path_and_name_and_type: |
|
- - dump/raw/train_d/text |
|
- text |
|
- text |
|
- - data/train_d/durations |
|
- durations |
|
- text_int |
|
- - dump/raw/train_d/wav.scp |
|
- speech |
|
- sound |
|
- - dump/xvector/train_d/xvector.scp |
|
- spembs |
|
- kaldi_ark |
|
valid_data_path_and_name_and_type: |
|
- - dump/raw/dev_d/text |
|
- text |
|
- text |
|
- - data/dev_d/durations |
|
- durations |
|
- text_int |
|
- - dump/raw/dev_d/wav.scp |
|
- speech |
|
- sound |
|
- - dump/xvector/dev_d/xvector.scp |
|
- spembs |
|
- kaldi_ark |
|
allow_variable_data_keys: false |
|
max_cache_size: 0.0 |
|
max_cache_fd: 32 |
|
allow_multi_rates: false |
|
valid_max_cache_size: null |
|
exclude_weight_decay: false |
|
exclude_weight_decay_conf: {} |
|
optim: adam |
|
optim_conf: |
|
lr: 0.1 |
|
scheduler: noamlr |
|
scheduler_conf: |
|
model_size: 384 |
|
warmup_steps: 4000 |
|
token_list: |
|
- <blank> |
|
- <unk> |
|
- a |
|
- r |
|
- sil |
|
- I |
|
- t |
|
- n |
|
- s |
|
- D |
|
- Y |
|
- E |
|
- l |
|
- v |
|
- m |
|
- h |
|
- k |
|
- j |
|
- G |
|
- T |
|
- f |
|
- p |
|
- 'E:' |
|
- c |
|
- i |
|
- 'au:' |
|
- 'O:' |
|
- 'a:' |
|
- ei |
|
- 'i:' |
|
- r_0 |
|
- t_h |
|
- O |
|
- k_h |
|
- ou |
|
- ai |
|
- '9' |
|
- au |
|
- 'I:' |
|
- 'ou:' |
|
- u |
|
- 'ei:' |
|
- N |
|
- l_0 |
|
- 'u:' |
|
- n_0 |
|
- '9:' |
|
- 'ai:' |
|
- 9i |
|
- c_h |
|
- p_h |
|
- x |
|
- C |
|
- '9i:' |
|
- 'Y:' |
|
- J |
|
- N_0 |
|
- m_0 |
|
- Oi |
|
- Yi |
|
- J_0 |
|
- spn |
|
- '1' |
|
- '7' |
|
- <sos/eos> |
|
odim: null |
|
model_conf: {} |
|
use_preprocessor: true |
|
token_type: phn |
|
bpemodel: null |
|
non_linguistic_symbols: null |
|
cleaner: null |
|
g2p: null |
|
feats_extract: fbank |
|
feats_extract_conf: |
|
n_fft: 1024 |
|
hop_length: 256 |
|
win_length: null |
|
fs: 22050 |
|
fmin: 80 |
|
fmax: 7600 |
|
n_mels: 80 |
|
normalize: global_mvn |
|
normalize_conf: |
|
stats_file: exp/tts_stats_d/train/feats_stats.npz |
|
tts: fastspeech2 |
|
tts_conf: |
|
adim: 384 |
|
aheads: 2 |
|
elayers: 4 |
|
eunits: 1536 |
|
dlayers: 4 |
|
dunits: 1536 |
|
positionwise_layer_type: conv1d |
|
positionwise_conv_kernel_size: 3 |
|
duration_predictor_layers: 2 |
|
duration_predictor_chans: 256 |
|
duration_predictor_kernel_size: 3 |
|
postnet_layers: 5 |
|
postnet_filts: 5 |
|
postnet_chans: 256 |
|
use_masking: true |
|
use_scaled_pos_enc: true |
|
encoder_normalize_before: true |
|
decoder_normalize_before: true |
|
reduction_factor: 1 |
|
init_type: xavier_uniform |
|
init_enc_alpha: 1.0 |
|
init_dec_alpha: 1.0 |
|
transformer_enc_dropout_rate: 0.2 |
|
transformer_enc_positional_dropout_rate: 0.2 |
|
transformer_enc_attn_dropout_rate: 0.2 |
|
transformer_dec_dropout_rate: 0.2 |
|
transformer_dec_positional_dropout_rate: 0.2 |
|
transformer_dec_attn_dropout_rate: 0.2 |
|
pitch_predictor_layers: 5 |
|
pitch_predictor_chans: 256 |
|
pitch_predictor_kernel_size: 5 |
|
pitch_predictor_dropout: 0.5 |
|
pitch_embed_kernel_size: 1 |
|
pitch_embed_dropout: 0.0 |
|
stop_gradient_from_pitch_predictor: true |
|
energy_predictor_layers: 2 |
|
energy_predictor_chans: 256 |
|
energy_predictor_kernel_size: 3 |
|
energy_predictor_dropout: 0.5 |
|
energy_embed_kernel_size: 1 |
|
energy_embed_dropout: 0.0 |
|
stop_gradient_from_energy_predictor: false |
|
spk_embed_dim: 512 |
|
spk_embed_integration_type: add |
|
pitch_extract: dio |
|
pitch_extract_conf: |
|
fs: 22050 |
|
n_fft: 1024 |
|
hop_length: 256 |
|
f0max: 400 |
|
f0min: 80 |
|
reduction_factor: 1 |
|
pitch_normalize: global_mvn |
|
pitch_normalize_conf: |
|
stats_file: exp/tts_stats_d/train/pitch_stats.npz |
|
energy_extract: energy |
|
energy_extract_conf: |
|
fs: 22050 |
|
n_fft: 1024 |
|
hop_length: 256 |
|
win_length: null |
|
reduction_factor: 1 |
|
energy_normalize: global_mvn |
|
energy_normalize_conf: |
|
stats_file: exp/tts_stats_d/train/energy_stats.npz |
|
required: |
|
- output_dir |
|
- token_list |
|
version: '202402' |
|
distributed: false |
|
``` |
|
|
|
</details> |
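
As a side note on the optimizer settings above, `noamlr` with `lr: 0.1`, `model_size: 384`, and `warmup_steps: 4000` corresponds to the usual Noam schedule: a linear warm-up followed by an inverse-square-root decay. The sketch below is only illustrative and assumes the textbook formula; it is not code from the ESPnet recipe itself.

```python
# Illustrative sketch of the learning-rate curve implied by optim_conf/scheduler_conf,
# assuming the textbook Noam formula (not taken from the recipe).
def noam_lr(step: int, lr: float = 0.1, model_size: int = 384, warmup_steps: int = 4000) -> float:
    step = max(step, 1)
    return lr * model_size ** -0.5 * min(step ** -0.5, step * warmup_steps ** -1.5)

for step in (100, 4_000, 40_000):
    print(f"step {step:>6}: lr = {noam_lr(step):.2e}")
# Peaks at step == warmup_steps (about 8.1e-05 with these settings), then decays as 1/sqrt(step).
```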
|
|
|
|
|
|
|
### Citing ESPnet |
|
|
|
```bibtex
|
@inproceedings{watanabe2018espnet, |
|
author={Shinji Watanabe and Takaaki Hori and Shigeki Karita and Tomoki Hayashi and Jiro Nishitoba and Yuya Unno and Nelson Yalta and Jahn Heymann and Matthew Wiesner and Nanxin Chen and Adithya Renduchintala and Tsubasa Ochiai}, |
|
title={{ESPnet}: End-to-End Speech Processing Toolkit}, |
|
year={2018}, |
|
booktitle={Proceedings of Interspeech}, |
|
pages={2207--2211}, |
|
doi={10.21437/Interspeech.2018-1456}, |
|
url={http://dx.doi.org/10.21437/Interspeech.2018-1456} |
|
}

@inproceedings{hayashi2020espnet,
|
title={{Espnet-TTS}: Unified, reproducible, and integratable open source end-to-end text-to-speech toolkit}, |
|
author={Hayashi, Tomoki and Yamamoto, Ryuichi and Inoue, Katsuki and Yoshimura, Takenori and Watanabe, Shinji and Toda, Tomoki and Takeda, Kazuya and Zhang, Yu and Tan, Xu}, |
|
booktitle={Proceedings of IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)}, |
|
pages={7654--7658}, |
|
year={2020}, |
|
organization={IEEE} |
|
}

```
|
|
|
or arXiv: |
|
|
|
```bibtex |
|
@misc{watanabe2018espnet, |
|
title={ESPnet: End-to-End Speech Processing Toolkit}, |
|
author={Shinji Watanabe and Takaaki Hori and Shigeki Karita and Tomoki Hayashi and Jiro Nishitoba and Yuya Unno and Nelson Yalta and Jahn Heymann and Matthew Wiesner and Nanxin Chen and Adithya Renduchintala and Tsubasa Ochiai}, |
|
year={2018}, |
|
eprint={1804.00015}, |
|
archivePrefix={arXiv}, |
|
primaryClass={cs.CL} |
|
} |
|
``` |
|
|