# Provenance (scraped model-card header, kept as comments so the file parses):
# Siddhant's picture
# import from zenodo
# f7f1668
---
config: conf/tuning/train_asr_transformer.yaml
print_config: false
log_level: INFO
dry_run: false
iterator_type: sequence
output_dir: exp/asr_transformer
ngpu: 1
seed: 0
num_workers: 1
num_att_plot: 3
dist_backend: nccl
dist_init_method: env://
dist_world_size: null
dist_rank: null
local_rank: 0
dist_master_addr: null
dist_master_port: null
dist_launcher: null
multiprocessing_distributed: false
cudnn_enabled: true
cudnn_benchmark: false
cudnn_deterministic: true
collect_stats: false
write_collected_feats: false
max_epoch: 100
patience: 0
val_scheduler_criterion:
- valid
- loss
early_stopping_criterion:
- valid
- loss
- min
best_model_criterion:
- - valid
- acc
- max
keep_nbest_models: 10
grad_clip: 5
grad_clip_type: 2.0
grad_noise: false
accum_grad: 2
no_forward_run: false
resume: true
train_dtype: float32
use_amp: false
log_interval: null
unused_parameters: false
use_tensorboard: true
use_wandb: false
wandb_project: null
wandb_id: null
pretrain_path: null
init_param: []
num_iters_per_epoch: null
batch_size: 32
valid_batch_size: null
batch_bins: 1000000
valid_batch_bins: null
train_shape_file:
- exp/asr_stats_raw_bpe150/train/speech_shape
- exp/asr_stats_raw_bpe150/train/text_shape.bpe
valid_shape_file:
- exp/asr_stats_raw_bpe150/valid/speech_shape
- exp/asr_stats_raw_bpe150/valid/text_shape.bpe
batch_type: folded
valid_batch_type: null
fold_length:
- 80000
- 150
sort_in_batch: descending
sort_batch: descending
multiple_iterator: false
chunk_length: 500
chunk_shift_ratio: 0.5
num_cache_chunks: 1024
train_data_path_and_name_and_type:
- - dump/raw/es_train/wav.scp
- speech
- sound
- - dump/raw/es_train/text
- text
- text
valid_data_path_and_name_and_type:
- - dump/raw/es_dev/wav.scp
- speech
- sound
- - dump/raw/es_dev/text
- text
- text
allow_variable_data_keys: false
max_cache_size: 0.0
max_cache_fd: 32
valid_max_cache_size: null
optim: adam
optim_conf:
lr: 1.0
scheduler: noamlr
scheduler_conf:
warmup_steps: 25000
token_list:
- <blank>
- <unk>
- ▁  # NOTE(review): original line was a bare item (parses as null); restored the standalone SentencePiece word-boundary token `▁` lost in extraction — verify against bpe.model
- s
- n
- r
- o
- a
- ▁de
- e
- l
- ▁a
- u
- ▁y
- ▁que
- ra
- ta
- do
- ▁la
- i
- ▁en
- re
- to
- ▁el
- d
- p
- da
- la
- c
- b
- t
- ro
- ó
- en
- ri
- g
- ba
- ▁se
- os
- er
- te
- ▁con
- ci
- ▁es
- es
- ▁no
- ▁su
- h
- ti
- é
- mo
- á
- ▁ca
- ▁ha
- na
- ▁los
- lo
- í
- ía
- de
- me
- ca
- ▁al
- le
- ce
- v
- ma
- nte
- ▁di
- ▁ma
- ▁por
- y
- di
- m
- ▁pa
- sa
- ▁si
- ▁pe
- gu
- z
- ▁mi
- ▁co
- ▁me
- ▁o
- ▁e
- ▁un
- tra
- ▁re
- li
- ▁f
- co
- ▁á
- ndo
- se
- mi
- ga
- ni
- ▁cu
- ▁le
- jo
- ▁ve
- mp
- bi
- f
- va
- ▁mu
- go
- ▁so
- ñ
- tu
- si
- ▁lo
- ▁pu
- ▁vi
- ▁b
- ▁las
- ▁c
- ▁sa
- za
- ▁del
- ▁po
- ▁in
- vi
- ▁te
- tro
- cia
- ▁una
- qui
- pi
- que
- ja
- pa
- ▁para
- cu
- pe
- ▁como
- ▁esta
- ve
- je
- lle
- x
- ú
- j
- q
- ''''
- k
- w
- ü
- '-'
- <sos/eos>
init: chainer
input_size: null
ctc_conf:
dropout_rate: 0.0
ctc_type: builtin
reduce: true
ignore_nan_grad: false
model_conf:
ctc_weight: 0.3
lsm_weight: 0.1
length_normalized_loss: false
use_preprocessor: true
token_type: bpe
bpemodel: data/token_list/bpe_unigram150/bpe.model
non_linguistic_symbols: null
cleaner: null
g2p: null
frontend: default
frontend_conf:
fs: 16k
specaug: null
specaug_conf: {}
normalize: global_mvn
normalize_conf:
stats_file: exp/asr_stats_raw_bpe150/train/feats_stats.npz
preencoder: null
preencoder_conf: {}
encoder: transformer
encoder_conf:
input_layer: conv2d
num_blocks: 12
linear_units: 2048
dropout_rate: 0.1
output_size: 256
attention_heads: 4
attention_dropout_rate: 0.0
decoder: transformer
decoder_conf:
input_layer: embed
num_blocks: 6
linear_units: 2048
dropout_rate: 0.1
required:
- output_dir
- token_list
distributed: false