ftshijt committed on
Commit
f311193
1 Parent(s): 6457367

Update model

Browse files
Files changed (29) hide show
  1. README.md +343 -0
  2. exp/codec_train_dac_large_v1.4_raw_fs16000/120epoch.pth +3 -0
  3. exp/codec_train_dac_large_v1.4_raw_fs16000/config.yaml +268 -0
  4. exp/codec_train_dac_large_v1.4_raw_fs16000/images/adv_loss.png +0 -0
  5. exp/codec_train_dac_large_v1.4_raw_fs16000/images/codec_commit_loss.png +0 -0
  6. exp/codec_train_dac_large_v1.4_raw_fs16000/images/codec_loss.png +0 -0
  7. exp/codec_train_dac_large_v1.4_raw_fs16000/images/codec_quantization_loss.png +0 -0
  8. exp/codec_train_dac_large_v1.4_raw_fs16000/images/discriminator_backward_time.png +0 -0
  9. exp/codec_train_dac_large_v1.4_raw_fs16000/images/discriminator_forward_time.png +0 -0
  10. exp/codec_train_dac_large_v1.4_raw_fs16000/images/discriminator_loss.png +0 -0
  11. exp/codec_train_dac_large_v1.4_raw_fs16000/images/discriminator_optim_step_time.png +0 -0
  12. exp/codec_train_dac_large_v1.4_raw_fs16000/images/discriminator_train_time.png +0 -0
  13. exp/codec_train_dac_large_v1.4_raw_fs16000/images/fake_loss.png +0 -0
  14. exp/codec_train_dac_large_v1.4_raw_fs16000/images/feat_match_loss.png +0 -0
  15. exp/codec_train_dac_large_v1.4_raw_fs16000/images/generator_backward_time.png +0 -0
  16. exp/codec_train_dac_large_v1.4_raw_fs16000/images/generator_forward_time.png +0 -0
  17. exp/codec_train_dac_large_v1.4_raw_fs16000/images/generator_optim_step_time.png +0 -0
  18. exp/codec_train_dac_large_v1.4_raw_fs16000/images/generator_train_time.png +0 -0
  19. exp/codec_train_dac_large_v1.4_raw_fs16000/images/gpu_max_cached_mem_GB.png +0 -0
  20. exp/codec_train_dac_large_v1.4_raw_fs16000/images/iter_time.png +0 -0
  21. exp/codec_train_dac_large_v1.4_raw_fs16000/images/loss.png +0 -0
  22. exp/codec_train_dac_large_v1.4_raw_fs16000/images/mel_loss.png +0 -0
  23. exp/codec_train_dac_large_v1.4_raw_fs16000/images/mel_loss_real.png +0 -0
  24. exp/codec_train_dac_large_v1.4_raw_fs16000/images/optim0_lr0.png +0 -0
  25. exp/codec_train_dac_large_v1.4_raw_fs16000/images/optim1_lr0.png +0 -0
  26. exp/codec_train_dac_large_v1.4_raw_fs16000/images/real_loss.png +0 -0
  27. exp/codec_train_dac_large_v1.4_raw_fs16000/images/reconstruct_loss.png +0 -0
  28. exp/codec_train_dac_large_v1.4_raw_fs16000/images/train_time.png +0 -0
  29. meta.yaml +8 -0
README.md ADDED
@@ -0,0 +1,343 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ tags:
3
+ - espnet
4
+ - audio
5
+ - codec
6
+ language: multilingual
7
+ datasets:
8
+ - amuse
9
+ license: cc-by-4.0
10
+ ---
11
+
12
+ ## ESPnet2 Codec model
13
+
14
+ ### `ftshijt/espnet_codec_dac_large_v1.4_120epoch`
15
+
16
+ This model was trained by ftshijt using amuse recipe in [espnet](https://github.com/espnet/espnet/).
17
+
18
+ ### Demo: How to use in ESPnet2
19
+
20
+ Follow the [ESPnet installation instructions](https://espnet.github.io/espnet/installation.html)
21
+ if you haven't done that already.
22
+
23
+ ```bash
24
+ cd espnet
25
+ git checkout 9baec3a7b10b784cb721849e19caed19e8ac45bc
26
+ pip install -e .
27
+ cd egs2/amuse/codec1
28
+ ./run.sh --skip_data_prep false --skip_train true --download_model ftshijt/espnet_codec_dac_large_v1.4_120epoch
29
+ ```
30
+
31
+
32
+
33
+ ## Codec config
34
+
35
+ <details><summary>expand</summary>
36
+
37
+ ```
38
+ config: conf/train_dac_large_v1.4.yaml
39
+ print_config: false
40
+ log_level: INFO
41
+ drop_last_iter: false
42
+ dry_run: false
43
+ iterator_type: chunk
44
+ valid_iterator_type: null
45
+ output_dir: exp/codec_train_dac_large_v1.4_raw_fs16000
46
+ ngpu: 1
47
+ seed: 777
48
+ num_workers: 1
49
+ num_att_plot: 0
50
+ dist_backend: nccl
51
+ dist_init_method: env://
52
+ dist_world_size: 2
53
+ dist_rank: 0
54
+ local_rank: 0
55
+ dist_master_addr: localhost
56
+ dist_master_port: 49719
57
+ dist_launcher: null
58
+ multiprocessing_distributed: true
59
+ unused_parameters: true
60
+ sharded_ddp: false
61
+ use_deepspeed: false
62
+ deepspeed_config: null
63
+ cudnn_enabled: true
64
+ cudnn_benchmark: false
65
+ cudnn_deterministic: false
66
+ use_tf32: false
67
+ collect_stats: false
68
+ write_collected_feats: false
69
+ max_epoch: 120
70
+ patience: null
71
+ val_scheduler_criterion:
72
+ - valid
73
+ - loss
74
+ early_stopping_criterion:
75
+ - valid
76
+ - loss
77
+ - min
78
+ best_model_criterion:
79
+ - - valid
80
+ - mel_loss
81
+ - min
82
+ - - train
83
+ - mel_loss
84
+ - min
85
+ - - train
86
+ - total_count
87
+ - max
88
+ keep_nbest_models: 5
89
+ nbest_averaging_interval: 0
90
+ grad_clip: -1
91
+ grad_clip_type: 2.0
92
+ grad_noise: false
93
+ accum_grad: 1
94
+ no_forward_run: false
95
+ resume: true
96
+ train_dtype: float32
97
+ use_amp: false
98
+ log_interval: 50
99
+ use_matplotlib: true
100
+ use_tensorboard: true
101
+ create_graph_in_tensorboard: false
102
+ use_wandb: false
103
+ wandb_project: null
104
+ wandb_id: null
105
+ wandb_entity: null
106
+ wandb_name: null
107
+ wandb_model_log_interval: -1
108
+ detect_anomaly: false
109
+ use_adapter: false
110
+ adapter: lora
111
+ save_strategy: all
112
+ adapter_conf: {}
113
+ pretrain_path: null
114
+ init_param: []
115
+ ignore_init_mismatch: false
116
+ freeze_param: []
117
+ num_iters_per_epoch: 5000
118
+ batch_size: 64
119
+ valid_batch_size: null
120
+ batch_bins: 1000000
121
+ valid_batch_bins: null
122
+ category_sample_size: 10
123
+ train_shape_file:
124
+ - exp/codec_stats_raw/train/audio_shape
125
+ valid_shape_file:
126
+ - exp/codec_stats_raw/valid/audio_shape
127
+ batch_type: unsorted
128
+ valid_batch_type: null
129
+ fold_length:
130
+ - 256000
131
+ sort_in_batch: descending
132
+ shuffle_within_batch: false
133
+ sort_batch: descending
134
+ multiple_iterator: false
135
+ chunk_length: 32000
136
+ chunk_shift_ratio: 0.5
137
+ num_cache_chunks: 256
138
+ chunk_excluded_key_prefixes: []
139
+ chunk_default_fs: null
140
+ chunk_max_abs_length: null
141
+ chunk_discard_short_samples: true
142
+ train_data_path_and_name_and_type:
143
+ - - dump/raw/owsm_all_temp/wav.scp
144
+ - audio
145
+ - kaldi_ark
146
+ valid_data_path_and_name_and_type:
147
+ - - dump/raw/dev-small/wav.scp
148
+ - audio
149
+ - kaldi_ark
150
+ multi_task_dataset: false
151
+ allow_variable_data_keys: false
152
+ max_cache_size: 0.0
153
+ max_cache_fd: 32
154
+ allow_multi_rates: false
155
+ valid_max_cache_size: null
156
+ exclude_weight_decay: false
157
+ exclude_weight_decay_conf: {}
158
+ optim: adamw
159
+ optim_conf:
160
+ lr: 0.0002
161
+ betas:
162
+ - 0.5
163
+ - 0.9
164
+ eps: 1.0e-09
165
+ weight_decay: 0.0
166
+ scheduler: exponentiallr
167
+ scheduler_conf:
168
+ gamma: 0.999875
169
+ optim2: adamw
170
+ optim2_conf:
171
+ lr: 0.0002
172
+ betas:
173
+ - 0.5
174
+ - 0.9
175
+ eps: 1.0e-09
176
+ weight_decay: 0.0
177
+ scheduler2: exponentiallr
178
+ scheduler2_conf:
179
+ gamma: 0.999875
180
+ generator_first: true
181
+ skip_discriminator_prob: 0.0
182
+ model_conf: {}
183
+ use_preprocessor: true
184
+ codec: dac
185
+ codec_conf:
186
+ sampling_rate: 16000
187
+ generator_params:
188
+ hidden_dim: 512
189
+ codebook_dim: 512
190
+ encdec_channels: 1
191
+ encdec_n_filters: 32
192
+ encdec_n_residual_layers: 3
193
+ encdec_ratios:
194
+ - 8
195
+ - 5
196
+ - 4
197
+ - 2
198
+ encdec_activation: Snake
199
+ encdec_norm: weight_norm
200
+ encdec_kernel_size: 7
201
+ encdec_residual_kernel_size: 7
202
+ encdec_last_kernel_size: 7
203
+ encdec_dilation_base: 2
204
+ encdec_causal: false
205
+ encdec_pad_mode: reflect
206
+ encdec_true_skip: false
207
+ encdec_compress: 2
208
+ encdec_lstm: 2
209
+ decoder_trim_right_ratio: 1.0
210
+ decoder_final_activation: null
211
+ decoder_final_activation_params: null
212
+ quantizer_n_q: 8
213
+ quantizer_bins: 1024
214
+ quantizer_decay: 0.99
215
+ quantizer_kmeans_init: true
216
+ quantizer_kmeans_iters: 50
217
+ quantizer_threshold_ema_dead_code: 2
218
+ quantizer_target_bandwidth:
219
+ - 0.5
220
+ - 1
221
+ - 2
222
+ - 4
223
+ quantizer_dropout: true
224
+ sample_rate: 16000
225
+ discriminator_params:
226
+ msmpmb_discriminator_params:
227
+ rates: []
228
+ sample_rate: 24000
229
+ fft_sizes:
230
+ - 2048
231
+ - 1024
232
+ - 512
233
+ periods:
234
+ - 2
235
+ - 3
236
+ - 5
237
+ - 7
238
+ - 11
239
+ period_discriminator_params:
240
+ in_channels: 1
241
+ out_channels: 1
242
+ kernel_sizes:
243
+ - 5
244
+ - 3
245
+ channels: 32
246
+ downsample_scales:
247
+ - 3
248
+ - 3
249
+ - 3
250
+ - 3
251
+ - 1
252
+ max_downsample_channels: 1024
253
+ bias: true
254
+ nonlinear_activation: LeakyReLU
255
+ nonlinear_activation_params:
256
+ negative_slope: 0.1
257
+ use_weight_norm: true
258
+ use_spectral_norm: false
259
+ band_discriminator_params:
260
+ hop_factor: 0.25
261
+ sample_rate: 24000
262
+ bands:
263
+ - - 0.0
264
+ - 0.1
265
+ - - 0.1
266
+ - 0.25
267
+ - - 0.25
268
+ - 0.5
269
+ - - 0.5
270
+ - 0.75
271
+ - - 0.75
272
+ - 1.0
273
+ channel: 32
274
+ generator_adv_loss_params:
275
+ average_by_discriminators: false
276
+ loss_type: mse
277
+ discriminator_adv_loss_params:
278
+ average_by_discriminators: false
279
+ loss_type: mse
280
+ use_feat_match_loss: true
281
+ feat_match_loss_params:
282
+ average_by_discriminators: false
283
+ average_by_layers: false
284
+ include_final_outputs: true
285
+ use_mel_loss: true
286
+ mel_loss_params:
287
+ range_start: 6
288
+ range_end: 11
289
+ window: hann
290
+ n_mels: 80
291
+ fmin: 0
292
+ fmax: null
293
+ log_base: null
294
+ fs: 16000
295
+ lambda_quantization: 0.25
296
+ lambda_commit: 1.0
297
+ lambda_reconstruct: 1.0
298
+ lambda_adv: 1.0
299
+ lambda_mel: 45.0
300
+ lambda_feat_match: 2.0
301
+ cache_generator_outputs: true
302
+ required:
303
+ - output_dir
304
+ version: '202402'
305
+ distributed: true
306
+ ```
307
+
308
+ </details>
309
+
310
+
311
+
312
+ ### Citing ESPnet
313
+
314
+ ```BibTex
315
+ @inproceedings{watanabe2018espnet,
316
+ author={Shinji Watanabe and Takaaki Hori and Shigeki Karita and Tomoki Hayashi and Jiro Nishitoba and Yuya Unno and Nelson Yalta and Jahn Heymann and Matthew Wiesner and Nanxin Chen and Adithya Renduchintala and Tsubasa Ochiai},
317
+ title={{ESPnet}: End-to-End Speech Processing Toolkit},
318
+ year={2018},
319
+ booktitle={Proceedings of Interspeech},
320
+ pages={2207--2211},
321
+ doi={10.21437/Interspeech.2018-1456},
322
+ url={http://dx.doi.org/10.21437/Interspeech.2018-1456}
323
+ }
324
+
325
+
326
+
327
+
328
+
329
+
330
+ ```
331
+
332
+ or arXiv:
333
+
334
+ ```bibtex
335
+ @misc{watanabe2018espnet,
336
+ title={ESPnet: End-to-End Speech Processing Toolkit},
337
+ author={Shinji Watanabe and Takaaki Hori and Shigeki Karita and Tomoki Hayashi and Jiro Nishitoba and Yuya Unno and Nelson Yalta and Jahn Heymann and Matthew Wiesner and Nanxin Chen and Adithya Renduchintala and Tsubasa Ochiai},
338
+ year={2018},
339
+ eprint={1804.00015},
340
+ archivePrefix={arXiv},
341
+ primaryClass={cs.CL}
342
+ }
343
+ ```
exp/codec_train_dac_large_v1.4_raw_fs16000/120epoch.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3a4d83611032f85150d6d423dd80665ee20d6be67300cd976cb1e1c4efa17348
3
+ size 283100815
exp/codec_train_dac_large_v1.4_raw_fs16000/config.yaml ADDED
@@ -0,0 +1,268 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ config: conf/train_dac_large_v1.4.yaml
2
+ print_config: false
3
+ log_level: INFO
4
+ drop_last_iter: false
5
+ dry_run: false
6
+ iterator_type: chunk
7
+ valid_iterator_type: null
8
+ output_dir: exp/codec_train_dac_large_v1.4_raw_fs16000
9
+ ngpu: 1
10
+ seed: 777
11
+ num_workers: 1
12
+ num_att_plot: 0
13
+ dist_backend: nccl
14
+ dist_init_method: env://
15
+ dist_world_size: 2
16
+ dist_rank: 0
17
+ local_rank: 0
18
+ dist_master_addr: localhost
19
+ dist_master_port: 49719
20
+ dist_launcher: null
21
+ multiprocessing_distributed: true
22
+ unused_parameters: true
23
+ sharded_ddp: false
24
+ use_deepspeed: false
25
+ deepspeed_config: null
26
+ cudnn_enabled: true
27
+ cudnn_benchmark: false
28
+ cudnn_deterministic: false
29
+ use_tf32: false
30
+ collect_stats: false
31
+ write_collected_feats: false
32
+ max_epoch: 120
33
+ patience: null
34
+ val_scheduler_criterion:
35
+ - valid
36
+ - loss
37
+ early_stopping_criterion:
38
+ - valid
39
+ - loss
40
+ - min
41
+ best_model_criterion:
42
+ - - valid
43
+ - mel_loss
44
+ - min
45
+ - - train
46
+ - mel_loss
47
+ - min
48
+ - - train
49
+ - total_count
50
+ - max
51
+ keep_nbest_models: 5
52
+ nbest_averaging_interval: 0
53
+ grad_clip: -1
54
+ grad_clip_type: 2.0
55
+ grad_noise: false
56
+ accum_grad: 1
57
+ no_forward_run: false
58
+ resume: true
59
+ train_dtype: float32
60
+ use_amp: false
61
+ log_interval: 50
62
+ use_matplotlib: true
63
+ use_tensorboard: true
64
+ create_graph_in_tensorboard: false
65
+ use_wandb: false
66
+ wandb_project: null
67
+ wandb_id: null
68
+ wandb_entity: null
69
+ wandb_name: null
70
+ wandb_model_log_interval: -1
71
+ detect_anomaly: false
72
+ use_adapter: false
73
+ adapter: lora
74
+ save_strategy: all
75
+ adapter_conf: {}
76
+ pretrain_path: null
77
+ init_param: []
78
+ ignore_init_mismatch: false
79
+ freeze_param: []
80
+ num_iters_per_epoch: 5000
81
+ batch_size: 64
82
+ valid_batch_size: null
83
+ batch_bins: 1000000
84
+ valid_batch_bins: null
85
+ category_sample_size: 10
86
+ train_shape_file:
87
+ - exp/codec_stats_raw/train/audio_shape
88
+ valid_shape_file:
89
+ - exp/codec_stats_raw/valid/audio_shape
90
+ batch_type: unsorted
91
+ valid_batch_type: null
92
+ fold_length:
93
+ - 256000
94
+ sort_in_batch: descending
95
+ shuffle_within_batch: false
96
+ sort_batch: descending
97
+ multiple_iterator: false
98
+ chunk_length: 32000
99
+ chunk_shift_ratio: 0.5
100
+ num_cache_chunks: 256
101
+ chunk_excluded_key_prefixes: []
102
+ chunk_default_fs: null
103
+ chunk_max_abs_length: null
104
+ chunk_discard_short_samples: true
105
+ train_data_path_and_name_and_type:
106
+ - - dump/raw/owsm_all_temp/wav.scp
107
+ - audio
108
+ - kaldi_ark
109
+ valid_data_path_and_name_and_type:
110
+ - - dump/raw/dev-small/wav.scp
111
+ - audio
112
+ - kaldi_ark
113
+ multi_task_dataset: false
114
+ allow_variable_data_keys: false
115
+ max_cache_size: 0.0
116
+ max_cache_fd: 32
117
+ allow_multi_rates: false
118
+ valid_max_cache_size: null
119
+ exclude_weight_decay: false
120
+ exclude_weight_decay_conf: {}
121
+ optim: adamw
122
+ optim_conf:
123
+ lr: 0.0002
124
+ betas:
125
+ - 0.5
126
+ - 0.9
127
+ eps: 1.0e-09
128
+ weight_decay: 0.0
129
+ scheduler: exponentiallr
130
+ scheduler_conf:
131
+ gamma: 0.999875
132
+ optim2: adamw
133
+ optim2_conf:
134
+ lr: 0.0002
135
+ betas:
136
+ - 0.5
137
+ - 0.9
138
+ eps: 1.0e-09
139
+ weight_decay: 0.0
140
+ scheduler2: exponentiallr
141
+ scheduler2_conf:
142
+ gamma: 0.999875
143
+ generator_first: true
144
+ skip_discriminator_prob: 0.0
145
+ model_conf: {}
146
+ use_preprocessor: true
147
+ codec: dac
148
+ codec_conf:
149
+ sampling_rate: 16000
150
+ generator_params:
151
+ hidden_dim: 512
152
+ codebook_dim: 512
153
+ encdec_channels: 1
154
+ encdec_n_filters: 32
155
+ encdec_n_residual_layers: 3
156
+ encdec_ratios:
157
+ - 8
158
+ - 5
159
+ - 4
160
+ - 2
161
+ encdec_activation: Snake
162
+ encdec_norm: weight_norm
163
+ encdec_kernel_size: 7
164
+ encdec_residual_kernel_size: 7
165
+ encdec_last_kernel_size: 7
166
+ encdec_dilation_base: 2
167
+ encdec_causal: false
168
+ encdec_pad_mode: reflect
169
+ encdec_true_skip: false
170
+ encdec_compress: 2
171
+ encdec_lstm: 2
172
+ decoder_trim_right_ratio: 1.0
173
+ decoder_final_activation: null
174
+ decoder_final_activation_params: null
175
+ quantizer_n_q: 8
176
+ quantizer_bins: 1024
177
+ quantizer_decay: 0.99
178
+ quantizer_kmeans_init: true
179
+ quantizer_kmeans_iters: 50
180
+ quantizer_threshold_ema_dead_code: 2
181
+ quantizer_target_bandwidth:
182
+ - 0.5
183
+ - 1
184
+ - 2
185
+ - 4
186
+ quantizer_dropout: true
187
+ sample_rate: 16000
188
+ discriminator_params:
189
+ msmpmb_discriminator_params:
190
+ rates: []
191
+ sample_rate: 24000
192
+ fft_sizes:
193
+ - 2048
194
+ - 1024
195
+ - 512
196
+ periods:
197
+ - 2
198
+ - 3
199
+ - 5
200
+ - 7
201
+ - 11
202
+ period_discriminator_params:
203
+ in_channels: 1
204
+ out_channels: 1
205
+ kernel_sizes:
206
+ - 5
207
+ - 3
208
+ channels: 32
209
+ downsample_scales:
210
+ - 3
211
+ - 3
212
+ - 3
213
+ - 3
214
+ - 1
215
+ max_downsample_channels: 1024
216
+ bias: true
217
+ nonlinear_activation: LeakyReLU
218
+ nonlinear_activation_params:
219
+ negative_slope: 0.1
220
+ use_weight_norm: true
221
+ use_spectral_norm: false
222
+ band_discriminator_params:
223
+ hop_factor: 0.25
224
+ sample_rate: 24000
225
+ bands:
226
+ - - 0.0
227
+ - 0.1
228
+ - - 0.1
229
+ - 0.25
230
+ - - 0.25
231
+ - 0.5
232
+ - - 0.5
233
+ - 0.75
234
+ - - 0.75
235
+ - 1.0
236
+ channel: 32
237
+ generator_adv_loss_params:
238
+ average_by_discriminators: false
239
+ loss_type: mse
240
+ discriminator_adv_loss_params:
241
+ average_by_discriminators: false
242
+ loss_type: mse
243
+ use_feat_match_loss: true
244
+ feat_match_loss_params:
245
+ average_by_discriminators: false
246
+ average_by_layers: false
247
+ include_final_outputs: true
248
+ use_mel_loss: true
249
+ mel_loss_params:
250
+ range_start: 6
251
+ range_end: 11
252
+ window: hann
253
+ n_mels: 80
254
+ fmin: 0
255
+ fmax: null
256
+ log_base: null
257
+ fs: 16000
258
+ lambda_quantization: 0.25
259
+ lambda_commit: 1.0
260
+ lambda_reconstruct: 1.0
261
+ lambda_adv: 1.0
262
+ lambda_mel: 45.0
263
+ lambda_feat_match: 2.0
264
+ cache_generator_outputs: true
265
+ required:
266
+ - output_dir
267
+ version: '202402'
268
+ distributed: true
exp/codec_train_dac_large_v1.4_raw_fs16000/images/adv_loss.png ADDED
exp/codec_train_dac_large_v1.4_raw_fs16000/images/codec_commit_loss.png ADDED
exp/codec_train_dac_large_v1.4_raw_fs16000/images/codec_loss.png ADDED
exp/codec_train_dac_large_v1.4_raw_fs16000/images/codec_quantization_loss.png ADDED
exp/codec_train_dac_large_v1.4_raw_fs16000/images/discriminator_backward_time.png ADDED
exp/codec_train_dac_large_v1.4_raw_fs16000/images/discriminator_forward_time.png ADDED
exp/codec_train_dac_large_v1.4_raw_fs16000/images/discriminator_loss.png ADDED
exp/codec_train_dac_large_v1.4_raw_fs16000/images/discriminator_optim_step_time.png ADDED
exp/codec_train_dac_large_v1.4_raw_fs16000/images/discriminator_train_time.png ADDED
exp/codec_train_dac_large_v1.4_raw_fs16000/images/fake_loss.png ADDED
exp/codec_train_dac_large_v1.4_raw_fs16000/images/feat_match_loss.png ADDED
exp/codec_train_dac_large_v1.4_raw_fs16000/images/generator_backward_time.png ADDED
exp/codec_train_dac_large_v1.4_raw_fs16000/images/generator_forward_time.png ADDED
exp/codec_train_dac_large_v1.4_raw_fs16000/images/generator_optim_step_time.png ADDED
exp/codec_train_dac_large_v1.4_raw_fs16000/images/generator_train_time.png ADDED
exp/codec_train_dac_large_v1.4_raw_fs16000/images/gpu_max_cached_mem_GB.png ADDED
exp/codec_train_dac_large_v1.4_raw_fs16000/images/iter_time.png ADDED
exp/codec_train_dac_large_v1.4_raw_fs16000/images/loss.png ADDED
exp/codec_train_dac_large_v1.4_raw_fs16000/images/mel_loss.png ADDED
exp/codec_train_dac_large_v1.4_raw_fs16000/images/mel_loss_real.png ADDED
exp/codec_train_dac_large_v1.4_raw_fs16000/images/optim0_lr0.png ADDED
exp/codec_train_dac_large_v1.4_raw_fs16000/images/optim1_lr0.png ADDED
exp/codec_train_dac_large_v1.4_raw_fs16000/images/real_loss.png ADDED
exp/codec_train_dac_large_v1.4_raw_fs16000/images/reconstruct_loss.png ADDED
exp/codec_train_dac_large_v1.4_raw_fs16000/images/train_time.png ADDED
meta.yaml ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ espnet: '202402'
2
+ files:
3
+ model_file: exp/codec_train_dac_large_v1.4_raw_fs16000/120epoch.pth
4
+ python: 3.10.13 | packaged by conda-forge | (main, Dec 23 2023, 15:26:55) [GCC 12.3.0]
5
+ timestamp: 1730047564.380797
6
+ torch: 2.5.0.dev20240825+cu124
7
+ yaml_files:
8
+ train_config: exp/codec_train_dac_large_v1.4_raw_fs16000/config.yaml