HoneyTian committed
Commit 79a2d3a · 1 Parent(s): bd3d872
examples/rnnoise/yaml/config.yaml CHANGED
@@ -3,29 +3,33 @@ model_name: "rnnoise"
  # spec
  sample_rate: 8000
  segment_size: 32000
- nfft: 512
- win_size: 512
- hop_size: 256
+ nfft: 160
+ win_size: 160
+ hop_size: 80
  win_type: hann

- # data
- max_snr_db: 20
- min_snr_db: -10
+ erb_bins: 32
+ min_freq_bins_for_erb: 2

  # model
  conv_size: 256
  gru_size: 256

- # train
- max_epochs: 100
- batch_size: 32
- num_workers: 4
- seed: 1234
+ # data
+ max_snr_db: 20
+ min_snr_db: -10

+ # train
  lr: 0.001
- lr_scheduler: CosineAnnealingLR
- lr_scheduler_kwargs: {}
+ lr_scheduler: "CosineAnnealingLR"
+ lr_scheduler_kwargs:
+   T_max: 250000
+   eta_min: 0.0001

- weight_decay: 0.00001
+ max_epochs: 100
  clip_grad_norm: 10.0
- eval_steps: 20000
+ seed: 1234
+
+ batch_size: 64
+ num_workers: 4
+ eval_steps: 15000
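Note on the change above: at the 8 kHz sample rate, the new nfft/win_size of 160 samples corresponds to 20 ms analysis windows with a 10 ms hop (hop_size: 80), and the scheduler block now spells out the CosineAnnealingLR arguments instead of an empty kwargs dict. A minimal sketch of how a config like this could be consumed — the YAML path is from this commit, but the AdamW optimizer and the GRU stand-in for the model are illustrative assumptions, not code from this repo:

# Sketch only: load the YAML and build the scheduler it describes.
import yaml
import torch

with open("examples/rnnoise/yaml/config.yaml") as f:
    config = yaml.safe_load(f)

frame_ms = 1000 * config["win_size"] / config["sample_rate"]  # 160 / 8000 -> 20.0 ms
hop_ms = 1000 * config["hop_size"] / config["sample_rate"]    # 80 / 8000  -> 10.0 ms

model = torch.nn.GRU(config["conv_size"], config["gru_size"])  # placeholder for the real model
optimizer = torch.optim.AdamW(model.parameters(), lr=config["lr"])

# lr_scheduler / lr_scheduler_kwargs map onto torch.optim.lr_scheduler by class name.
scheduler_cls = getattr(torch.optim.lr_scheduler, config["lr_scheduler"])
scheduler = scheduler_cls(optimizer, **config["lr_scheduler_kwargs"])  # T_max=250000, eta_min=0.0001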
toolbox/torchaudio/models/rnnoise/configuration_rnnoise.py CHANGED
@@ -21,17 +21,16 @@ class RNNoiseConfig(PretrainedConfig):
  min_snr_db: float = -10,
  max_snr_db: float = 20,

- max_epochs: int = 100,
- batch_size: int = 4,
- num_workers: int = 4,
- seed: int = 1234,
-
  lr: float = 0.001,
  lr_scheduler: str = "CosineAnnealingLR",
  lr_scheduler_kwargs: dict = None,

- weight_decay: float = 0.00001,
+ max_epochs: int = 100,
  clip_grad_norm: float = 10.,
+ seed: int = 1234,
+
+ batch_size: int = 64,
+ num_workers: int = 4,
  eval_steps: int = 25000,

  **kwargs
@@ -53,17 +52,16 @@ class RNNoiseConfig(PretrainedConfig):
  self.min_snr_db = min_snr_db
  self.max_snr_db = max_snr_db

- self.max_epochs = max_epochs
- self.batch_size = batch_size
- self.num_workers = num_workers
- self.seed = seed
-
  self.lr = lr
  self.lr_scheduler = lr_scheduler
  self.lr_scheduler_kwargs = lr_scheduler_kwargs or dict()

- self.weight_decay = weight_decay
+ self.max_epochs = max_epochs
  self.clip_grad_norm = clip_grad_norm
+ self.seed = seed
+
+ self.batch_size = batch_size
+ self.num_workers = num_workers
  self.eval_steps = eval_steps

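The constructor reshuffle above mirrors the YAML: the training-loop arguments (max_epochs, seed, batch_size, num_workers) now sit after the optimizer settings, the batch_size default rises from 4 to 64, and weight_decay is no longer a config field. A rough usage sketch; the field names come from the diff, while the optimizer choice and the dummy parameter list are assumptions for illustration:

# Sketch only: instantiate the config and wire up the pieces it describes.
import torch
from toolbox.torchaudio.models.rnnoise.configuration_rnnoise import RNNoiseConfig

config = RNNoiseConfig(
    lr=0.001,
    lr_scheduler="CosineAnnealingLR",
    lr_scheduler_kwargs={"T_max": 250000, "eta_min": 0.0001},
    max_epochs=100,
    clip_grad_norm=10.0,
    seed=1234,
    batch_size=64,
    num_workers=4,
)

torch.manual_seed(config.seed)

params = [torch.nn.Parameter(torch.zeros(1))]        # placeholder parameters
optimizer = torch.optim.AdamW(params, lr=config.lr)  # weight_decay no longer comes from the config
scheduler_cls = getattr(torch.optim.lr_scheduler, config.lr_scheduler)
scheduler = scheduler_cls(optimizer, **config.lr_scheduler_kwargs)

# During training, gradients would be clipped to the configured bound:
# torch.nn.utils.clip_grad_norm_(params, max_norm=config.clip_grad_norm)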
toolbox/torchaudio/models/rnnoise/yaml/config.yaml CHANGED
@@ -3,32 +3,33 @@ model_name: "rnnoise"
  # spec
  sample_rate: 8000
  segment_size: 32000
- nfft: 512
- win_size: 512
- hop_size: 256
+ nfft: 160
+ win_size: 160
+ hop_size: 80
  win_type: hann

  erb_bins: 32
  min_freq_bins_for_erb: 2

- # data
- max_snr_db: 20
- min_snr_db: -10
-
  # model
  conv_size: 256
  gru_size: 256

- # train
- max_epochs: 100
- batch_size: 32
- num_workers: 4
- seed: 1234
+ # data
+ max_snr_db: 20
+ min_snr_db: -10

+ # train
  lr: 0.001
- lr_scheduler: CosineAnnealingLR
- lr_scheduler_kwargs: {}
+ lr_scheduler: "CosineAnnealingLR"
+ lr_scheduler_kwargs:
+   T_max: 250000
+   eta_min: 0.0001

- weight_decay: 0.00001
+ max_epochs: 100
  clip_grad_norm: 10.0
- eval_steps: 20000
+ seed: 1234
+
+ batch_size: 64
+ num_workers: 4
+ eval_steps: 15000
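The model's default config picks up the same spectral settings. With nfft: 160 the one-sided STFT has 81 frequency bins (nfft // 2 + 1), which erb_bins: 32 presumably compresses into 32 ERB-spaced bands of at least min_freq_bins_for_erb: 2 linear bins each; that grouping is an assumption about how the model uses these fields, but the frame arithmetic below follows directly from the values in the file (the frame count assumes no center padding):

# Arithmetic implied by the new defaults; values copied from the config.
sample_rate = 8000
nfft = 160
win_size = 160
hop_size = 80
segment_size = 32000

freq_bins = nfft // 2 + 1                                        # 81 one-sided STFT bins
frame_ms = 1000 * win_size / sample_rate                         # 20.0 ms analysis window
hop_ms = 1000 * hop_size / sample_rate                           # 10.0 ms frame shift
frames_per_segment = 1 + (segment_size - win_size) // hop_size   # 399 frames per 4 s segment

print(freq_bins, frame_ms, hop_ms, frames_per_segment)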