{
  "name": "one_batch_test",
  "n_gpu": 1,
  "text_encoder": {
    "type": "CTCCharTextEncoder",
    "args": {
        "kenlm_model_path": "hw_asr/text_encoder/3-gram.arpa",
        "unigrams_path": "hw_asr/text_encoder/librispeech-vocab.txt"
    }
  },
  "preprocessing": {
    "sr": 16000,
    "spectrogram": {
      "type": "MelSpectrogram",
      "args": {
        "n_mels": 512
      }
    },
    "log_spec": true
  },
  "augmentations": {
    "wave": [
        {"type": "AddColoredNoise", "args": {"p": 0.3, "sample_rate": 16000}},
        {"type": "Gain", "args": {"p": 0.4, "sample_rate": 16000}},
        {"type": "HighPassFilter", "args": {"p": 0.3, "sample_rate": 16000}},
        {"type": "LowPassFilter", "args": {"p": 0.3, "sample_rate": 16000}},
        {"type": "PitchShift", "args": {"p": 0.3, "sample_rate": 16000}},
        {"type": "PolarityInversion", "args": {"p": 0, "sample_rate": 16000}},
        {"type": "Shift", "args": {"p": 0.2, "sample_rate": 16000}}
    ],
    "spectrogram": []
  },
  "arch": {
    "type": "DeepSpeech2Model",
    "args": {
      "n_feats": 512,
      "n_rnn_layers": 1,
      "rnn_hidden_size": 256
    }
  },
  "data": {
    "train": {
      "batch_size": 10,
      "num_workers": 0,
      "datasets": [
        {
          "type": "LibrispeechDataset",
          "args": {
            "part": "dev-clean",
            "max_audio_length": 20.0,
            "max_text_length": 200,
            "limit": 10
          }
        }
      ]
    },
    "val": {
      "batch_size": 10,
      "num_workers": 0,
      "datasets": [
        {
          "type": "LibrispeechDataset",
          "args": {
            "part": "dev-clean",
            "max_audio_length": 20.0,
            "max_text_length": 200,
            "limit": 10
          }
        }
      ]
    }
  },
  "optimizer": {
    "type": "SGD",
    "args": {
      "lr": 1e-2
    }
  },
  "loss": {
    "type": "CTCLoss",
    "args": {}
  },
  "metrics": [
    {
      "type": "ArgmaxWERMetric",
      "args": {
        "name": "WER (argmax)"
      }
    },
    {
      "type": "ArgmaxCERMetric",
      "args": {
        "name": "CER (argmax)"
      }
    },
    {
      "type": "BeamSearchWERMetric",
      "args": {
        "beam_size": 2,
        "name": "WER (beam search)"
      }
    },
    {
      "type": "BeamSearchCERMetric",
      "args": {
        "beam_size": 2,
        "name": "CER (beam search)"
      }
    }
  ],
  "lr_scheduler": {
    "type": "OneCycleLR",
    "args": {
      "steps_per_epoch": 100,
      "epochs": 50,
      "anneal_strategy": "cos",
      "max_lr": 1e-2,
      "pct_start": 0.2
    }
  },
  "trainer": {
    "epochs": 50,
    "save_dir": "saved/",
    "save_period": 5,
    "verbosity": 2,
    "monitor": "min val_loss",
    "early_stop": 100,
    "visualize": "wandb",
    "wandb_project": "asr_project_check",
    "len_epoch": 100,
    "grad_norm_clip": 10
  }
}