mrm8488 committed
Commit aba457d
1 parent: 787cd28

Initial commit
config.json ADDED
@@ -0,0 +1,76 @@
+ {
+   "_name_or_path": "facebook/wav2vec2-large-xlsr-53",
+   "activation_dropout": 0.0,
+   "apply_spec_augment": true,
+   "architectures": [
+     "Wav2Vec2ForCTC"
+   ],
+   "attention_dropout": 0.1,
+   "bos_token_id": 1,
+   "conv_bias": true,
+   "conv_dim": [
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512
+   ],
+   "conv_kernel": [
+     10,
+     3,
+     3,
+     3,
+     3,
+     2,
+     2
+   ],
+   "conv_stride": [
+     5,
+     2,
+     2,
+     2,
+     2,
+     2,
+     2
+   ],
+   "ctc_loss_reduction": "mean",
+   "ctc_zero_infinity": false,
+   "do_stable_layer_norm": true,
+   "eos_token_id": 2,
+   "feat_extract_activation": "gelu",
+   "feat_extract_dropout": 0.0,
+   "feat_extract_norm": "layer",
+   "feat_proj_dropout": 0.0,
+   "final_dropout": 0.0,
+   "gradient_checkpointing": true,
+   "hidden_act": "gelu",
+   "hidden_dropout": 0.1,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-05,
+   "layerdrop": 0.1,
+   "mask_channel_length": 10,
+   "mask_channel_min_space": 1,
+   "mask_channel_other": 0.0,
+   "mask_channel_prob": 0.0,
+   "mask_channel_selection": "static",
+   "mask_feature_length": 10,
+   "mask_feature_prob": 0.0,
+   "mask_time_length": 10,
+   "mask_time_min_space": 1,
+   "mask_time_other": 0.0,
+   "mask_time_prob": 0.05,
+   "mask_time_selection": "static",
+   "model_type": "wav2vec2",
+   "num_attention_heads": 16,
+   "num_conv_pos_embedding_groups": 16,
+   "num_conv_pos_embeddings": 128,
+   "num_feat_extract_layers": 7,
+   "num_hidden_layers": 24,
+   "pad_token_id": 55,
+   "transformers_version": "4.5.0.dev0",
+   "vocab_size": 56
+ }
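The config above describes an XLSR-53 checkpoint fine-tuned for CTC: 7 convolutional feature-extractor layers, 24 transformer layers with hidden size 1024, and a 56-token output vocabulary (pad id 55). A minimal loading sketch with the `transformers` library is shown below; the path `./` is an assumption standing in for a local clone of this repository with the LFS weights pulled.

```python
# Minimal sketch (assumption: a recent `transformers` install and a local clone of this
# repo with the LFS weights pulled). Loads the CTC model and checks the config values above.
import torch
from transformers import Wav2Vec2Config, Wav2Vec2ForCTC

repo_path = "./"  # placeholder: path to a local clone of this repository

config = Wav2Vec2Config.from_pretrained(repo_path)
assert config.vocab_size == 56 and config.num_hidden_layers == 24

model = Wav2Vec2ForCTC.from_pretrained(repo_path)
model.eval()

# Dummy forward pass: 1 second of 16 kHz audio -> per-frame logits over the 56-token vocab.
with torch.no_grad():
    logits = model(torch.zeros(1, 16000)).logits
print(logits.shape)  # (batch, frames, vocab_size)
```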
preprocessor_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "do_normalize": true,
+   "feature_size": 1,
+   "padding_side": "right",
+   "padding_value": 0.0,
+   "return_attention_mask": true,
+   "sampling_rate": 16000
+ }
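preprocessor_config.json defines the feature-extraction side of the processor: mono 16 kHz input, zero-mean/unit-variance normalization, right padding with 0.0, and an attention mask for padded batches. Below is a minimal sketch, assuming only a `transformers` install, that builds the same extractor from these values and runs it on dummy audio.

```python
# Minimal sketch (assumption: `transformers` and numpy installed). Builds the feature
# extractor described by preprocessor_config.json and normalizes a raw 16 kHz waveform.
import numpy as np
from transformers import Wav2Vec2FeatureExtractor

feature_extractor = Wav2Vec2FeatureExtractor(
    feature_size=1,
    sampling_rate=16000,
    padding_value=0.0,
    do_normalize=True,
    return_attention_mask=True,
)

waveform = np.random.randn(16000).astype(np.float32)  # 1 s of dummy audio
inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt", padding=True)
print(inputs.input_values.shape, inputs.attention_mask.shape)
```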
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f5ea10f6fd23609fd4c28cc96653b2147d12568e092866cdaf7e02bbda40284d
+ size 1262163415
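pytorch_model.bin is stored through Git LFS, so a plain clone contains only this pointer file; the actual ~1.26 GB weights are fetched separately (for example with `git lfs pull`). A small stdlib-only sketch that verifies a pulled file against the pointer's recorded size and sha256 oid:

```python
# Minimal sketch (assumption: the LFS object has been pulled into a local clone).
# Verifies the local weights file against the oid/size recorded in the pointer above.
import hashlib
from pathlib import Path

weights = Path("pytorch_model.bin")  # path inside a local clone of this repo
expected_oid = "f5ea10f6fd23609fd4c28cc96653b2147d12568e092866cdaf7e02bbda40284d"
expected_size = 1262163415

assert weights.stat().st_size == expected_size, "size mismatch: LFS object not pulled?"

sha256 = hashlib.sha256()
with weights.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)
assert sha256.hexdigest() == expected_oid
print("pytorch_model.bin matches the LFS pointer")
```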
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]"}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|"}
trainer_state.json ADDED
@@ -0,0 +1,254 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 29.953846153846154,
+   "global_step": 6800,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.76,
+       "learning_rate": 0.00023999999999999998,
+       "loss": 6.331,
+       "step": 400
+     },
+     {
+       "epoch": 1.76,
+       "eval_loss": 3.29716157913208,
+       "eval_runtime": 392.7938,
+       "eval_samples_per_second": 8.236,
+       "eval_wer": 0.9908326967150497,
+       "step": 400
+     },
+     {
+       "epoch": 3.52,
+       "learning_rate": 0.00028573692551505547,
+       "loss": 3.3384,
+       "step": 800
+     },
+     {
+       "epoch": 3.52,
+       "eval_loss": 3.2769880294799805,
+       "eval_runtime": 403.0436,
+       "eval_samples_per_second": 8.026,
+       "eval_wer": 0.9908326967150497,
+       "step": 800
+     },
+     {
+       "epoch": 5.29,
+       "learning_rate": 0.00026671949286846274,
+       "loss": 2.8767,
+       "step": 1200
+     },
+     {
+       "epoch": 5.29,
+       "eval_loss": 0.987671434879303,
+       "eval_runtime": 414.5553,
+       "eval_samples_per_second": 7.804,
+       "eval_wer": 0.9542438985163444,
+       "step": 1200
+     },
+     {
+       "epoch": 7.05,
+       "learning_rate": 0.00024770206022187,
+       "loss": 0.7362,
+       "step": 1600
+     },
+     {
+       "epoch": 7.05,
+       "eval_loss": 0.5294700860977173,
+       "eval_runtime": 412.2788,
+       "eval_samples_per_second": 7.847,
+       "eval_wer": 0.7132805275220135,
+       "step": 1600
+     },
+     {
+       "epoch": 8.81,
+       "learning_rate": 0.0002286846275752773,
+       "loss": 0.4225,
+       "step": 2000
+     },
+     {
+       "epoch": 8.81,
+       "eval_loss": 0.47939780354499817,
+       "eval_runtime": 412.9001,
+       "eval_samples_per_second": 7.835,
+       "eval_wer": 0.6393791966547384,
+       "step": 2000
+     },
+     {
+       "epoch": 10.57,
+       "learning_rate": 0.0002096671949286846,
+       "loss": 0.2997,
+       "step": 2400
+     },
+     {
+       "epoch": 10.57,
+       "eval_loss": 0.47680211067199707,
+       "eval_runtime": 406.5095,
+       "eval_samples_per_second": 7.958,
+       "eval_wer": 0.5926983233484782,
+       "step": 2400
+     },
+     {
+       "epoch": 12.33,
+       "learning_rate": 0.00019064976228209192,
+       "loss": 0.2349,
+       "step": 2800
+     },
+     {
+       "epoch": 12.33,
+       "eval_loss": 0.46566906571388245,
+       "eval_runtime": 410.6338,
+       "eval_samples_per_second": 7.878,
+       "eval_wer": 0.5762936753648827,
+       "step": 2800
+     },
+     {
+       "epoch": 14.1,
+       "learning_rate": 0.00017163232963549917,
+       "loss": 0.1947,
+       "step": 3200
+     },
+     {
+       "epoch": 14.1,
+       "eval_loss": 0.5099472999572754,
+       "eval_runtime": 412.6367,
+       "eval_samples_per_second": 7.84,
+       "eval_wer": 0.5637087370833501,
+       "step": 3200
+     },
+     {
+       "epoch": 15.86,
+       "learning_rate": 0.00015261489698890646,
+       "loss": 0.1652,
+       "step": 3600
+     },
+     {
+       "epoch": 15.86,
+       "eval_loss": 0.4866821765899658,
+       "eval_runtime": 413.7594,
+       "eval_samples_per_second": 7.819,
+       "eval_wer": 0.5551445458566202,
+       "step": 3600
+     },
+     {
+       "epoch": 17.62,
+       "learning_rate": 0.0001335974643423138,
+       "loss": 0.1397,
+       "step": 4000
+     },
+     {
+       "epoch": 17.62,
+       "eval_loss": 0.510926365852356,
+       "eval_runtime": 416.443,
+       "eval_samples_per_second": 7.768,
+       "eval_wer": 0.5460576575127659,
+       "step": 4000
+     },
+     {
+       "epoch": 19.38,
+       "learning_rate": 0.00011458003169572107,
+       "loss": 0.1251,
+       "step": 4400
+     },
+     {
+       "epoch": 19.38,
+       "eval_loss": 0.5401586294174194,
+       "eval_runtime": 410.6668,
+       "eval_samples_per_second": 7.877,
+       "eval_wer": 0.5362470346990471,
+       "step": 4400
+     },
+     {
+       "epoch": 21.15,
+       "learning_rate": 9.556259904912835e-05,
+       "loss": 0.1105,
+       "step": 4800
+     },
+     {
+       "epoch": 21.15,
+       "eval_loss": 0.5472865104675293,
+       "eval_runtime": 415.7439,
+       "eval_samples_per_second": 7.781,
+       "eval_wer": 0.530416951469583,
+       "step": 4800
+     },
+     {
+       "epoch": 22.91,
+       "learning_rate": 7.654516640253565e-05,
+       "loss": 0.0932,
+       "step": 5200
+     },
+     {
+       "epoch": 22.91,
+       "eval_loss": 0.5360305309295654,
+       "eval_runtime": 420.1088,
+       "eval_samples_per_second": 7.7,
+       "eval_wer": 0.5289694825298541,
+       "step": 5200
+     },
+     {
+       "epoch": 24.67,
+       "learning_rate": 5.752773375594294e-05,
+       "loss": 0.0893,
+       "step": 5600
+     },
+     {
+       "epoch": 24.67,
+       "eval_loss": 0.5389866232872009,
+       "eval_runtime": 429.4038,
+       "eval_samples_per_second": 7.534,
+       "eval_wer": 0.5169072413654457,
+       "step": 5600
+     },
+     {
+       "epoch": 26.43,
+       "learning_rate": 3.851030110935023e-05,
+       "loss": 0.0863,
+       "step": 6000
+     },
+     {
+       "epoch": 26.43,
+       "eval_loss": 0.5444660186767578,
+       "eval_runtime": 423.7501,
+       "eval_samples_per_second": 7.634,
+       "eval_wer": 0.5146958304853042,
+       "step": 6000
+     },
+     {
+       "epoch": 28.19,
+       "learning_rate": 1.9492868462757525e-05,
+       "loss": 0.0778,
+       "step": 6400
+     },
+     {
+       "epoch": 28.19,
+       "eval_loss": 0.5570007562637329,
+       "eval_runtime": 420.4258,
+       "eval_samples_per_second": 7.695,
+       "eval_wer": 0.5113988179003659,
+       "step": 6400
+     },
+     {
+       "epoch": 29.95,
+       "learning_rate": 4.754358161648177e-07,
+       "loss": 0.0732,
+       "step": 6800
+     },
+     {
+       "epoch": 29.95,
+       "eval_loss": 0.557680606842041,
+       "eval_runtime": 427.3219,
+       "eval_samples_per_second": 7.57,
+       "eval_wer": 0.5118411000763942,
+       "step": 6800
+     }
+   ],
+   "max_steps": 6810,
+   "num_train_epochs": 30,
+   "total_flos": 3.375783462381318e+19,
+   "trial_name": null,
+   "trial_params": null
+ }
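trainer_state.json records the fine-tuning run: 30 epochs (6,810 max steps) with evaluation every 400 steps, where the eval WER drops from ~0.99 at step 400 to ~0.51 at step 6,800. A minimal stdlib sketch for pulling that WER curve back out of `log_history`, assuming a local copy of the file:

```python
# Minimal sketch (stdlib only). Extracts the evaluation WER curve from trainer_state.json,
# the training log shown above; train-only entries have no "eval_wer" key and are skipped.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

eval_points = [(e["step"], e["eval_wer"]) for e in state["log_history"] if "eval_wer" in e]
for step, wer in eval_points:
    print(f"step {step:5d}  WER {wer:.4f}")
# Final entry: step 6800, WER ~0.512 after ~30 epochs.
```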
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e2fa9c7c05a8a24aecbe728ed8d960a674e19cd55c296c7de8bb30b25d9fe68c
+ size 2287
vocab.json ADDED
@@ -0,0 +1 @@
+ {"з": 0, "щ": 2, "x": 3, "ю": 4, "ї": 5, "л": 6, "ж": 7, "с": 8, "—": 9, "ш": 10, "m": 11, "ц": 12, "n": 13, "l": 14, "–": 15, "о": 16, "»": 17, "y": 18, "ч": 19, "и": 20, "a": 21, "м": 22, "o": 23, "i": 24, "c": 25, "п": 26, "«": 27, "p": 28, "…": 29, "ґ": 30, "u": 31, "у": 32, "і": 33, "'": 34, "e": 35, "ф": 36, "ь": 37, "а": 38, "н": 39, "р": 40, "`": 41, "г": 42, "в": 43, "к": 44, "е": 45, "т": 46, "д": 47, "х": 48, "’": 49, "б": 50, "я": 51, "й": 52, "є": 53, "|": 1, "[UNK]": 54, "[PAD]": 55}