Commit 6ae1a3b (verified) by cdactvm
Parent: 8b7f411

Upload 8 files
config.json ADDED
@@ -0,0 +1,82 @@
+ {
+ "_name_or_path": "facebook/w2v-bert-2.0",
+ "activation_dropout": 0.0,
+ "adapter_act": "relu",
+ "adapter_kernel_size": 3,
+ "adapter_stride": 2,
+ "add_adapter": true,
+ "apply_spec_augment": false,
+ "architectures": [
+ "Wav2Vec2BertForCTC"
+ ],
+ "attention_dropout": 0.2,
+ "bos_token_id": 1,
+ "classifier_proj_size": 768,
+ "codevector_dim": 768,
+ "conformer_conv_dropout": 0.1,
+ "contrastive_logits_temperature": 0.1,
+ "conv_depthwise_kernel_size": 31,
+ "ctc_loss_reduction": "mean",
+ "ctc_zero_infinity": true,
+ "diversity_loss_weight": 0.1,
+ "eos_token_id": 2,
+ "feat_proj_dropout": 0.2,
+ "feat_quantizer_dropout": 0.0,
+ "feature_projection_input_dim": 160,
+ "final_dropout": 0.1,
+ "hidden_act": "swish",
+ "hidden_dropout": 0.2,
+ "hidden_size": 1024,
+ "initializer_range": 0.02,
+ "intermediate_size": 4096,
+ "layer_norm_eps": 1e-05,
+ "layerdrop": 0.2,
+ "left_max_position_embeddings": 64,
+ "mask_feature_length": 10,
+ "mask_feature_min_masks": 0,
+ "mask_feature_prob": 0.0,
+ "mask_time_length": 10,
+ "mask_time_min_masks": 2,
+ "mask_time_prob": 0.0,
+ "max_source_positions": 5000,
+ "model_type": "wav2vec2-bert",
+ "num_adapter_layers": 1,
+ "num_attention_heads": 16,
+ "num_codevector_groups": 2,
+ "num_codevectors_per_group": 320,
+ "num_hidden_layers": 24,
+ "num_negatives": 100,
+ "output_hidden_size": 1024,
+ "pad_token_id": 64,
+ "position_embeddings_type": "relative_key",
+ "proj_codevector_dim": 768,
+ "right_max_position_embeddings": 8,
+ "rotary_embedding_base": 10000,
+ "tdnn_dilation": [
+ 1,
+ 2,
+ 3,
+ 1,
+ 1
+ ],
+ "tdnn_dim": [
+ 512,
+ 512,
+ 512,
+ 512,
+ 1500
+ ],
+ "tdnn_kernel": [
+ 5,
+ 3,
+ 3,
+ 1,
+ 1
+ ],
+ "torch_dtype": "float32",
+ "transformers_version": "4.42.4",
+ "use_intermediate_ffn_before_adapter": false,
+ "use_weighted_layer_sum": false,
+ "vocab_size": 67,
+ "xvector_output_dim": 512
+ }
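The config above describes a Wav2Vec2BertForCTC head fine-tuned from facebook/w2v-bert-2.0: 24 Conformer layers, hidden size 1024, an output adapter, and a 67-entry CTC vocabulary with pad_token_id 64. A minimal loading sketch with Hugging Face Transformers (the config was written by version 4.42.4); the repo id below is a placeholder, since the commit page does not show which repository these files were uploaded to.

```python
# Minimal sketch: load the uploaded config and CTC model with Hugging Face Transformers.
# "your-namespace/punjabi-w2v-bert-ctc" is a placeholder repo id, not stated in this commit.
from transformers import AutoConfig, Wav2Vec2BertForCTC

repo_id = "your-namespace/punjabi-w2v-bert-ctc"

config = AutoConfig.from_pretrained(repo_id)
print(config.model_type, config.vocab_size)   # expected: wav2vec2-bert 67

model = Wav2Vec2BertForCTC.from_pretrained(repo_id)
model.eval()  # inference mode; the checkpoint is float32
```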
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:04d1af1948f55a4eaefcd56200d21216520cea00842fc2e5a6f32c953e1add50
+ size 2423089260
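model.safetensors is stored through Git LFS, so the diff only shows the pointer; the actual float32 weights are about 2.4 GB. A hedged sketch of fetching and inspecting the real file with huggingface_hub and safetensors; again, the repo id is a placeholder.

```python
# Sketch: resolve the LFS pointer to the real weight file and peek inside it.
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

path = hf_hub_download("your-namespace/punjabi-w2v-bert-ctc", "model.safetensors")  # placeholder repo id
state_dict = load_file(path)          # maps parameter names to torch tensors
print(f"{len(state_dict)} tensors, "
      f"{sum(t.numel() for t in state_dict.values()) / 1e6:.0f}M parameters")
```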
preprocessor_config.json ADDED
@@ -0,0 +1,10 @@
+ {
+ "feature_extractor_type": "SeamlessM4TFeatureExtractor",
+ "feature_size": 80,
+ "num_mel_bins": 80,
+ "padding_side": "right",
+ "padding_value": 0.0,
+ "return_attention_mask": true,
+ "sampling_rate": 16000,
+ "stride": 2
+ }
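The preprocessor is a SeamlessM4TFeatureExtractor: 80 log-mel bins at 16 kHz, stacked with stride 2, which is consistent with the config's feature_projection_input_dim of 160. A small sketch of running it on a waveform; the dummy audio is only there to make the snippet self-contained.

```python
# Sketch: convert a 16 kHz waveform into the stacked log-mel features this model consumes.
import numpy as np
from transformers import SeamlessM4TFeatureExtractor

fe = SeamlessM4TFeatureExtractor(feature_size=80, num_mel_bins=80,
                                 sampling_rate=16000, stride=2, padding_value=0.0)
waveform = np.random.randn(16000).astype(np.float32)      # 1 second of dummy audio
inputs = fe(waveform, sampling_rate=16000, return_tensors="pt")
print(inputs["input_features"].shape)                      # (1, frames, 160) after frame stacking
```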
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:40c2efbd34a555c011bddc6c43f76a836326617ad12bb332da4cf7a1afc0c347
+ size 14244
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a963b078720dd9db782f52ebc335fcd45d25e21fd109960da0e1be793c5a94fc
+ size 1256
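rng_state.pth and scheduler.pt (together with training_args.bin further below) are the bookkeeping files the Trainer writes into a checkpoint directory so a run can be resumed exactly, e.g. via trainer.train(resume_from_checkpoint=...). They can also be opened directly; a sketch, assuming the files have been downloaded locally:

```python
# Sketch: inspect the scheduler and RNG snapshots saved with this checkpoint.
# weights_only=False is needed on recent torch because these pickles contain plain Python objects.
import torch

scheduler_state = torch.load("scheduler.pt", map_location="cpu", weights_only=False)
rng_state = torch.load("rng_state.pth", map_location="cpu", weights_only=False)
print(sorted(scheduler_state))   # LR-scheduler state_dict keys
print(sorted(rng_state))         # per-library RNG states (python, numpy, cpu, possibly cuda)
```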
trainer_state.json ADDED
@@ -0,0 +1,1883 @@
1
+ {
2
+ "best_metric": 0.35703112038112395,
3
+ "best_model_checkpoint": "results/punjabi_model/20240829_200444/checkpoint-3000",
4
+ "epoch": 9.999656616990592,
5
+ "eval_steps": 3000,
6
+ "global_step": 109200,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.045784401254492596,
13
+ "grad_norm": 2.4544355869293213,
14
+ "learning_rate": 9.980000000000001e-06,
15
+ "loss": 3.9787,
16
+ "step": 500
17
+ },
18
+ {
19
+ "epoch": 0.09156880250898519,
20
+ "grad_norm": 5.300338268280029,
21
+ "learning_rate": 1.9980000000000002e-05,
22
+ "loss": 2.7973,
23
+ "step": 1000
24
+ },
25
+ {
26
+ "epoch": 0.1373532037634778,
27
+ "grad_norm": 5.187418460845947,
28
+ "learning_rate": 2.9980000000000004e-05,
29
+ "loss": 2.1081,
30
+ "step": 1500
31
+ },
32
+ {
33
+ "epoch": 0.18313760501797038,
34
+ "grad_norm": 5.451952934265137,
35
+ "learning_rate": 3.998000000000001e-05,
36
+ "loss": 1.7718,
37
+ "step": 2000
38
+ },
39
+ {
40
+ "epoch": 0.22892200627246298,
41
+ "grad_norm": 10.363802909851074,
42
+ "learning_rate": 3.962053231939164e-05,
43
+ "loss": 1.6058,
44
+ "step": 2500
45
+ },
46
+ {
47
+ "epoch": 0.2747064075269556,
48
+ "grad_norm": 3.4008052349090576,
49
+ "learning_rate": 3.924030418250951e-05,
50
+ "loss": 1.5005,
51
+ "step": 3000
52
+ },
53
+ {
54
+ "epoch": 0.2747064075269556,
55
+ "eval_loss": 0.580813467502594,
56
+ "eval_runtime": 645.4536,
57
+ "eval_samples_per_second": 10.151,
58
+ "eval_steps_per_second": 10.151,
59
+ "eval_wer": 0.35703112038112395,
60
+ "step": 3000
61
+ },
62
+ {
63
+ "epoch": 0.32049080878144814,
64
+ "grad_norm": 2.9593212604522705,
65
+ "learning_rate": 3.886007604562738e-05,
66
+ "loss": 1.4034,
67
+ "step": 3500
68
+ },
69
+ {
70
+ "epoch": 0.36627521003594077,
71
+ "grad_norm": 4.573808193206787,
72
+ "learning_rate": 3.847984790874525e-05,
73
+ "loss": 1.3577,
74
+ "step": 4000
75
+ },
76
+ {
77
+ "epoch": 0.41205961129043334,
78
+ "grad_norm": 2.684593915939331,
79
+ "learning_rate": 3.809961977186312e-05,
80
+ "loss": 1.3114,
81
+ "step": 4500
82
+ },
83
+ {
84
+ "epoch": 0.45784401254492596,
85
+ "grad_norm": 2.6589484214782715,
86
+ "learning_rate": 3.771939163498099e-05,
87
+ "loss": 1.3163,
88
+ "step": 5000
89
+ },
90
+ {
91
+ "epoch": 0.5036284137994186,
92
+ "grad_norm": 3.144775390625,
93
+ "learning_rate": 3.733992395437262e-05,
94
+ "loss": 1.2375,
95
+ "step": 5500
96
+ },
97
+ {
98
+ "epoch": 0.5494128150539112,
99
+ "grad_norm": 3.1532704830169678,
100
+ "learning_rate": 3.6959695817490496e-05,
101
+ "loss": 1.2442,
102
+ "step": 6000
103
+ },
104
+ {
105
+ "epoch": 0.5494128150539112,
106
+ "eval_loss": 0.43034785985946655,
107
+ "eval_runtime": 665.7368,
108
+ "eval_samples_per_second": 9.842,
109
+ "eval_steps_per_second": 9.842,
110
+ "eval_wer": 0.2554395487189211,
111
+ "step": 6000
112
+ },
113
+ {
114
+ "epoch": 0.5951972163084037,
115
+ "grad_norm": 9.455696105957031,
116
+ "learning_rate": 3.657946768060837e-05,
117
+ "loss": 1.225,
118
+ "step": 6500
119
+ },
120
+ {
121
+ "epoch": 0.6409816175628963,
122
+ "grad_norm": 3.3579087257385254,
123
+ "learning_rate": 3.619923954372624e-05,
124
+ "loss": 1.1457,
125
+ "step": 7000
126
+ },
127
+ {
128
+ "epoch": 0.686766018817389,
129
+ "grad_norm": 4.421695709228516,
130
+ "learning_rate": 3.581901140684411e-05,
131
+ "loss": 1.1656,
132
+ "step": 7500
133
+ },
134
+ {
135
+ "epoch": 0.7325504200718815,
136
+ "grad_norm": 3.5701684951782227,
137
+ "learning_rate": 3.5439543726235745e-05,
138
+ "loss": 1.175,
139
+ "step": 8000
140
+ },
141
+ {
142
+ "epoch": 0.7783348213263741,
143
+ "grad_norm": 4.08390474319458,
144
+ "learning_rate": 3.505931558935361e-05,
145
+ "loss": 1.131,
146
+ "step": 8500
147
+ },
148
+ {
149
+ "epoch": 0.8241192225808667,
150
+ "grad_norm": 3.4614651203155518,
151
+ "learning_rate": 3.4679847908745255e-05,
152
+ "loss": 1.1133,
153
+ "step": 9000
154
+ },
155
+ {
156
+ "epoch": 0.8241192225808667,
157
+ "eval_loss": 0.3901245594024658,
158
+ "eval_runtime": 664.9166,
159
+ "eval_samples_per_second": 9.854,
160
+ "eval_steps_per_second": 9.854,
161
+ "eval_wer": 0.2110935981607452,
162
+ "step": 9000
163
+ },
164
+ {
165
+ "epoch": 0.8699036238353592,
166
+ "grad_norm": 2.597609519958496,
167
+ "learning_rate": 3.429961977186312e-05,
168
+ "loss": 1.0977,
169
+ "step": 9500
170
+ },
171
+ {
172
+ "epoch": 0.9156880250898519,
173
+ "grad_norm": 3.4706475734710693,
174
+ "learning_rate": 3.3919391634980995e-05,
175
+ "loss": 1.0986,
176
+ "step": 10000
177
+ },
178
+ {
179
+ "epoch": 0.9614724263443445,
180
+ "grad_norm": 2.1491544246673584,
181
+ "learning_rate": 3.353916349809886e-05,
182
+ "loss": 1.1154,
183
+ "step": 10500
184
+ },
185
+ {
186
+ "epoch": 1.0072568275988372,
187
+ "grad_norm": 1.495735764503479,
188
+ "learning_rate": 3.3158935361216734e-05,
189
+ "loss": 1.1157,
190
+ "step": 11000
191
+ },
192
+ {
193
+ "epoch": 1.0530412288533297,
194
+ "grad_norm": 2.120736837387085,
195
+ "learning_rate": 3.277870722433461e-05,
196
+ "loss": 1.0495,
197
+ "step": 11500
198
+ },
199
+ {
200
+ "epoch": 1.0988256301078223,
201
+ "grad_norm": 2.2971479892730713,
202
+ "learning_rate": 3.239847908745247e-05,
203
+ "loss": 1.0785,
204
+ "step": 12000
205
+ },
206
+ {
207
+ "epoch": 1.0988256301078223,
208
+ "eval_loss": 0.34569498896598816,
209
+ "eval_runtime": 655.7186,
210
+ "eval_samples_per_second": 9.992,
211
+ "eval_steps_per_second": 9.992,
212
+ "eval_wer": 0.19471569766063854,
213
+ "step": 12000
214
+ },
215
+ {
216
+ "epoch": 1.1446100313623149,
217
+ "grad_norm": 1.080255150794983,
218
+ "learning_rate": 3.2018250950570346e-05,
219
+ "loss": 1.0311,
220
+ "step": 12500
221
+ },
222
+ {
223
+ "epoch": 1.1903944326168074,
224
+ "grad_norm": 2.3899152278900146,
225
+ "learning_rate": 3.163802281368821e-05,
226
+ "loss": 1.107,
227
+ "step": 13000
228
+ },
229
+ {
230
+ "epoch": 1.2361788338713,
231
+ "grad_norm": 2.9057199954986572,
232
+ "learning_rate": 3.125855513307985e-05,
233
+ "loss": 1.0711,
234
+ "step": 13500
235
+ },
236
+ {
237
+ "epoch": 1.2819632351257926,
238
+ "grad_norm": 1.879520058631897,
239
+ "learning_rate": 3.087832699619772e-05,
240
+ "loss": 1.0348,
241
+ "step": 14000
242
+ },
243
+ {
244
+ "epoch": 1.3277476363802854,
245
+ "grad_norm": 2.777270555496216,
246
+ "learning_rate": 3.0498098859315592e-05,
247
+ "loss": 1.0308,
248
+ "step": 14500
249
+ },
250
+ {
251
+ "epoch": 1.373532037634778,
252
+ "grad_norm": 3.571882724761963,
253
+ "learning_rate": 3.0117870722433462e-05,
254
+ "loss": 1.0245,
255
+ "step": 15000
256
+ },
257
+ {
258
+ "epoch": 1.373532037634778,
259
+ "eval_loss": 0.3307068943977356,
260
+ "eval_runtime": 655.2961,
261
+ "eval_samples_per_second": 9.999,
262
+ "eval_steps_per_second": 9.999,
263
+ "eval_wer": 0.1764061055675381,
264
+ "step": 15000
265
+ },
266
+ {
267
+ "epoch": 1.4193164388892705,
268
+ "grad_norm": 2.154317855834961,
269
+ "learning_rate": 2.973764258555133e-05,
270
+ "loss": 1.0197,
271
+ "step": 15500
272
+ },
273
+ {
274
+ "epoch": 1.465100840143763,
275
+ "grad_norm": 2.7176313400268555,
276
+ "learning_rate": 2.9357414448669205e-05,
277
+ "loss": 1.0042,
278
+ "step": 16000
279
+ },
280
+ {
281
+ "epoch": 1.5108852413982556,
282
+ "grad_norm": 3.9277138710021973,
283
+ "learning_rate": 2.8977946768060842e-05,
284
+ "loss": 0.9801,
285
+ "step": 16500
286
+ },
287
+ {
288
+ "epoch": 1.5566696426527482,
289
+ "grad_norm": 6.810076713562012,
290
+ "learning_rate": 2.859771863117871e-05,
291
+ "loss": 0.9256,
292
+ "step": 17000
293
+ },
294
+ {
295
+ "epoch": 1.6024540439072408,
296
+ "grad_norm": 1.7569458484649658,
297
+ "learning_rate": 2.821749049429658e-05,
298
+ "loss": 0.981,
299
+ "step": 17500
300
+ },
301
+ {
302
+ "epoch": 1.6482384451617333,
303
+ "grad_norm": 3.3207457065582275,
304
+ "learning_rate": 2.783726235741445e-05,
305
+ "loss": 0.9452,
306
+ "step": 18000
307
+ },
308
+ {
309
+ "epoch": 1.6482384451617333,
310
+ "eval_loss": 0.3014201521873474,
311
+ "eval_runtime": 651.2443,
312
+ "eval_samples_per_second": 10.061,
313
+ "eval_steps_per_second": 10.061,
314
+ "eval_wer": 0.1733545068853547,
315
+ "step": 18000
316
+ },
317
+ {
318
+ "epoch": 1.694022846416226,
319
+ "grad_norm": 1.4468517303466797,
320
+ "learning_rate": 2.7457034220532324e-05,
321
+ "loss": 0.9969,
322
+ "step": 18500
323
+ },
324
+ {
325
+ "epoch": 1.7398072476707185,
326
+ "grad_norm": 1.2308686971664429,
327
+ "learning_rate": 2.7077566539923958e-05,
328
+ "loss": 0.9551,
329
+ "step": 19000
330
+ },
331
+ {
332
+ "epoch": 1.785591648925211,
333
+ "grad_norm": 3.698532819747925,
334
+ "learning_rate": 2.6697338403041827e-05,
335
+ "loss": 0.9585,
336
+ "step": 19500
337
+ },
338
+ {
339
+ "epoch": 1.8313760501797036,
340
+ "grad_norm": 1.4992170333862305,
341
+ "learning_rate": 2.6317110266159697e-05,
342
+ "loss": 0.951,
343
+ "step": 20000
344
+ },
345
+ {
346
+ "epoch": 1.8771604514341964,
347
+ "grad_norm": 3.0142011642456055,
348
+ "learning_rate": 2.5936882129277566e-05,
349
+ "loss": 0.9677,
350
+ "step": 20500
351
+ },
352
+ {
353
+ "epoch": 1.922944852688689,
354
+ "grad_norm": 1.9342625141143799,
355
+ "learning_rate": 2.5556653992395443e-05,
356
+ "loss": 0.9656,
357
+ "step": 21000
358
+ },
359
+ {
360
+ "epoch": 1.922944852688689,
361
+ "eval_loss": 0.3123963475227356,
362
+ "eval_runtime": 653.0667,
363
+ "eval_samples_per_second": 10.033,
364
+ "eval_steps_per_second": 10.033,
365
+ "eval_wer": 0.15938233272498875,
366
+ "step": 21000
367
+ },
368
+ {
369
+ "epoch": 1.9687292539431815,
370
+ "grad_norm": 5.628562927246094,
371
+ "learning_rate": 2.5177186311787077e-05,
372
+ "loss": 0.9511,
373
+ "step": 21500
374
+ },
375
+ {
376
+ "epoch": 2.0145136551976743,
377
+ "grad_norm": 2.930298089981079,
378
+ "learning_rate": 2.4796958174904946e-05,
379
+ "loss": 0.918,
380
+ "step": 22000
381
+ },
382
+ {
383
+ "epoch": 2.060298056452167,
384
+ "grad_norm": 1.2405227422714233,
385
+ "learning_rate": 2.4416730038022816e-05,
386
+ "loss": 0.9372,
387
+ "step": 22500
388
+ },
389
+ {
390
+ "epoch": 2.1060824577066595,
391
+ "grad_norm": 2.1558220386505127,
392
+ "learning_rate": 2.4036501901140686e-05,
393
+ "loss": 0.9673,
394
+ "step": 23000
395
+ },
396
+ {
397
+ "epoch": 2.151866858961152,
398
+ "grad_norm": 1.4096624851226807,
399
+ "learning_rate": 2.365703422053232e-05,
400
+ "loss": 0.9826,
401
+ "step": 23500
402
+ },
403
+ {
404
+ "epoch": 2.1976512602156446,
405
+ "grad_norm": 0.9921212196350098,
406
+ "learning_rate": 2.3276806083650192e-05,
407
+ "loss": 0.9261,
408
+ "step": 24000
409
+ },
410
+ {
411
+ "epoch": 2.1976512602156446,
412
+ "eval_loss": 0.30950331687927246,
413
+ "eval_runtime": 651.0611,
414
+ "eval_samples_per_second": 10.064,
415
+ "eval_steps_per_second": 10.064,
416
+ "eval_wer": 0.15439904244981156,
417
+ "step": 24000
418
+ },
419
+ {
420
+ "epoch": 2.243435661470137,
421
+ "grad_norm": 3.321803331375122,
422
+ "learning_rate": 2.2896577946768065e-05,
423
+ "loss": 0.8988,
424
+ "step": 24500
425
+ },
426
+ {
427
+ "epoch": 2.2892200627246297,
428
+ "grad_norm": 1.4012964963912964,
429
+ "learning_rate": 2.2516349809885935e-05,
430
+ "loss": 0.8717,
431
+ "step": 25000
432
+ },
433
+ {
434
+ "epoch": 2.3350044639791223,
435
+ "grad_norm": 1.9718306064605713,
436
+ "learning_rate": 2.2136121673003805e-05,
437
+ "loss": 0.9545,
438
+ "step": 25500
439
+ },
440
+ {
441
+ "epoch": 2.380788865233615,
442
+ "grad_norm": 2.5009524822235107,
443
+ "learning_rate": 2.1755893536121674e-05,
444
+ "loss": 0.9311,
445
+ "step": 26000
446
+ },
447
+ {
448
+ "epoch": 2.4265732664881074,
449
+ "grad_norm": 1.2410893440246582,
450
+ "learning_rate": 2.137642585551331e-05,
451
+ "loss": 0.8701,
452
+ "step": 26500
453
+ },
454
+ {
455
+ "epoch": 2.4723576677426,
456
+ "grad_norm": 1.5738617181777954,
457
+ "learning_rate": 2.099619771863118e-05,
458
+ "loss": 0.8938,
459
+ "step": 27000
460
+ },
461
+ {
462
+ "epoch": 2.4723576677426,
463
+ "eval_loss": 0.26886311173439026,
464
+ "eval_runtime": 654.1765,
465
+ "eval_samples_per_second": 10.016,
466
+ "eval_steps_per_second": 10.016,
467
+ "eval_wer": 0.15074304946552583,
468
+ "step": 27000
469
+ },
470
+ {
471
+ "epoch": 2.5181420689970926,
472
+ "grad_norm": 2.2281384468078613,
473
+ "learning_rate": 2.061596958174905e-05,
474
+ "loss": 0.8609,
475
+ "step": 27500
476
+ },
477
+ {
478
+ "epoch": 2.563926470251585,
479
+ "grad_norm": 2.347487449645996,
480
+ "learning_rate": 2.023574144486692e-05,
481
+ "loss": 0.9237,
482
+ "step": 28000
483
+ },
484
+ {
485
+ "epoch": 2.6097108715060777,
486
+ "grad_norm": 1.710715413093567,
487
+ "learning_rate": 1.9855513307984794e-05,
488
+ "loss": 0.8778,
489
+ "step": 28500
490
+ },
491
+ {
492
+ "epoch": 2.6554952727605707,
493
+ "grad_norm": 9.36032772064209,
494
+ "learning_rate": 1.947680608365019e-05,
495
+ "loss": 0.9092,
496
+ "step": 29000
497
+ },
498
+ {
499
+ "epoch": 2.701279674015063,
500
+ "grad_norm": 1.8656507730484009,
501
+ "learning_rate": 1.909657794676806e-05,
502
+ "loss": 0.8829,
503
+ "step": 29500
504
+ },
505
+ {
506
+ "epoch": 2.747064075269556,
507
+ "grad_norm": 0.8339200019836426,
508
+ "learning_rate": 1.8716349809885934e-05,
509
+ "loss": 0.8979,
510
+ "step": 30000
511
+ },
512
+ {
513
+ "epoch": 2.747064075269556,
514
+ "eval_loss": 0.2853592336177826,
515
+ "eval_runtime": 652.9254,
516
+ "eval_samples_per_second": 10.035,
517
+ "eval_steps_per_second": 10.035,
518
+ "eval_wer": 0.14718778886492379,
519
+ "step": 30000
520
+ },
521
+ {
522
+ "epoch": 2.792848476524048,
523
+ "grad_norm": 1.8189595937728882,
524
+ "learning_rate": 1.8336121673003804e-05,
525
+ "loss": 0.9216,
526
+ "step": 30500
527
+ },
528
+ {
529
+ "epoch": 2.838632877778541,
530
+ "grad_norm": 1.2487553358078003,
531
+ "learning_rate": 1.7955893536121673e-05,
532
+ "loss": 0.8714,
533
+ "step": 31000
534
+ },
535
+ {
536
+ "epoch": 2.8844172790330336,
537
+ "grad_norm": 2.8581480979919434,
538
+ "learning_rate": 1.757642585551331e-05,
539
+ "loss": 0.8675,
540
+ "step": 31500
541
+ },
542
+ {
543
+ "epoch": 2.930201680287526,
544
+ "grad_norm": 1.5087124109268188,
545
+ "learning_rate": 1.719619771863118e-05,
546
+ "loss": 0.9003,
547
+ "step": 32000
548
+ },
549
+ {
550
+ "epoch": 2.9759860815420187,
551
+ "grad_norm": 2.1860768795013428,
552
+ "learning_rate": 1.6816730038022814e-05,
553
+ "loss": 0.906,
554
+ "step": 32500
555
+ },
556
+ {
557
+ "epoch": 3.0217704827965113,
558
+ "grad_norm": 2.0595364570617676,
559
+ "learning_rate": 1.6436501901140687e-05,
560
+ "loss": 0.8624,
561
+ "step": 33000
562
+ },
563
+ {
564
+ "epoch": 3.0217704827965113,
565
+ "eval_loss": 0.282227486371994,
566
+ "eval_runtime": 652.2893,
567
+ "eval_samples_per_second": 10.045,
568
+ "eval_steps_per_second": 10.045,
569
+ "eval_wer": 0.14017800004740347,
570
+ "step": 33000
571
+ },
572
+ {
573
+ "epoch": 3.067554884051004,
574
+ "grad_norm": 2.31689453125,
575
+ "learning_rate": 1.6056273764258557e-05,
576
+ "loss": 0.8498,
577
+ "step": 33500
578
+ },
579
+ {
580
+ "epoch": 3.1133392853054964,
581
+ "grad_norm": 1.297116756439209,
582
+ "learning_rate": 1.5676045627376426e-05,
583
+ "loss": 0.8301,
584
+ "step": 34000
585
+ },
586
+ {
587
+ "epoch": 3.159123686559989,
588
+ "grad_norm": 1.9447718858718872,
589
+ "learning_rate": 1.52958174904943e-05,
590
+ "loss": 0.8445,
591
+ "step": 34500
592
+ },
593
+ {
594
+ "epoch": 3.2049080878144816,
595
+ "grad_norm": 1.8142869472503662,
596
+ "learning_rate": 1.4915589353612167e-05,
597
+ "loss": 0.8988,
598
+ "step": 35000
599
+ },
600
+ {
601
+ "epoch": 3.250692489068974,
602
+ "grad_norm": 26.81559944152832,
603
+ "learning_rate": 1.4535361216730039e-05,
604
+ "loss": 0.8358,
605
+ "step": 35500
606
+ },
607
+ {
608
+ "epoch": 3.2964768903234667,
609
+ "grad_norm": 1.4768437147140503,
610
+ "learning_rate": 1.4155133079847908e-05,
611
+ "loss": 0.8269,
612
+ "step": 36000
613
+ },
614
+ {
615
+ "epoch": 3.2964768903234667,
616
+ "eval_loss": 0.26364651322364807,
617
+ "eval_runtime": 653.8923,
618
+ "eval_samples_per_second": 10.02,
619
+ "eval_steps_per_second": 10.02,
620
+ "eval_wer": 0.13765376502097604,
621
+ "step": 36000
622
+ },
623
+ {
624
+ "epoch": 3.3422612915779593,
625
+ "grad_norm": 3.271960496902466,
626
+ "learning_rate": 1.3774904942965781e-05,
627
+ "loss": 0.8389,
628
+ "step": 36500
629
+ },
630
+ {
631
+ "epoch": 3.388045692832452,
632
+ "grad_norm": 1.8337602615356445,
633
+ "learning_rate": 1.3395437262357415e-05,
634
+ "loss": 0.8295,
635
+ "step": 37000
636
+ },
637
+ {
638
+ "epoch": 3.4338300940869444,
639
+ "grad_norm": 2.3357372283935547,
640
+ "learning_rate": 1.301596958174905e-05,
641
+ "loss": 0.8646,
642
+ "step": 37500
643
+ },
644
+ {
645
+ "epoch": 3.479614495341437,
646
+ "grad_norm": 3.577268600463867,
647
+ "learning_rate": 1.2636501901140685e-05,
648
+ "loss": 0.844,
649
+ "step": 38000
650
+ },
651
+ {
652
+ "epoch": 3.52539889659593,
653
+ "grad_norm": 3.0567266941070557,
654
+ "learning_rate": 1.2256273764258558e-05,
655
+ "loss": 0.8212,
656
+ "step": 38500
657
+ },
658
+ {
659
+ "epoch": 3.571183297850422,
660
+ "grad_norm": 5.759117126464844,
661
+ "learning_rate": 1.1876045627376427e-05,
662
+ "loss": 0.85,
663
+ "step": 39000
664
+ },
665
+ {
666
+ "epoch": 3.571183297850422,
667
+ "eval_loss": 0.24618284404277802,
668
+ "eval_runtime": 648.8718,
669
+ "eval_samples_per_second": 10.098,
670
+ "eval_steps_per_second": 10.098,
671
+ "eval_wer": 0.13248678628143443,
672
+ "step": 39000
673
+ },
674
+ {
675
+ "epoch": 3.616967699104915,
676
+ "grad_norm": 1.7661995887756348,
677
+ "learning_rate": 1.1495817490494297e-05,
678
+ "loss": 0.8213,
679
+ "step": 39500
680
+ },
681
+ {
682
+ "epoch": 3.6627521003594077,
683
+ "grad_norm": 2.657759428024292,
684
+ "learning_rate": 1.1115589353612168e-05,
685
+ "loss": 0.8379,
686
+ "step": 40000
687
+ },
688
+ {
689
+ "epoch": 3.7085365016139002,
690
+ "grad_norm": 2.6153953075408936,
691
+ "learning_rate": 1.0735361216730038e-05,
692
+ "loss": 0.807,
693
+ "step": 40500
694
+ },
695
+ {
696
+ "epoch": 3.754320902868393,
697
+ "grad_norm": 4.843667507171631,
698
+ "learning_rate": 1.035513307984791e-05,
699
+ "loss": 0.8265,
700
+ "step": 41000
701
+ },
702
+ {
703
+ "epoch": 3.8001053041228854,
704
+ "grad_norm": 3.4964911937713623,
705
+ "learning_rate": 9.97490494296578e-06,
706
+ "loss": 0.85,
707
+ "step": 41500
708
+ },
709
+ {
710
+ "epoch": 3.845889705377378,
711
+ "grad_norm": 2.8312175273895264,
712
+ "learning_rate": 9.594676806083652e-06,
713
+ "loss": 0.8557,
714
+ "step": 42000
715
+ },
716
+ {
717
+ "epoch": 3.845889705377378,
718
+ "eval_loss": 0.24823951721191406,
719
+ "eval_runtime": 641.5128,
720
+ "eval_samples_per_second": 10.213,
721
+ "eval_steps_per_second": 10.213,
722
+ "eval_wer": 0.12771088620795903,
723
+ "step": 42000
724
+ },
725
+ {
726
+ "epoch": 3.8916741066318705,
727
+ "grad_norm": 2.185664653778076,
728
+ "learning_rate": 9.214448669201521e-06,
729
+ "loss": 0.8157,
730
+ "step": 42500
731
+ },
732
+ {
733
+ "epoch": 3.937458507886363,
734
+ "grad_norm": 3.490175724029541,
735
+ "learning_rate": 8.834220532319391e-06,
736
+ "loss": 0.8202,
737
+ "step": 43000
738
+ },
739
+ {
740
+ "epoch": 3.9832429091408557,
741
+ "grad_norm": 1.9320560693740845,
742
+ "learning_rate": 8.453992395437262e-06,
743
+ "loss": 0.8344,
744
+ "step": 43500
745
+ },
746
+ {
747
+ "epoch": 4.029027310395349,
748
+ "grad_norm": 1.6495819091796875,
749
+ "learning_rate": 8.073764258555134e-06,
750
+ "loss": 0.8167,
751
+ "step": 44000
752
+ },
753
+ {
754
+ "epoch": 4.074811711649841,
755
+ "grad_norm": 5.037049770355225,
756
+ "learning_rate": 7.694296577946768e-06,
757
+ "loss": 0.8033,
758
+ "step": 44500
759
+ },
760
+ {
761
+ "epoch": 4.120596112904334,
762
+ "grad_norm": 2.1924211978912354,
763
+ "learning_rate": 7.314828897338404e-06,
764
+ "loss": 0.8177,
765
+ "step": 45000
766
+ },
767
+ {
768
+ "epoch": 4.120596112904334,
769
+ "eval_loss": 0.2399299442768097,
770
+ "eval_runtime": 647.8262,
771
+ "eval_samples_per_second": 10.114,
772
+ "eval_steps_per_second": 10.114,
773
+ "eval_wer": 0.12562513332227251,
774
+ "step": 45000
775
+ },
776
+ {
777
+ "epoch": 4.166380514158826,
778
+ "grad_norm": 0.6167405843734741,
779
+ "learning_rate": 6.9346007604562745e-06,
780
+ "loss": 0.8205,
781
+ "step": 45500
782
+ },
783
+ {
784
+ "epoch": 4.212164915413319,
785
+ "grad_norm": 1.5424410104751587,
786
+ "learning_rate": 6.554372623574144e-06,
787
+ "loss": 0.8096,
788
+ "step": 46000
789
+ },
790
+ {
791
+ "epoch": 4.257949316667811,
792
+ "grad_norm": 3.1014537811279297,
793
+ "learning_rate": 6.1741444866920154e-06,
794
+ "loss": 0.7904,
795
+ "step": 46500
796
+ },
797
+ {
798
+ "epoch": 4.303733717922304,
799
+ "grad_norm": 1.1060818433761597,
800
+ "learning_rate": 5.794676806083651e-06,
801
+ "loss": 0.7818,
802
+ "step": 47000
803
+ },
804
+ {
805
+ "epoch": 4.349518119176796,
806
+ "grad_norm": 1.345553994178772,
807
+ "learning_rate": 5.414448669201522e-06,
808
+ "loss": 0.8144,
809
+ "step": 47500
810
+ },
811
+ {
812
+ "epoch": 4.395302520431289,
813
+ "grad_norm": 2.172055721282959,
814
+ "learning_rate": 5.034220532319392e-06,
815
+ "loss": 0.7933,
816
+ "step": 48000
817
+ },
818
+ {
819
+ "epoch": 4.395302520431289,
820
+ "eval_loss": 0.2388318032026291,
821
+ "eval_runtime": 640.8224,
822
+ "eval_samples_per_second": 10.224,
823
+ "eval_steps_per_second": 10.224,
824
+ "eval_wer": 0.12215282880235122,
825
+ "step": 48000
826
+ },
827
+ {
828
+ "epoch": 4.441086921685781,
829
+ "grad_norm": 3.54559063911438,
830
+ "learning_rate": 4.653992395437263e-06,
831
+ "loss": 0.814,
832
+ "step": 48500
833
+ },
834
+ {
835
+ "epoch": 4.486871322940274,
836
+ "grad_norm": 3.8439674377441406,
837
+ "learning_rate": 4.273764258555134e-06,
838
+ "loss": 0.8641,
839
+ "step": 49000
840
+ },
841
+ {
842
+ "epoch": 4.5326557241947665,
843
+ "grad_norm": 1.967161774635315,
844
+ "learning_rate": 3.893536121673004e-06,
845
+ "loss": 0.7962,
846
+ "step": 49500
847
+ },
848
+ {
849
+ "epoch": 4.5784401254492595,
850
+ "grad_norm": 1.7992150783538818,
851
+ "learning_rate": 3.5133079847908747e-06,
852
+ "loss": 0.7871,
853
+ "step": 50000
854
+ },
855
+ {
856
+ "epoch": 4.6242245267037525,
857
+ "grad_norm": 1.4352868795394897,
858
+ "learning_rate": 3.1330798479087456e-06,
859
+ "loss": 0.839,
860
+ "step": 50500
861
+ },
862
+ {
863
+ "epoch": 4.670008927958245,
864
+ "grad_norm": 1.4436633586883545,
865
+ "learning_rate": 2.7536121673003807e-06,
866
+ "loss": 0.7864,
867
+ "step": 51000
868
+ },
869
+ {
870
+ "epoch": 4.670008927958245,
871
+ "eval_loss": 0.237684428691864,
872
+ "eval_runtime": 642.8597,
873
+ "eval_samples_per_second": 10.192,
874
+ "eval_steps_per_second": 10.192,
875
+ "eval_wer": 0.12045815458273092,
876
+ "step": 51000
877
+ },
878
+ {
879
+ "epoch": 4.715793329212737,
880
+ "grad_norm": 6.0789055824279785,
881
+ "learning_rate": 2.373384030418251e-06,
882
+ "loss": 0.7787,
883
+ "step": 51500
884
+ },
885
+ {
886
+ "epoch": 4.76157773046723,
887
+ "grad_norm": 3.2274794578552246,
888
+ "learning_rate": 1.993916349809886e-06,
889
+ "loss": 0.8234,
890
+ "step": 52000
891
+ },
892
+ {
893
+ "epoch": 4.807362131721723,
894
+ "grad_norm": 2.8651950359344482,
895
+ "learning_rate": 1.6136882129277568e-06,
896
+ "loss": 0.8,
897
+ "step": 52500
898
+ },
899
+ {
900
+ "epoch": 4.853146532976215,
901
+ "grad_norm": 1.4273629188537598,
902
+ "learning_rate": 1.2334600760456275e-06,
903
+ "loss": 0.7861,
904
+ "step": 53000
905
+ },
906
+ {
907
+ "epoch": 4.898930934230708,
908
+ "grad_norm": 1.4547092914581299,
909
+ "learning_rate": 8.532319391634982e-07,
910
+ "loss": 0.7801,
911
+ "step": 53500
912
+ },
913
+ {
914
+ "epoch": 4.9447153354852,
915
+ "grad_norm": 2.2551023960113525,
916
+ "learning_rate": 4.737642585551331e-07,
917
+ "loss": 0.8371,
918
+ "step": 54000
919
+ },
920
+ {
921
+ "epoch": 4.9447153354852,
922
+ "eval_loss": 0.23816268146038055,
923
+ "eval_runtime": 641.5849,
924
+ "eval_samples_per_second": 10.212,
925
+ "eval_steps_per_second": 10.212,
926
+ "eval_wer": 0.11996634353298097,
927
+ "step": 54000
928
+ },
929
+ {
930
+ "epoch": 4.990499736739693,
931
+ "grad_norm": 1.8520026206970215,
932
+ "learning_rate": 9.353612167300382e-08,
933
+ "loss": 0.7373,
934
+ "step": 54500
935
+ },
936
+ {
937
+ "epoch": 5.036627521003594,
938
+ "grad_norm": 5.179400444030762,
939
+ "learning_rate": 2.0232462686567166e-05,
940
+ "loss": 0.836,
941
+ "step": 55000
942
+ },
943
+ {
944
+ "epoch": 5.082411922258086,
945
+ "grad_norm": 7.242797374725342,
946
+ "learning_rate": 2.004589552238806e-05,
947
+ "loss": 0.8493,
948
+ "step": 55500
949
+ },
950
+ {
951
+ "epoch": 5.128196323512579,
952
+ "grad_norm": 3.6222658157348633,
953
+ "learning_rate": 1.9859328358208957e-05,
954
+ "loss": 0.8105,
955
+ "step": 56000
956
+ },
957
+ {
958
+ "epoch": 5.1739807247670715,
959
+ "grad_norm": 2.839559316635132,
960
+ "learning_rate": 1.9672761194029853e-05,
961
+ "loss": 0.8075,
962
+ "step": 56500
963
+ },
964
+ {
965
+ "epoch": 5.2197651260215645,
966
+ "grad_norm": 2.1728529930114746,
967
+ "learning_rate": 1.9486940298507463e-05,
968
+ "loss": 0.7846,
969
+ "step": 57000
970
+ },
971
+ {
972
+ "epoch": 5.2197651260215645,
973
+ "eval_loss": 0.24261941015720367,
974
+ "eval_runtime": 650.6489,
975
+ "eval_samples_per_second": 10.07,
976
+ "eval_steps_per_second": 10.07,
977
+ "eval_wer": 0.13190609371666942,
978
+ "step": 57000
979
+ },
980
+ {
981
+ "epoch": 5.2655495272760575,
982
+ "grad_norm": 1.945101022720337,
983
+ "learning_rate": 1.9300373134328362e-05,
984
+ "loss": 0.8194,
985
+ "step": 57500
986
+ },
987
+ {
988
+ "epoch": 5.31133392853055,
989
+ "grad_norm": 1.497315764427185,
990
+ "learning_rate": 1.9113805970149254e-05,
991
+ "loss": 0.8367,
992
+ "step": 58000
993
+ },
994
+ {
995
+ "epoch": 5.357118329785042,
996
+ "grad_norm": 3.0750606060028076,
997
+ "learning_rate": 1.8927238805970153e-05,
998
+ "loss": 0.8442,
999
+ "step": 58500
1000
+ },
1001
+ {
1002
+ "epoch": 5.402902731039535,
1003
+ "grad_norm": 2.4695701599121094,
1004
+ "learning_rate": 1.8740671641791046e-05,
1005
+ "loss": 0.8124,
1006
+ "step": 59000
1007
+ },
1008
+ {
1009
+ "epoch": 5.448687132294028,
1010
+ "grad_norm": 1.8706340789794922,
1011
+ "learning_rate": 1.85544776119403e-05,
1012
+ "loss": 0.775,
1013
+ "step": 59500
1014
+ },
1015
+ {
1016
+ "epoch": 5.49447153354852,
1017
+ "grad_norm": 5.028220176696777,
1018
+ "learning_rate": 1.8367910447761194e-05,
1019
+ "loss": 0.808,
1020
+ "step": 60000
1021
+ },
1022
+ {
1023
+ "epoch": 5.49447153354852,
1024
+ "eval_loss": 0.24327826499938965,
1025
+ "eval_runtime": 650.6547,
1026
+ "eval_samples_per_second": 10.07,
1027
+ "eval_steps_per_second": 10.07,
1028
+ "eval_wer": 0.13021734493138348,
1029
+ "step": 60000
1030
+ },
1031
+ {
1032
+ "epoch": 5.540255934803013,
1033
+ "grad_norm": 1.9498703479766846,
1034
+ "learning_rate": 1.818134328358209e-05,
1035
+ "loss": 0.7813,
1036
+ "step": 60500
1037
+ },
1038
+ {
1039
+ "epoch": 5.586040336057505,
1040
+ "grad_norm": 3.7338151931762695,
1041
+ "learning_rate": 1.7994776119402986e-05,
1042
+ "loss": 0.7698,
1043
+ "step": 61000
1044
+ },
1045
+ {
1046
+ "epoch": 5.631824737311998,
1047
+ "grad_norm": 3.364290475845337,
1048
+ "learning_rate": 1.780820895522388e-05,
1049
+ "loss": 0.7783,
1050
+ "step": 61500
1051
+ },
1052
+ {
1053
+ "epoch": 5.67760913856649,
1054
+ "grad_norm": 3.4009501934051514,
1055
+ "learning_rate": 1.7622014925373137e-05,
1056
+ "loss": 0.7652,
1057
+ "step": 62000
1058
+ },
1059
+ {
1060
+ "epoch": 5.723393539820983,
1061
+ "grad_norm": 2.9030606746673584,
1062
+ "learning_rate": 1.743544776119403e-05,
1063
+ "loss": 0.7946,
1064
+ "step": 62500
1065
+ },
1066
+ {
1067
+ "epoch": 5.769177941075475,
1068
+ "grad_norm": 1.3028395175933838,
1069
+ "learning_rate": 1.724888059701493e-05,
1070
+ "loss": 0.7456,
1071
+ "step": 63000
1072
+ },
1073
+ {
1074
+ "epoch": 5.769177941075475,
1075
+ "eval_loss": 0.2477836161851883,
1076
+ "eval_runtime": 648.6941,
1077
+ "eval_samples_per_second": 10.1,
1078
+ "eval_steps_per_second": 10.1,
1079
+ "eval_wer": 0.1254769974639141,
1080
+ "step": 63000
1081
+ },
1082
+ {
1083
+ "epoch": 5.814962342329968,
1084
+ "grad_norm": 1.8734581470489502,
1085
+ "learning_rate": 1.706231343283582e-05,
1086
+ "loss": 0.7893,
1087
+ "step": 63500
1088
+ },
1089
+ {
1090
+ "epoch": 5.8607467435844605,
1091
+ "grad_norm": 2.084261178970337,
1092
+ "learning_rate": 1.6876119402985077e-05,
1093
+ "loss": 0.7331,
1094
+ "step": 64000
1095
+ },
1096
+ {
1097
+ "epoch": 5.9065311448389535,
1098
+ "grad_norm": 2.031810760498047,
1099
+ "learning_rate": 1.668955223880597e-05,
1100
+ "loss": 0.747,
1101
+ "step": 64500
1102
+ },
1103
+ {
1104
+ "epoch": 5.952315546093446,
1105
+ "grad_norm": 4.709017276763916,
1106
+ "learning_rate": 1.650298507462687e-05,
1107
+ "loss": 0.7473,
1108
+ "step": 65000
1109
+ },
1110
+ {
1111
+ "epoch": 5.998099947347939,
1112
+ "grad_norm": 3.2480199337005615,
1113
+ "learning_rate": 1.631641791044776e-05,
1114
+ "loss": 0.7195,
1115
+ "step": 65500
1116
+ },
1117
+ {
1118
+ "epoch": 6.043884348602431,
1119
+ "grad_norm": 2.1403627395629883,
1120
+ "learning_rate": 1.6129850746268657e-05,
1121
+ "loss": 0.7217,
1122
+ "step": 66000
1123
+ },
1124
+ {
1125
+ "epoch": 6.043884348602431,
1126
+ "eval_loss": 0.24559348821640015,
1127
+ "eval_runtime": 650.412,
1128
+ "eval_samples_per_second": 10.074,
1129
+ "eval_steps_per_second": 10.074,
1130
+ "eval_wer": 0.12621175132137186,
1131
+ "step": 66000
1132
+ },
1133
+ {
1134
+ "epoch": 6.089668749856924,
1135
+ "grad_norm": 2.90395188331604,
1136
+ "learning_rate": 1.5943283582089553e-05,
1137
+ "loss": 0.7285,
1138
+ "step": 66500
1139
+ },
1140
+ {
1141
+ "epoch": 6.135453151111417,
1142
+ "grad_norm": 2.9291510581970215,
1143
+ "learning_rate": 1.5757089552238805e-05,
1144
+ "loss": 0.7324,
1145
+ "step": 67000
1146
+ },
1147
+ {
1148
+ "epoch": 6.181237552365909,
1149
+ "grad_norm": 12.322357177734375,
1150
+ "learning_rate": 1.5570522388059705e-05,
1151
+ "loss": 0.7198,
1152
+ "step": 67500
1153
+ },
1154
+ {
1155
+ "epoch": 6.227021953620402,
1156
+ "grad_norm": 4.177962303161621,
1157
+ "learning_rate": 1.5383955223880597e-05,
1158
+ "loss": 0.7206,
1159
+ "step": 68000
1160
+ },
1161
+ {
1162
+ "epoch": 6.272806354874894,
1163
+ "grad_norm": 1.2690945863723755,
1164
+ "learning_rate": 1.5197388059701494e-05,
1165
+ "loss": 0.729,
1166
+ "step": 68500
1167
+ },
1168
+ {
1169
+ "epoch": 6.318590756129387,
1170
+ "grad_norm": 1.8948352336883545,
1171
+ "learning_rate": 1.5010820895522389e-05,
1172
+ "loss": 0.7115,
1173
+ "step": 69000
1174
+ },
1175
+ {
1176
+ "epoch": 6.318590756129387,
1177
+ "eval_loss": 0.2540062367916107,
1178
+ "eval_runtime": 648.0874,
1179
+ "eval_samples_per_second": 10.11,
1180
+ "eval_steps_per_second": 10.11,
1181
+ "eval_wer": 0.12306534568983907,
1182
+ "step": 69000
1183
+ },
1184
+ {
1185
+ "epoch": 6.364375157383879,
1186
+ "grad_norm": 2.9900403022766113,
1187
+ "learning_rate": 1.4824253731343286e-05,
1188
+ "loss": 0.7681,
1189
+ "step": 69500
1190
+ },
1191
+ {
1192
+ "epoch": 6.410159558638372,
1193
+ "grad_norm": 3.5702733993530273,
1194
+ "learning_rate": 1.4638059701492537e-05,
1195
+ "loss": 0.7142,
1196
+ "step": 70000
1197
+ },
1198
+ {
1199
+ "epoch": 6.455943959892864,
1200
+ "grad_norm": 2.3290789127349854,
1201
+ "learning_rate": 1.4451492537313434e-05,
1202
+ "loss": 0.71,
1203
+ "step": 70500
1204
+ },
1205
+ {
1206
+ "epoch": 6.501728361147357,
1207
+ "grad_norm": 6.307437896728516,
1208
+ "learning_rate": 1.4264925373134328e-05,
1209
+ "loss": 0.6944,
1210
+ "step": 71000
1211
+ },
1212
+ {
1213
+ "epoch": 6.547512762401849,
1214
+ "grad_norm": 5.269557952880859,
1215
+ "learning_rate": 1.4078358208955226e-05,
1216
+ "loss": 0.7638,
1217
+ "step": 71500
1218
+ },
1219
+ {
1220
+ "epoch": 6.593297163656342,
1221
+ "grad_norm": 1.6152143478393555,
1222
+ "learning_rate": 1.389179104477612e-05,
1223
+ "loss": 0.6701,
1224
+ "step": 72000
1225
+ },
1226
+ {
1227
+ "epoch": 6.593297163656342,
1228
+ "eval_loss": 0.2436157912015915,
1229
+ "eval_runtime": 649.7342,
1230
+ "eval_samples_per_second": 10.084,
1231
+ "eval_steps_per_second": 10.084,
1232
+ "eval_wer": 0.12396008627432391,
1233
+ "step": 72000
1234
+ },
1235
+ {
1236
+ "epoch": 6.639081564910835,
1237
+ "grad_norm": 2.4076077938079834,
1238
+ "learning_rate": 1.3705223880597016e-05,
1239
+ "loss": 0.7058,
1240
+ "step": 72500
1241
+ },
1242
+ {
1243
+ "epoch": 6.684865966165328,
1244
+ "grad_norm": 3.9522953033447266,
1245
+ "learning_rate": 1.351865671641791e-05,
1246
+ "loss": 0.6958,
1247
+ "step": 73000
1248
+ },
1249
+ {
1250
+ "epoch": 6.73065036741982,
1251
+ "grad_norm": 1.8242387771606445,
1252
+ "learning_rate": 1.3332089552238807e-05,
1253
+ "loss": 0.7166,
1254
+ "step": 73500
1255
+ },
1256
+ {
1257
+ "epoch": 6.776434768674313,
1258
+ "grad_norm": 4.560197353363037,
1259
+ "learning_rate": 1.3145522388059701e-05,
1260
+ "loss": 0.6946,
1261
+ "step": 74000
1262
+ },
1263
+ {
1264
+ "epoch": 6.822219169928806,
1265
+ "grad_norm": 2.9841842651367188,
1266
+ "learning_rate": 1.2958955223880599e-05,
1267
+ "loss": 0.7179,
1268
+ "step": 74500
1269
+ },
1270
+ {
1271
+ "epoch": 6.868003571183298,
1272
+ "grad_norm": 1.7246359586715698,
1273
+ "learning_rate": 1.2772761194029852e-05,
1274
+ "loss": 0.6966,
1275
+ "step": 75000
1276
+ },
1277
+ {
1278
+ "epoch": 6.868003571183298,
1279
+ "eval_loss": 0.23714399337768555,
1280
+ "eval_runtime": 653.4649,
1281
+ "eval_samples_per_second": 10.027,
1282
+ "eval_steps_per_second": 10.027,
1283
+ "eval_wer": 0.12096774193548387,
1284
+ "step": 75000
1285
+ },
1286
+ {
1287
+ "epoch": 6.91378797243779,
1288
+ "grad_norm": 3.9530229568481445,
1289
+ "learning_rate": 1.2586194029850747e-05,
1290
+ "loss": 0.6542,
1291
+ "step": 75500
1292
+ },
1293
+ {
1294
+ "epoch": 6.959572373692283,
1295
+ "grad_norm": 4.2303690910339355,
1296
+ "learning_rate": 1.2399626865671643e-05,
1297
+ "loss": 0.683,
1298
+ "step": 76000
1299
+ },
1300
+ {
1301
+ "epoch": 7.005356774946776,
1302
+ "grad_norm": 1.5437530279159546,
1303
+ "learning_rate": 1.2213059701492537e-05,
1304
+ "loss": 0.6415,
1305
+ "step": 76500
1306
+ },
1307
+ {
1308
+ "epoch": 7.051141176201268,
1309
+ "grad_norm": 2.246727466583252,
1310
+ "learning_rate": 1.2026492537313435e-05,
1311
+ "loss": 0.6831,
1312
+ "step": 77000
1313
+ },
1314
+ {
1315
+ "epoch": 7.096925577455761,
1316
+ "grad_norm": 5.2752580642700195,
1317
+ "learning_rate": 1.1839925373134329e-05,
1318
+ "loss": 0.6666,
1319
+ "step": 77500
1320
+ },
1321
+ {
1322
+ "epoch": 7.142709978710253,
1323
+ "grad_norm": 1.453157663345337,
1324
+ "learning_rate": 1.1653358208955226e-05,
1325
+ "loss": 0.7056,
1326
+ "step": 78000
1327
+ },
1328
+ {
1329
+ "epoch": 7.142709978710253,
1330
+ "eval_loss": 0.23014602065086365,
1331
+ "eval_runtime": 648.8331,
1332
+ "eval_samples_per_second": 10.098,
1333
+ "eval_steps_per_second": 10.098,
1334
+ "eval_wer": 0.11875755492877628,
1335
+ "step": 78000
1336
+ },
1337
+ {
1338
+ "epoch": 7.188494379964746,
1339
+ "grad_norm": 1.6704216003417969,
1340
+ "learning_rate": 1.1467164179104479e-05,
1341
+ "loss": 0.7214,
1342
+ "step": 78500
1343
+ },
1344
+ {
1345
+ "epoch": 7.234278781219238,
1346
+ "grad_norm": 2.2454934120178223,
1347
+ "learning_rate": 1.1280597014925375e-05,
1348
+ "loss": 0.6921,
1349
+ "step": 79000
1350
+ },
1351
+ {
1352
+ "epoch": 7.280063182473731,
1353
+ "grad_norm": 0.8851762413978577,
1354
+ "learning_rate": 1.1094029850746269e-05,
1355
+ "loss": 0.6605,
1356
+ "step": 79500
1357
+ },
1358
+ {
1359
+ "epoch": 7.3258475837282235,
1360
+ "grad_norm": 2.482797861099243,
1361
+ "learning_rate": 1.0907462686567165e-05,
1362
+ "loss": 0.6383,
1363
+ "step": 80000
1364
+ },
1365
+ {
1366
+ "epoch": 7.3716319849827165,
1367
+ "grad_norm": 2.4076988697052,
1368
+ "learning_rate": 1.072089552238806e-05,
1369
+ "loss": 0.6638,
1370
+ "step": 80500
1371
+ },
1372
+ {
1373
+ "epoch": 7.417416386237209,
1374
+ "grad_norm": 2.0605874061584473,
1375
+ "learning_rate": 1.0534328358208956e-05,
1376
+ "loss": 0.6654,
1377
+ "step": 81000
1378
+ },
1379
+ {
1380
+ "epoch": 7.417416386237209,
1381
+ "eval_loss": 0.24034035205841064,
1382
+ "eval_runtime": 650.4954,
1383
+ "eval_samples_per_second": 10.072,
1384
+ "eval_steps_per_second": 10.072,
1385
+ "eval_wer": 0.11891754165580337,
1386
+ "step": 81000
1387
+ },
1388
+ {
1389
+ "epoch": 7.463200787491702,
1390
+ "grad_norm": 1.975950002670288,
1391
+ "learning_rate": 1.034776119402985e-05,
1392
+ "loss": 0.6744,
1393
+ "step": 81500
1394
+ },
1395
+ {
1396
+ "epoch": 7.508985188746194,
1397
+ "grad_norm": 1.52113676071167,
1398
+ "learning_rate": 1.0161940298507465e-05,
1399
+ "loss": 0.6832,
1400
+ "step": 82000
1401
+ },
1402
+ {
1403
+ "epoch": 7.554769590000687,
1404
+ "grad_norm": 2.152534246444702,
1405
+ "learning_rate": 9.975373134328359e-06,
1406
+ "loss": 0.6719,
1407
+ "step": 82500
1408
+ },
1409
+ {
1410
+ "epoch": 7.600553991255179,
1411
+ "grad_norm": 1.5796542167663574,
1412
+ "learning_rate": 9.788805970149254e-06,
1413
+ "loss": 0.6515,
1414
+ "step": 83000
1415
+ },
1416
+ {
1417
+ "epoch": 7.646338392509672,
1418
+ "grad_norm": 1.5929287672042847,
1419
+ "learning_rate": 9.60223880597015e-06,
1420
+ "loss": 0.666,
1421
+ "step": 83500
1422
+ },
1423
+ {
1424
+ "epoch": 7.692122793764165,
1425
+ "grad_norm": 1.7882949113845825,
1426
+ "learning_rate": 9.415671641791046e-06,
1427
+ "loss": 0.6339,
1428
+ "step": 84000
1429
+ },
1430
+ {
1431
+ "epoch": 7.692122793764165,
1432
+ "eval_loss": 0.23006904125213623,
1433
+ "eval_runtime": 649.7568,
1434
+ "eval_samples_per_second": 10.084,
1435
+ "eval_steps_per_second": 10.084,
1436
+ "eval_wer": 0.11488232087412008,
1437
+ "step": 84000
1438
+ },
1439
+ {
1440
+ "epoch": 7.737907195018657,
1441
+ "grad_norm": 1.4368743896484375,
1442
+ "learning_rate": 9.22910447761194e-06,
1443
+ "loss": 0.6861,
1444
+ "step": 84500
1445
+ },
1446
+ {
1447
+ "epoch": 7.78369159627315,
1448
+ "grad_norm": 2.0316922664642334,
1449
+ "learning_rate": 9.042537313432836e-06,
1450
+ "loss": 0.6513,
1451
+ "step": 85000
1452
+ },
1453
+ {
1454
+ "epoch": 7.829475997527642,
1455
+ "grad_norm": 1.2255696058273315,
1456
+ "learning_rate": 8.855970149253732e-06,
1457
+ "loss": 0.6336,
1458
+ "step": 85500
1459
+ },
1460
+ {
1461
+ "epoch": 7.875260398782135,
1462
+ "grad_norm": 2.4979262351989746,
1463
+ "learning_rate": 8.669776119402986e-06,
1464
+ "loss": 0.639,
1465
+ "step": 86000
1466
+ },
1467
+ {
1468
+ "epoch": 7.921044800036627,
1469
+ "grad_norm": 2.6587982177734375,
1470
+ "learning_rate": 8.483208955223882e-06,
1471
+ "loss": 0.6506,
1472
+ "step": 86500
1473
+ },
1474
+ {
1475
+ "epoch": 7.96682920129112,
1476
+ "grad_norm": 2.314595937728882,
1477
+ "learning_rate": 8.296641791044778e-06,
1478
+ "loss": 0.6649,
1479
+ "step": 87000
1480
+ },
1481
+ {
1482
+ "epoch": 7.96682920129112,
1483
+ "eval_loss": 0.24233344197273254,
1484
+ "eval_runtime": 654.1461,
1485
+ "eval_samples_per_second": 10.016,
1486
+ "eval_steps_per_second": 10.016,
1487
+ "eval_wer": 0.11500675499514114,
1488
+ "step": 87000
1489
+ },
1490
+ {
1491
+ "epoch": 8.012613602545613,
1492
+ "grad_norm": 2.341566562652588,
1493
+ "learning_rate": 8.110074626865673e-06,
1494
+ "loss": 0.6722,
1495
+ "step": 87500
1496
+ },
1497
+ {
1498
+ "epoch": 8.058398003800106,
1499
+ "grad_norm": 3.6063222885131836,
1500
+ "learning_rate": 7.923507462686567e-06,
1501
+ "loss": 0.6567,
1502
+ "step": 88000
1503
+ },
1504
+ {
1505
+ "epoch": 8.104182405054598,
1506
+ "grad_norm": 3.134598970413208,
1507
+ "learning_rate": 7.736940298507463e-06,
1508
+ "loss": 0.6455,
1509
+ "step": 88500
1510
+ },
1511
+ {
1512
+ "epoch": 8.14996680630909,
1513
+ "grad_norm": 2.014883041381836,
1514
+ "learning_rate": 7.550373134328359e-06,
1515
+ "loss": 0.6618,
1516
+ "step": 89000
1517
+ },
1518
+ {
1519
+ "epoch": 8.195751207563584,
1520
+ "grad_norm": 2.0325255393981934,
1521
+ "learning_rate": 7.363805970149255e-06,
1522
+ "loss": 0.609,
1523
+ "step": 89500
1524
+ },
1525
+ {
1526
+ "epoch": 8.241535608818076,
1527
+ "grad_norm": 2.343564748764038,
1528
+ "learning_rate": 7.177238805970151e-06,
1529
+ "loss": 0.5974,
1530
+ "step": 90000
1531
+ },
1532
+ {
1533
+ "epoch": 8.241535608818076,
1534
+ "eval_loss": 0.24035315215587616,
1535
+ "eval_runtime": 656.8993,
1536
+ "eval_samples_per_second": 9.974,
1537
+ "eval_steps_per_second": 9.974,
1538
+ "eval_wer": 0.11389869877462018,
1539
+ "step": 90000
1540
+ },
1541
+ {
1542
+ "epoch": 8.287320010072568,
1543
+ "grad_norm": 3.118222951889038,
1544
+ "learning_rate": 6.9906716417910455e-06,
1545
+ "loss": 0.6189,
1546
+ "step": 90500
1547
+ },
1548
+ {
1549
+ "epoch": 8.33310441132706,
1550
+ "grad_norm": 1.9298070669174194,
1551
+ "learning_rate": 6.804104477611941e-06,
1552
+ "loss": 0.6321,
1553
+ "step": 91000
1554
+ },
1555
+ {
1556
+ "epoch": 8.378888812581554,
1557
+ "grad_norm": 2.3452179431915283,
1558
+ "learning_rate": 6.617537313432837e-06,
1559
+ "loss": 0.6131,
1560
+ "step": 91500
1561
+ },
1562
+ {
1563
+ "epoch": 8.424673213836046,
1564
+ "grad_norm": 1.260360836982727,
1565
+ "learning_rate": 6.43134328358209e-06,
1566
+ "loss": 0.6485,
1567
+ "step": 92000
1568
+ },
1569
+ {
1570
+ "epoch": 8.470457615090538,
1571
+ "grad_norm": 2.775022506713867,
1572
+ "learning_rate": 6.2447761194029854e-06,
1573
+ "loss": 0.6397,
1574
+ "step": 92500
1575
+ },
1576
+ {
1577
+ "epoch": 8.516242016345032,
1578
+ "grad_norm": 2.084218740463257,
1579
+ "learning_rate": 6.058208955223881e-06,
1580
+ "loss": 0.5994,
1581
+ "step": 93000
1582
+ },
1583
+ {
1584
+ "epoch": 8.516242016345032,
1585
+ "eval_loss": 0.2302207350730896,
1586
+ "eval_runtime": 661.0282,
1587
+ "eval_samples_per_second": 9.912,
1588
+ "eval_steps_per_second": 9.912,
1589
+ "eval_wer": 0.11068118793107534,
1590
+ "step": 93000
1591
+ },
1592
+ {
1593
+ "epoch": 8.562026417599524,
1594
+ "grad_norm": 3.495532751083374,
1595
+ "learning_rate": 5.871641791044776e-06,
1596
+ "loss": 0.6213,
1597
+ "step": 93500
1598
+ },
1599
+ {
1600
+ "epoch": 8.607810818854016,
1601
+ "grad_norm": 1.2543615102767944,
1602
+ "learning_rate": 5.68544776119403e-06,
1603
+ "loss": 0.6118,
1604
+ "step": 94000
1605
+ },
1606
+ {
1607
+ "epoch": 8.653595220108508,
1608
+ "grad_norm": 2.4980568885803223,
1609
+ "learning_rate": 5.498880597014926e-06,
1610
+ "loss": 0.5992,
1611
+ "step": 94500
1612
+ },
1613
+ {
1614
+ "epoch": 8.699379621363002,
1615
+ "grad_norm": 3.1688904762268066,
1616
+ "learning_rate": 5.312313432835822e-06,
1617
+ "loss": 0.629,
1618
+ "step": 95000
1619
+ },
1620
+ {
1621
+ "epoch": 8.745164022617494,
1622
+ "grad_norm": 1.777855396270752,
1623
+ "learning_rate": 5.125746268656717e-06,
1624
+ "loss": 0.5818,
1625
+ "step": 95500
1626
+ },
1627
+ {
1628
+ "epoch": 8.790948423871987,
1629
+ "grad_norm": 4.508159637451172,
1630
+ "learning_rate": 4.939179104477612e-06,
1631
+ "loss": 0.6323,
1632
+ "step": 96000
1633
+ },
1634
+ {
1635
+ "epoch": 8.790948423871987,
1636
+ "eval_loss": 0.23431606590747833,
1637
+ "eval_runtime": 665.4475,
1638
+ "eval_samples_per_second": 9.846,
1639
+ "eval_steps_per_second": 9.846,
1640
+ "eval_wer": 0.11001753928562964,
1641
+ "step": 96000
1642
+ },
1643
+ {
1644
+ "epoch": 8.836732825126479,
1645
+ "grad_norm": 2.9219791889190674,
1646
+ "learning_rate": 4.752611940298508e-06,
1647
+ "loss": 0.5897,
1648
+ "step": 96500
1649
+ },
1650
+ {
1651
+ "epoch": 8.882517226380973,
1652
+ "grad_norm": 1.1439307928085327,
1653
+ "learning_rate": 4.5660447761194035e-06,
1654
+ "loss": 0.611,
1655
+ "step": 97000
1656
+ },
1657
+ {
1658
+ "epoch": 8.928301627635465,
1659
+ "grad_norm": 2.547057628631592,
1660
+ "learning_rate": 4.379477611940298e-06,
1661
+ "loss": 0.6163,
1662
+ "step": 97500
1663
+ },
1664
+ {
1665
+ "epoch": 8.974086028889957,
1666
+ "grad_norm": 1.996872901916504,
1667
+ "learning_rate": 4.193283582089553e-06,
1668
+ "loss": 0.6063,
1669
+ "step": 98000
1670
+ },
1671
+ {
1672
+ "epoch": 9.019870430144449,
1673
+ "grad_norm": 1.6827927827835083,
1674
+ "learning_rate": 4.006716417910448e-06,
1675
+ "loss": 0.594,
1676
+ "step": 98500
1677
+ },
1678
+ {
1679
+ "epoch": 9.065654831398943,
1680
+ "grad_norm": 1.5130544900894165,
1681
+ "learning_rate": 3.820149253731343e-06,
1682
+ "loss": 0.5862,
1683
+ "step": 99000
1684
+ },
1685
+ {
1686
+ "epoch": 9.065654831398943,
1687
+ "eval_loss": 0.22885525226593018,
1688
+ "eval_runtime": 661.9197,
1689
+ "eval_samples_per_second": 9.898,
1690
+ "eval_steps_per_second": 9.898,
1691
+ "eval_wer": 0.10877912350975326,
1692
+ "step": 99000
1693
+ },
1694
+ {
1695
+ "epoch": 9.111439232653435,
1696
+ "grad_norm": 2.690355062484741,
1697
+ "learning_rate": 3.6335820895522388e-06,
1698
+ "loss": 0.5986,
1699
+ "step": 99500
1700
+ },
1701
+ {
1702
+ "epoch": 9.157223633907927,
1703
+ "grad_norm": 1.3742425441741943,
1704
+ "learning_rate": 3.4470149253731346e-06,
1705
+ "loss": 0.5974,
1706
+ "step": 100000
1707
+ },
1708
+ {
1709
+ "epoch": 9.20300803516242,
1710
+ "grad_norm": 2.864790678024292,
1711
+ "learning_rate": 3.2608208955223884e-06,
1712
+ "loss": 0.5916,
1713
+ "step": 100500
1714
+ },
1715
+ {
1716
+ "epoch": 9.248792436416913,
1717
+ "grad_norm": 1.65906822681427,
1718
+ "learning_rate": 3.074253731343284e-06,
1719
+ "loss": 0.5724,
1720
+ "step": 101000
1721
+ },
1722
+ {
1723
+ "epoch": 9.294576837671405,
1724
+ "grad_norm": 1.9599213600158691,
1725
+ "learning_rate": 2.8876865671641795e-06,
1726
+ "loss": 0.5857,
1727
+ "step": 101500
1728
+ },
1729
+ {
1730
+ "epoch": 9.340361238925897,
1731
+ "grad_norm": 3.1552865505218506,
1732
+ "learning_rate": 2.7011194029850745e-06,
1733
+ "loss": 0.6196,
1734
+ "step": 102000
1735
+ },
1736
+ {
1737
+ "epoch": 9.340361238925897,
1738
+ "eval_loss": 0.23824112117290497,
1739
+ "eval_runtime": 661.8149,
1740
+ "eval_samples_per_second": 9.9,
1741
+ "eval_steps_per_second": 9.9,
1742
+ "eval_wer": 0.10700149320945225,
1743
+ "step": 102000
1744
+ },
1745
+ {
1746
+ "epoch": 9.386145640180391,
1747
+ "grad_norm": 2.2402780055999756,
1748
+ "learning_rate": 2.5145522388059703e-06,
1749
+ "loss": 0.6165,
1750
+ "step": 102500
1751
+ },
1752
+ {
1753
+ "epoch": 9.431930041434883,
1754
+ "grad_norm": 3.58760929107666,
1755
+ "learning_rate": 2.327985074626866e-06,
1756
+ "loss": 0.584,
1757
+ "step": 103000
1758
+ },
1759
+ {
1760
+ "epoch": 9.477714442689376,
1761
+ "grad_norm": 4.692273139953613,
1762
+ "learning_rate": 2.1417910447761194e-06,
1763
+ "loss": 0.5801,
1764
+ "step": 103500
1765
+ },
1766
+ {
1767
+ "epoch": 9.523498843943868,
1768
+ "grad_norm": 1.1370134353637695,
1769
+ "learning_rate": 1.955223880597015e-06,
1770
+ "loss": 0.5876,
1771
+ "step": 104000
1772
+ },
1773
+ {
1774
+ "epoch": 9.569283245198362,
1775
+ "grad_norm": 1.0059013366699219,
1776
+ "learning_rate": 1.7686567164179106e-06,
1777
+ "loss": 0.6329,
1778
+ "step": 104500
1779
+ },
1780
+ {
1781
+ "epoch": 9.615067646452854,
1782
+ "grad_norm": 2.666593551635742,
1783
+ "learning_rate": 1.582089552238806e-06,
1784
+ "loss": 0.6077,
1785
+ "step": 105000
1786
+ },
1787
+ {
1788
+ "epoch": 9.615067646452854,
1789
+ "eval_loss": 0.22313112020492554,
1790
+ "eval_runtime": 661.0901,
1791
+ "eval_samples_per_second": 9.911,
1792
+ "eval_steps_per_second": 9.911,
1793
+ "eval_wer": 0.10743997535019317,
1794
+ "step": 105000
1795
+ },
1796
+ {
1797
+ "epoch": 9.660852047707346,
1798
+ "grad_norm": 3.046264410018921,
1799
+ "learning_rate": 1.3955223880597016e-06,
1800
+ "loss": 0.5714,
1801
+ "step": 105500
1802
+ },
1803
+ {
1804
+ "epoch": 9.706636448961838,
1805
+ "grad_norm": 1.997536301612854,
1806
+ "learning_rate": 1.2089552238805971e-06,
1807
+ "loss": 0.6201,
1808
+ "step": 106000
1809
+ },
1810
+ {
1811
+ "epoch": 9.752420850216332,
1812
+ "grad_norm": 3.185781478881836,
1813
+ "learning_rate": 1.0223880597014927e-06,
1814
+ "loss": 0.5782,
1815
+ "step": 106500
1816
+ },
1817
+ {
1818
+ "epoch": 9.798205251470824,
1819
+ "grad_norm": 4.050471782684326,
1820
+ "learning_rate": 8.361940298507463e-07,
1821
+ "loss": 0.5841,
1822
+ "step": 107000
1823
+ },
1824
+ {
1825
+ "epoch": 9.843989652725316,
1826
+ "grad_norm": 2.3069846630096436,
1827
+ "learning_rate": 6.496268656716419e-07,
1828
+ "loss": 0.5601,
1829
+ "step": 107500
1830
+ },
1831
+ {
1832
+ "epoch": 9.88977405397981,
1833
+ "grad_norm": 3.8783347606658936,
1834
+ "learning_rate": 4.6343283582089555e-07,
1835
+ "loss": 0.5949,
1836
+ "step": 108000
1837
+ },
1838
+ {
1839
+ "epoch": 9.88977405397981,
1840
+ "eval_loss": 0.22683313488960266,
1841
+ "eval_runtime": 661.0107,
1842
+ "eval_samples_per_second": 9.912,
1843
+ "eval_steps_per_second": 9.912,
1844
+ "eval_wer": 0.1066222654120547,
1845
+ "step": 108000
1846
+ },
1847
+ {
1848
+ "epoch": 9.935558455234302,
1849
+ "grad_norm": 1.1555778980255127,
1850
+ "learning_rate": 2.768656716417911e-07,
1851
+ "loss": 0.5902,
1852
+ "step": 108500
1853
+ },
1854
+ {
1855
+ "epoch": 9.981342856488794,
1856
+ "grad_norm": 0.8959473371505737,
1857
+ "learning_rate": 9.029850746268658e-08,
1858
+ "loss": 0.5785,
1859
+ "step": 109000
1860
+ }
1861
+ ],
1862
+ "logging_steps": 500,
1863
+ "max_steps": 109200,
1864
+ "num_input_tokens_seen": 0,
1865
+ "num_train_epochs": 10,
1866
+ "save_steps": 3000,
1867
+ "stateful_callbacks": {
1868
+ "TrainerControl": {
1869
+ "args": {
1870
+ "should_epoch_stop": false,
1871
+ "should_evaluate": false,
1872
+ "should_log": false,
1873
+ "should_save": true,
1874
+ "should_training_stop": true
1875
+ },
1876
+ "attributes": {}
1877
+ }
1878
+ },
1879
+ "total_flos": 1.0560519550521973e+20,
1880
+ "train_batch_size": 1,
1881
+ "trial_name": null,
1882
+ "trial_params": null
1883
+ }
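trainer_state.json above is the full training log: loss, gradient norm and learning rate every 500 steps, plus an evaluation (eval_loss, eval_wer) every 3000 steps, with the WER falling from roughly 0.357 at step 3000 to about 0.107 near step 108000 over about 10 epochs (109,200 steps). Note that best_model_checkpoint points at checkpoint-3000 with best_metric 0.357, i.e. the first and highest WER, which suggests best-model selection was not configured to treat lower WER as better. A short sketch for pulling the eval trajectory out of the file:

```python
# Sketch: extract the evaluation trajectory (step, WER, loss) from trainer_state.json.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

evals = [(e["step"], e["eval_wer"], e["eval_loss"])
         for e in state["log_history"] if "eval_wer" in e]
for step, wer, loss in evals:
    print(f"step {step:>6}  eval_wer={wer:.4f}  eval_loss={loss:.4f}")

print("best_metric:", state["best_metric"], "at", state["best_model_checkpoint"])
```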
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fefce5260df9a154d9fd9f7f8daf320142bab60fafb944da85851dc8bc8418a0
+ size 5432
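training_args.bin is a pickled TrainingArguments object (about 5 KB). It can be unpickled to recover the exact hyperparameters of the run; a sketch, assuming transformers is importable so the pickle can reconstruct the class:

```python
# Sketch: recover the exact TrainingArguments used for this run.
# weights_only=False is required on recent torch since this is an arbitrary pickled object.
import torch
import transformers  # needed so pickle can rebuild transformers.TrainingArguments

args = torch.load("training_args.bin", map_location="cpu", weights_only=False)
print(args.learning_rate, args.per_device_train_batch_size, args.num_train_epochs)
```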
vocab.json ADDED
@@ -0,0 +1 @@
+ {"ਂ": 1, "ਅ": 2, "ਆ": 3, "ਇ": 4, "ਈ": 5, "ਉ": 6, "ਊ": 7, "ਏ": 8, "ਐ": 9, "ਓ": 10, "ਔ": 11, "ਕ": 12, "ਖ": 13, "ਗ": 14, "ਘ": 15, "ਚ": 16, "ਛ": 17, "ਜ": 18, "ਝ": 19, "ਟ": 20, "ਠ": 21, "ਡ": 22, "ਢ": 23, "ਣ": 24, "ਤ": 25, "ਥ": 26, "ਦ": 27, "ਧ": 28, "ਨ": 29, "ਪ": 30, "ਫ": 31, "ਬ": 32, "ਭ": 33, "ਮ": 34, "ਯ": 35, "ਰ": 36, "ਲ": 37, "ਲ਼": 38, "ਵ": 39, "ਸ਼": 40, "ਸ": 41, "ਹ": 42, "਼": 43, "ਾ": 44, "ਿ": 45, "ੀ": 46, "ੁ": 47, "ੂ": 48, "ੇ": 49, "ੈ": 50, "ੋ": 51, "ੌ": 52, "੍": 53, "ਖ਼": 54, "ਗ਼": 55, "ਜ਼": 56, "ੜ": 57, "ਫ਼": 58, "ੰ": 59, "ੱ": 60, "ੲ": 61, "ੳ": 62, "|": 0, "[UNK]": 63, "[PAD]": 64}