csikasote committed
Commit 2be848f · verified · 1 Parent(s): 7df66d4

End of training

README.md CHANGED
@@ -3,6 +3,9 @@ library_name: transformers
license: cc-by-nc-4.0
base_model: facebook/mms-1b-all
tags:
+ - automatic-speech-recognition
+ - toigen
+ - mms
- generated_from_trainer
metrics:
- wer
@@ -16,10 +19,10 @@ should probably proofread and complete it, then remove this comment. -->

# mms-1b-toigen-combined-model

- This model is a fine-tuned version of [facebook/mms-1b-all](https://huggingface.co/facebook/mms-1b-all) on an unknown dataset.
+ This model is a fine-tuned version of [facebook/mms-1b-all](https://huggingface.co/facebook/mms-1b-all) on the TOIGEN - TOI dataset.
It achieves the following results on the evaluation set:
- - Loss: 0.3150
- - Wer: 0.3762
+ - Loss: 0.3149
+ - Wer: 0.3760

## Model description
adapter.toi.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f1c91861b4875db1563aca6aa233d91b866b475faaa6912bab5b7f8f8ac8f80a
+ size 8793408
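
The adapter file above follows the MMS adapter naming scheme (adapter.&lt;lang&gt;.safetensors), which suggests this checkpoint is used with the "toi" language code. A minimal inference sketch, assuming the repo id csikasote/mms-1b-toigen-combined-model and the standard transformers MMS adapter API; the actual model card may recommend a different recipe:

```python
# Minimal sketch, assuming the repo id below and 16 kHz mono input;
# the "toi" language code is inferred from adapter.toi.safetensors.
import numpy as np
import torch
from transformers import AutoProcessor, Wav2Vec2ForCTC

model_id = "csikasote/mms-1b-toigen-combined-model"  # assumed repo id

processor = AutoProcessor.from_pretrained(model_id)
model = Wav2Vec2ForCTC.from_pretrained(model_id)

# MMS models switch languages by loading the matching adapter weights
# and tokenizer vocabulary.
processor.tokenizer.set_target_lang("toi")
model.load_adapter("toi")

# Placeholder audio: one second of silence at 16 kHz; replace with real speech.
audio = np.zeros(16_000, dtype=np.float32)

inputs = processor(audio, sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

pred_ids = torch.argmax(logits, dim=-1)[0]
print(processor.decode(pred_ids))
```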
all_results.json ADDED
@@ -0,0 +1,15 @@
+ {
+ "epoch": 12.94854586129754,
+ "eval_loss": 0.31494516134262085,
+ "eval_runtime": 37.9016,
+ "eval_samples": 413,
+ "eval_samples_per_second": 10.897,
+ "eval_steps_per_second": 2.744,
+ "eval_wer": 0.37600736817867836,
+ "total_flos": 1.916331767234902e+19,
+ "train_loss": 1.512242957805765,
+ "train_runtime": 5075.2895,
+ "train_samples": 1785,
+ "train_samples_per_second": 10.551,
+ "train_steps_per_second": 1.318
+ }
eval_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "epoch": 12.94854586129754,
+ "eval_loss": 0.31494516134262085,
+ "eval_runtime": 37.9016,
+ "eval_samples": 413,
+ "eval_samples_per_second": 10.897,
+ "eval_steps_per_second": 2.744,
+ "eval_wer": 0.37600736817867836
+ }
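
The eval_wer above is a word error rate over the 413-sample evaluation set. A minimal sketch of how such a score is computed, assuming the Hugging Face `evaluate` package; the transcripts are placeholders, not samples from the TOIGEN data:

```python
# Minimal sketch, assuming the `evaluate` package is installed.
import evaluate

wer_metric = evaluate.load("wer")

predictions = ["the model transcript of an utterance"]   # hypothetical outputs
references = ["the reference transcript of an utterance"]  # hypothetical ground truth

score = wer_metric.compute(predictions=predictions, references=references)
print(f"WER: {score:.4f}")  # the run above reports eval_wer = 0.3760
```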
runs/Jan03_15-39-30_srvrocgpu011.uct.ac.za/events.out.tfevents.1735916999.srvrocgpu011.uct.ac.za ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cd8ed2ad4508427e6ebce1191a76fbde34b2fe765076c0314dd6cbd750f3e899
+ size 40
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "epoch": 12.94854586129754,
+ "total_flos": 1.916331767234902e+19,
+ "train_loss": 1.512242957805765,
+ "train_runtime": 5075.2895,
+ "train_samples": 1785,
+ "train_samples_per_second": 10.551,
+ "train_steps_per_second": 1.318
+ }
trainer_state.json ADDED
@@ -0,0 +1,515 @@
+ {
+ "best_metric": 0.3137281537055969,
+ "best_model_checkpoint": "/scratch/skscla001/speech/results/mms-1b-toigen-combined-model/checkpoint-2500",
+ "epoch": 12.94854586129754,
+ "eval_steps": 100,
+ "global_step": 2900,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.44742729306487694,
+ "grad_norm": 4.276019096374512,
+ "learning_rate": 0.000285,
+ "loss": 15.204,
+ "step": 100
+ },
+ {
+ "epoch": 0.44742729306487694,
+ "eval_loss": 3.586716651916504,
+ "eval_runtime": 37.7917,
+ "eval_samples_per_second": 10.928,
+ "eval_steps_per_second": 2.752,
+ "eval_wer": 1.0672346304397882,
+ "step": 100
+ },
+ {
+ "epoch": 0.8948545861297539,
+ "grad_norm": 3.4514918327331543,
+ "learning_rate": 0.0002956752655538695,
+ "loss": 4.2355,
+ "step": 200
+ },
+ {
+ "epoch": 0.8948545861297539,
+ "eval_loss": 0.5745174288749695,
+ "eval_runtime": 37.6254,
+ "eval_samples_per_second": 10.977,
+ "eval_steps_per_second": 2.764,
+ "eval_wer": 0.5648169468109602,
+ "step": 200
+ },
+ {
+ "epoch": 1.3400447427293065,
+ "grad_norm": 1.3078416585922241,
+ "learning_rate": 0.00029112291350531105,
+ "loss": 1.4309,
+ "step": 300
+ },
+ {
+ "epoch": 1.3400447427293065,
+ "eval_loss": 0.4451114535331726,
+ "eval_runtime": 37.5628,
+ "eval_samples_per_second": 10.995,
+ "eval_steps_per_second": 2.769,
+ "eval_wer": 0.5084043288049735,
+ "step": 300
+ },
+ {
+ "epoch": 1.7874720357941833,
+ "grad_norm": 2.0819265842437744,
+ "learning_rate": 0.00028657056145675266,
+ "loss": 1.1797,
+ "step": 400
+ },
+ {
+ "epoch": 1.7874720357941833,
+ "eval_loss": 0.4035033583641052,
+ "eval_runtime": 37.4523,
+ "eval_samples_per_second": 11.027,
+ "eval_steps_per_second": 2.777,
+ "eval_wer": 0.4828459590145061,
+ "step": 400
+ },
+ {
+ "epoch": 2.232662192393736,
+ "grad_norm": 1.5729206800460815,
+ "learning_rate": 0.0002820182094081942,
+ "loss": 1.1218,
+ "step": 500
+ },
+ {
+ "epoch": 2.232662192393736,
+ "eval_loss": 0.3912412226200104,
+ "eval_runtime": 37.719,
+ "eval_samples_per_second": 10.949,
+ "eval_steps_per_second": 2.757,
+ "eval_wer": 0.46626755698825695,
+ "step": 500
+ },
+ {
+ "epoch": 2.680089485458613,
+ "grad_norm": 2.3895416259765625,
+ "learning_rate": 0.0002774658573596358,
+ "loss": 1.0287,
+ "step": 600
+ },
+ {
+ "epoch": 2.680089485458613,
+ "eval_loss": 0.3837586045265198,
+ "eval_runtime": 37.3967,
+ "eval_samples_per_second": 11.044,
+ "eval_steps_per_second": 2.781,
+ "eval_wer": 0.4552152889707575,
+ "step": 600
+ },
+ {
+ "epoch": 3.1252796420581657,
+ "grad_norm": 1.5206655263900757,
+ "learning_rate": 0.0002729135053110774,
+ "loss": 0.9773,
+ "step": 700
+ },
+ {
+ "epoch": 3.1252796420581657,
+ "eval_loss": 0.37507927417755127,
+ "eval_runtime": 37.546,
+ "eval_samples_per_second": 11.0,
+ "eval_steps_per_second": 2.77,
+ "eval_wer": 0.4480773658761225,
+ "step": 700
+ },
+ {
+ "epoch": 3.5727069351230423,
+ "grad_norm": 2.374558448791504,
+ "learning_rate": 0.00026836115326251894,
+ "loss": 1.038,
+ "step": 800
+ },
+ {
+ "epoch": 3.5727069351230423,
+ "eval_loss": 0.36651042103767395,
+ "eval_runtime": 37.4792,
+ "eval_samples_per_second": 11.019,
+ "eval_steps_per_second": 2.775,
+ "eval_wer": 0.44209072069997696,
+ "step": 800
+ },
+ {
+ "epoch": 4.017897091722595,
+ "grad_norm": 2.0145270824432373,
+ "learning_rate": 0.00026380880121396055,
+ "loss": 0.9878,
+ "step": 900
+ },
+ {
+ "epoch": 4.017897091722595,
+ "eval_loss": 0.35713937878608704,
+ "eval_runtime": 37.7259,
+ "eval_samples_per_second": 10.947,
+ "eval_steps_per_second": 2.757,
+ "eval_wer": 0.43564356435643564,
+ "step": 900
+ },
+ {
+ "epoch": 4.465324384787472,
+ "grad_norm": 1.52614164352417,
+ "learning_rate": 0.0002592564491654021,
+ "loss": 0.9888,
+ "step": 1000
+ },
+ {
+ "epoch": 4.465324384787472,
+ "eval_loss": 0.35100919008255005,
+ "eval_runtime": 37.6548,
+ "eval_samples_per_second": 10.968,
+ "eval_steps_per_second": 2.762,
+ "eval_wer": 0.4358738199401335,
+ "step": 1000
+ },
+ {
+ "epoch": 4.912751677852349,
+ "grad_norm": 1.7655729055404663,
+ "learning_rate": 0.00025470409711684367,
+ "loss": 0.8904,
+ "step": 1100
+ },
+ {
+ "epoch": 4.912751677852349,
+ "eval_loss": 0.3498484194278717,
+ "eval_runtime": 37.5797,
+ "eval_samples_per_second": 10.99,
+ "eval_steps_per_second": 2.767,
+ "eval_wer": 0.41722311766060327,
+ "step": 1100
+ },
+ {
+ "epoch": 5.357941834451902,
+ "grad_norm": 2.1046483516693115,
+ "learning_rate": 0.0002501517450682852,
+ "loss": 0.8178,
+ "step": 1200
+ },
+ {
+ "epoch": 5.357941834451902,
+ "eval_loss": 0.34563127160072327,
+ "eval_runtime": 37.6354,
+ "eval_samples_per_second": 10.974,
+ "eval_steps_per_second": 2.763,
+ "eval_wer": 0.4151508174073221,
+ "step": 1200
+ },
+ {
+ "epoch": 5.805369127516778,
+ "grad_norm": 2.459388494491577,
+ "learning_rate": 0.00024559939301972683,
+ "loss": 0.9608,
+ "step": 1300
+ },
+ {
+ "epoch": 5.805369127516778,
+ "eval_loss": 0.33835238218307495,
+ "eval_runtime": 37.9136,
+ "eval_samples_per_second": 10.893,
+ "eval_steps_per_second": 2.743,
+ "eval_wer": 0.4183743955790928,
+ "step": 1300
+ },
+ {
+ "epoch": 6.250559284116331,
+ "grad_norm": 4.638967514038086,
+ "learning_rate": 0.00024104704097116842,
+ "loss": 0.9166,
+ "step": 1400
+ },
+ {
+ "epoch": 6.250559284116331,
+ "eval_loss": 0.34155401587486267,
+ "eval_runtime": 37.6783,
+ "eval_samples_per_second": 10.961,
+ "eval_steps_per_second": 2.76,
+ "eval_wer": 0.4098549389822703,
+ "step": 1400
+ },
+ {
+ "epoch": 6.697986577181208,
+ "grad_norm": 1.7996180057525635,
+ "learning_rate": 0.00023649468892261,
+ "loss": 0.8623,
+ "step": 1500
+ },
+ {
+ "epoch": 6.697986577181208,
+ "eval_loss": 0.33514168858528137,
+ "eval_runtime": 37.6459,
+ "eval_samples_per_second": 10.971,
+ "eval_steps_per_second": 2.763,
+ "eval_wer": 0.403407782638729,
+ "step": 1500
+ },
+ {
+ "epoch": 7.143176733780761,
+ "grad_norm": 1.3787713050842285,
+ "learning_rate": 0.00023194233687405159,
+ "loss": 0.823,
+ "step": 1600
+ },
+ {
+ "epoch": 7.143176733780761,
+ "eval_loss": 0.3305976688861847,
+ "eval_runtime": 37.6795,
+ "eval_samples_per_second": 10.961,
+ "eval_steps_per_second": 2.76,
+ "eval_wer": 0.3976513930462814,
+ "step": 1600
+ },
+ {
+ "epoch": 7.590604026845638,
+ "grad_norm": 1.2738311290740967,
+ "learning_rate": 0.00022738998482549317,
+ "loss": 0.8495,
+ "step": 1700
+ },
+ {
+ "epoch": 7.590604026845638,
+ "eval_loss": 0.3321482837200165,
+ "eval_runtime": 37.8989,
+ "eval_samples_per_second": 10.897,
+ "eval_steps_per_second": 2.744,
+ "eval_wer": 0.393737048123417,
+ "step": 1700
+ },
+ {
+ "epoch": 8.03579418344519,
+ "grad_norm": 2.3098723888397217,
+ "learning_rate": 0.00022283763277693473,
+ "loss": 0.8691,
+ "step": 1800
+ },
+ {
+ "epoch": 8.03579418344519,
+ "eval_loss": 0.3243669867515564,
+ "eval_runtime": 37.6551,
+ "eval_samples_per_second": 10.968,
+ "eval_steps_per_second": 2.762,
+ "eval_wer": 0.39857241538107296,
+ "step": 1800
+ },
+ {
+ "epoch": 8.483221476510067,
+ "grad_norm": 2.650418281555176,
+ "learning_rate": 0.0002182852807283763,
+ "loss": 0.8225,
+ "step": 1900
+ },
+ {
+ "epoch": 8.483221476510067,
+ "eval_loss": 0.32606178522109985,
+ "eval_runtime": 37.8584,
+ "eval_samples_per_second": 10.909,
+ "eval_steps_per_second": 2.747,
+ "eval_wer": 0.39557909279300024,
+ "step": 1900
+ },
+ {
+ "epoch": 8.930648769574944,
+ "grad_norm": 1.043278694152832,
+ "learning_rate": 0.0002137329286798179,
+ "loss": 0.8193,
+ "step": 2000
+ },
+ {
+ "epoch": 8.930648769574944,
+ "eval_loss": 0.32239243388175964,
+ "eval_runtime": 37.9035,
+ "eval_samples_per_second": 10.896,
+ "eval_steps_per_second": 2.744,
+ "eval_wer": 0.39212525903753165,
+ "step": 2000
+ },
+ {
+ "epoch": 9.375838926174497,
+ "grad_norm": 1.5510300397872925,
+ "learning_rate": 0.00020918057663125948,
+ "loss": 0.79,
+ "step": 2100
+ },
+ {
+ "epoch": 9.375838926174497,
+ "eval_loss": 0.3181034028530121,
+ "eval_runtime": 38.1472,
+ "eval_samples_per_second": 10.826,
+ "eval_steps_per_second": 2.726,
+ "eval_wer": 0.3884411696983652,
+ "step": 2100
+ },
+ {
+ "epoch": 9.823266219239374,
+ "grad_norm": 2.215151071548462,
+ "learning_rate": 0.00020462822458270106,
+ "loss": 0.8035,
+ "step": 2200
+ },
+ {
+ "epoch": 9.823266219239374,
+ "eval_loss": 0.3272099196910858,
+ "eval_runtime": 37.6966,
+ "eval_samples_per_second": 10.956,
+ "eval_steps_per_second": 2.759,
+ "eval_wer": 0.3886714252820631,
+ "step": 2200
+ },
+ {
+ "epoch": 10.268456375838927,
+ "grad_norm": 2.966920852661133,
+ "learning_rate": 0.00020007587253414265,
+ "loss": 0.8391,
+ "step": 2300
+ },
+ {
+ "epoch": 10.268456375838927,
+ "eval_loss": 0.31765347719192505,
+ "eval_runtime": 37.7253,
+ "eval_samples_per_second": 10.948,
+ "eval_steps_per_second": 2.757,
+ "eval_wer": 0.3893621920331568,
+ "step": 2300
+ },
+ {
+ "epoch": 10.715883668903803,
+ "grad_norm": 21.085424423217773,
+ "learning_rate": 0.00019556904400606977,
+ "loss": 0.8055,
+ "step": 2400
+ },
+ {
+ "epoch": 10.715883668903803,
+ "eval_loss": 0.32548072934150696,
+ "eval_runtime": 37.805,
+ "eval_samples_per_second": 10.924,
+ "eval_steps_per_second": 2.751,
+ "eval_wer": 0.3790006907667511,
+ "step": 2400
+ },
+ {
+ "epoch": 11.161073825503356,
+ "grad_norm": 1.2982721328735352,
+ "learning_rate": 0.00019101669195751135,
+ "loss": 0.7124,
+ "step": 2500
+ },
+ {
+ "epoch": 11.161073825503356,
+ "eval_loss": 0.3137281537055969,
+ "eval_runtime": 38.012,
+ "eval_samples_per_second": 10.865,
+ "eval_steps_per_second": 2.736,
+ "eval_wer": 0.39120423670274,
+ "step": 2500
+ },
+ {
+ "epoch": 11.608501118568233,
+ "grad_norm": 4.419574737548828,
+ "learning_rate": 0.00018646433990895294,
+ "loss": 0.7747,
+ "step": 2600
+ },
+ {
+ "epoch": 11.608501118568233,
+ "eval_loss": 0.32637959718704224,
+ "eval_runtime": 37.8933,
+ "eval_samples_per_second": 10.899,
+ "eval_steps_per_second": 2.745,
+ "eval_wer": 0.38498733594289664,
+ "step": 2600
+ },
+ {
+ "epoch": 12.053691275167786,
+ "grad_norm": 6.3560638427734375,
+ "learning_rate": 0.00018191198786039452,
+ "loss": 0.795,
+ "step": 2700
+ },
+ {
+ "epoch": 12.053691275167786,
+ "eval_loss": 0.3149665594100952,
+ "eval_runtime": 37.7892,
+ "eval_samples_per_second": 10.929,
+ "eval_steps_per_second": 2.752,
+ "eval_wer": 0.3852175915265945,
+ "step": 2700
+ },
+ {
+ "epoch": 12.501118568232663,
+ "grad_norm": 9.020240783691406,
+ "learning_rate": 0.0001773596358118361,
+ "loss": 0.7749,
+ "step": 2800
+ },
+ {
+ "epoch": 12.501118568232663,
+ "eval_loss": 0.3177170157432556,
+ "eval_runtime": 37.7963,
+ "eval_samples_per_second": 10.927,
+ "eval_steps_per_second": 2.752,
+ "eval_wer": 0.3806124798526364,
+ "step": 2800
+ },
+ {
+ "epoch": 12.94854586129754,
+ "grad_norm": 1.3808608055114746,
+ "learning_rate": 0.0001728072837632777,
+ "loss": 0.7364,
+ "step": 2900
+ },
+ {
+ "epoch": 12.94854586129754,
+ "eval_loss": 0.3149511516094208,
+ "eval_runtime": 38.0376,
+ "eval_samples_per_second": 10.858,
+ "eval_steps_per_second": 2.734,
+ "eval_wer": 0.37623762376237624,
+ "step": 2900
+ },
+ {
+ "epoch": 12.94854586129754,
+ "step": 2900,
+ "total_flos": 1.916331767234902e+19,
+ "train_loss": 1.512242957805765,
+ "train_runtime": 5075.2895,
+ "train_samples_per_second": 10.551,
+ "train_steps_per_second": 1.318
+ }
+ ],
+ "logging_steps": 100,
+ "max_steps": 6690,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 30,
+ "save_steps": 400,
+ "stateful_callbacks": {
+ "EarlyStoppingCallback": {
+ "args": {
+ "early_stopping_patience": 4,
+ "early_stopping_threshold": 0.0
+ },
+ "attributes": {
+ "early_stopping_patience_counter": 3
+ }
+ },
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 1.916331767234902e+19,
+ "train_batch_size": 4,
+ "trial_name": null,
+ "trial_params": null
+ }
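
The stateful_callbacks block above records an EarlyStoppingCallback with patience 4 and threshold 0.0, with the best checkpoint taken at step 2500 (the lowest eval loss). A hedged sketch of how that setup is typically wired into a transformers Trainer; the callback arguments and the steps/epoch values echo the JSON above, while everything else is illustrative, since the training script is not part of this commit:

```python
# Hedged sketch of the early-stopping configuration implied by trainer_state.json;
# argument values marked with comments are taken from the JSON, the rest are assumptions.
from transformers import EarlyStoppingCallback, TrainingArguments

args = TrainingArguments(
    output_dir="mms-1b-toigen-combined-model",
    per_device_train_batch_size=4,   # "train_batch_size": 4
    num_train_epochs=30,             # "num_train_epochs": 30
    eval_strategy="steps",           # named `evaluation_strategy` on transformers < 4.41
    eval_steps=100,                  # "eval_steps": 100
    save_steps=400,                  # "save_steps": 400
    logging_steps=100,               # "logging_steps": 100
    load_best_model_at_end=True,     # needed so the best checkpoint (step 2500) is restored
    metric_for_best_model="loss",    # "best_metric" above is an eval loss
    greater_is_better=False,
)

early_stopping = EarlyStoppingCallback(
    early_stopping_patience=4,       # "early_stopping_patience": 4
    early_stopping_threshold=0.0,    # "early_stopping_threshold": 0.0
)
# The callback is then passed to the Trainer as callbacks=[early_stopping].
```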