DanJoshua committed
Commit 6dfe962 · verified · 1 Parent(s): dc7e9ee

End of training
all_results.json CHANGED
@@ -1,8 +1,8 @@
  {
- "epoch": 9.09032258064516,
- "eval_accuracy": 0.8,
- "eval_loss": 0.7193946838378906,
- "eval_runtime": 151.4396,
- "eval_samples_per_second": 2.641,
- "eval_steps_per_second": 0.33
+ "epoch": 14.061648745519713,
+ "eval_accuracy": 0.81875,
+ "eval_loss": 0.6550884246826172,
+ "eval_runtime": 306.835,
+ "eval_samples_per_second": 2.607,
+ "eval_steps_per_second": 0.326
  }
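The same metric keys recur in `test_results.json` below, so the change is easiest to read by diffing the two snapshots programmatically. A minimal sketch, with the values copied from the diff above; the `load_metrics` helper and the local `all_results.json` path are illustrative assumptions, not part of this commit:

```python
import json
from pathlib import Path

# Metric values copied from the diff above: parent commit dc7e9ee vs. this commit 6dfe962.
OLD = {"epoch": 9.09032258064516, "eval_accuracy": 0.8, "eval_loss": 0.7193946838378906}
NEW = {"epoch": 14.061648745519713, "eval_accuracy": 0.81875, "eval_loss": 0.6550884246826172}

def load_metrics(path: str = "all_results.json") -> dict:
    """Load a checked-out metrics file if present; otherwise fall back to the diff values."""
    p = Path(path)  # hypothetical local path, only used if the repo is checked out
    return json.loads(p.read_text()) if p.exists() else NEW

current = load_metrics()
for key, old_value in OLD.items():
    print(f"{key}: {old_value:.4f} -> {current.get(key, float('nan')):.4f}")
```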
runs/Jul21_20-42-21_a62a56ce0424/events.out.tfevents.1721608783.a62a56ce0424.5680.1 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7a63e23e4ab1595b272dd3222027f64ca94f304dd09bf39740131b669355f362
- size 411
+ oid sha256:31b18516b93fba2d4fea00053cdb8e5d832486101d4017aef5348963859ad83f
+ size 734
test_results.json CHANGED
@@ -1,8 +1,8 @@
  {
- "epoch": 9.09032258064516,
- "eval_accuracy": 0.8,
- "eval_loss": 0.7193946838378906,
- "eval_runtime": 151.4396,
- "eval_samples_per_second": 2.641,
- "eval_steps_per_second": 0.33
+ "epoch": 14.061648745519713,
+ "eval_accuracy": 0.81875,
+ "eval_loss": 0.6550884246826172,
+ "eval_runtime": 306.835,
+ "eval_samples_per_second": 2.607,
+ "eval_steps_per_second": 0.326
  }
trainer_state.json CHANGED
@@ -1,784 +1,2131 @@
  {
- "best_metric": 0.9473684210526315,
- "best_model_checkpoint": "videomae-base-finetuned-rwf2000-subset/checkpoint-930",
- "epoch": 9.09032258064516,
  "eval_steps": 500,
- "global_step": 930,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
11
  {
12
- "epoch": 0.010752688172043012,
13
- "grad_norm": 5.772064208984375,
14
- "learning_rate": 5.376344086021506e-06,
15
- "loss": 0.8938,
16
  "step": 10
17
  },
18
  {
19
- "epoch": 0.021505376344086023,
20
- "grad_norm": 6.392468452453613,
21
- "learning_rate": 1.0752688172043012e-05,
22
- "loss": 0.7152,
23
  "step": 20
24
  },
25
  {
26
- "epoch": 0.03225806451612903,
27
- "grad_norm": 5.6630167961120605,
28
- "learning_rate": 1.6129032258064517e-05,
29
- "loss": 0.7008,
30
  "step": 30
31
  },
32
  {
33
- "epoch": 0.043010752688172046,
34
- "grad_norm": 7.545820713043213,
35
- "learning_rate": 2.1505376344086024e-05,
36
- "loss": 0.661,
37
  "step": 40
38
  },
39
  {
40
- "epoch": 0.053763440860215055,
41
- "grad_norm": 4.271369457244873,
42
- "learning_rate": 2.6881720430107527e-05,
43
- "loss": 0.6729,
44
  "step": 50
45
  },
46
  {
47
- "epoch": 0.06451612903225806,
48
- "grad_norm": 6.001001358032227,
49
- "learning_rate": 3.2258064516129034e-05,
50
- "loss": 0.6542,
51
  "step": 60
52
  },
53
  {
54
- "epoch": 0.07526881720430108,
55
- "grad_norm": 5.876163005828857,
56
- "learning_rate": 3.763440860215054e-05,
57
- "loss": 0.6541,
58
  "step": 70
59
  },
60
  {
61
- "epoch": 0.08602150537634409,
62
- "grad_norm": 4.0299906730651855,
63
- "learning_rate": 4.301075268817205e-05,
64
- "loss": 0.6077,
65
  "step": 80
66
  },
67
  {
68
- "epoch": 0.0967741935483871,
69
- "grad_norm": 5.537106990814209,
70
- "learning_rate": 4.8387096774193554e-05,
71
- "loss": 0.5577,
72
  "step": 90
73
  },
74
  {
75
- "epoch": 0.1010752688172043,
76
- "eval_accuracy": 0.7236842105263158,
77
- "eval_loss": 0.4835163354873657,
78
- "eval_runtime": 26.2728,
79
- "eval_samples_per_second": 2.893,
80
- "eval_steps_per_second": 0.381,
81
- "step": 94
82
- },
83
- {
84
- "epoch": 1.0064516129032257,
85
- "grad_norm": 4.136658191680908,
86
- "learning_rate": 4.9581839904420555e-05,
87
- "loss": 0.5856,
88
  "step": 100
89
  },
90
  {
91
- "epoch": 1.0172043010752687,
92
- "grad_norm": 7.560351848602295,
93
- "learning_rate": 4.898446833930705e-05,
94
- "loss": 0.5146,
95
  "step": 110
96
  },
97
  {
98
- "epoch": 1.027956989247312,
99
- "grad_norm": 3.6854827404022217,
100
- "learning_rate": 4.8387096774193554e-05,
101
- "loss": 0.6122,
102
  "step": 120
103
  },
104
  {
105
- "epoch": 1.038709677419355,
106
- "grad_norm": 3.7454755306243896,
107
- "learning_rate": 4.778972520908005e-05,
108
- "loss": 0.6283,
109
  "step": 130
110
  },
111
  {
112
- "epoch": 1.049462365591398,
113
- "grad_norm": 3.77704119682312,
114
- "learning_rate": 4.7192353643966546e-05,
115
- "loss": 0.5195,
116
  "step": 140
117
  },
118
  {
119
- "epoch": 1.060215053763441,
120
- "grad_norm": 6.745356559753418,
121
- "learning_rate": 4.659498207885305e-05,
122
- "loss": 0.605,
123
  "step": 150
124
  },
125
  {
126
- "epoch": 1.070967741935484,
127
- "grad_norm": 5.097397327423096,
128
- "learning_rate": 4.5997610513739546e-05,
129
- "loss": 0.4275,
130
  "step": 160
131
  },
132
  {
133
- "epoch": 1.081720430107527,
134
- "grad_norm": 11.386567115783691,
135
- "learning_rate": 4.540023894862604e-05,
136
- "loss": 0.4862,
137
  "step": 170
138
  },
139
  {
140
- "epoch": 1.09247311827957,
141
- "grad_norm": 2.083325147628784,
142
- "learning_rate": 4.4802867383512545e-05,
143
- "loss": 0.6696,
144
  "step": 180
145
  },
146
  {
147
- "epoch": 1.1010752688172043,
148
- "eval_accuracy": 0.8421052631578947,
149
- "eval_loss": 0.43573570251464844,
150
- "eval_runtime": 21.5443,
151
- "eval_samples_per_second": 3.528,
152
- "eval_steps_per_second": 0.464,
153
- "step": 188
154
  },
155
  {
156
- "epoch": 2.0021505376344084,
157
- "grad_norm": 8.380154609680176,
158
- "learning_rate": 4.420549581839905e-05,
159
- "loss": 0.5353,
160
  "step": 190
161
  },
162
  {
163
- "epoch": 2.0129032258064514,
164
- "grad_norm": 9.097419738769531,
165
- "learning_rate": 4.360812425328555e-05,
166
- "loss": 0.4885,
167
  "step": 200
168
  },
169
  {
170
- "epoch": 2.0236559139784944,
171
- "grad_norm": 3.875176191329956,
172
- "learning_rate": 4.301075268817205e-05,
173
- "loss": 0.505,
174
  "step": 210
175
  },
176
  {
177
- "epoch": 2.0344086021505374,
178
- "grad_norm": 8.177785873413086,
179
- "learning_rate": 4.241338112305854e-05,
180
- "loss": 0.6089,
181
  "step": 220
182
  },
183
  {
184
- "epoch": 2.0451612903225804,
185
- "grad_norm": 14.289607048034668,
186
- "learning_rate": 4.1816009557945046e-05,
187
- "loss": 0.5993,
188
  "step": 230
189
  },
190
  {
191
- "epoch": 2.055913978494624,
192
- "grad_norm": 4.532449722290039,
193
- "learning_rate": 4.121863799283154e-05,
194
- "loss": 0.7074,
195
  "step": 240
196
  },
197
  {
198
- "epoch": 2.066666666666667,
199
- "grad_norm": 4.093668460845947,
200
- "learning_rate": 4.062126642771804e-05,
201
- "loss": 0.4219,
202
  "step": 250
203
  },
204
  {
205
- "epoch": 2.07741935483871,
206
- "grad_norm": 14.380650520324707,
207
- "learning_rate": 4.002389486260454e-05,
208
- "loss": 0.4474,
209
  "step": 260
210
  },
211
  {
212
- "epoch": 2.088172043010753,
213
- "grad_norm": 6.174684047698975,
214
- "learning_rate": 3.9426523297491045e-05,
215
- "loss": 0.5363,
216
  "step": 270
217
  },
218
  {
219
- "epoch": 2.098924731182796,
220
- "grad_norm": 11.756628036499023,
221
- "learning_rate": 3.882915173237754e-05,
222
- "loss": 0.5608,
223
  "step": 280
224
  },
225
  {
226
- "epoch": 2.1010752688172043,
227
- "eval_accuracy": 0.8289473684210527,
228
- "eval_loss": 0.479496568441391,
229
- "eval_runtime": 21.9809,
230
- "eval_samples_per_second": 3.458,
231
- "eval_steps_per_second": 0.455,
232
- "step": 282
233
- },
234
- {
235
- "epoch": 3.0086021505376346,
236
- "grad_norm": 2.0791425704956055,
237
- "learning_rate": 3.8231780167264044e-05,
238
- "loss": 0.5157,
239
  "step": 290
240
  },
241
  {
242
- "epoch": 3.0193548387096776,
243
- "grad_norm": 6.193145751953125,
244
- "learning_rate": 3.763440860215054e-05,
245
- "loss": 0.4626,
246
  "step": 300
247
  },
248
  {
249
- "epoch": 3.0301075268817206,
250
- "grad_norm": 8.216785430908203,
251
- "learning_rate": 3.7037037037037037e-05,
252
- "loss": 0.6098,
253
  "step": 310
254
  },
255
  {
256
- "epoch": 3.0408602150537636,
257
- "grad_norm": 3.388540267944336,
258
- "learning_rate": 3.643966547192354e-05,
259
- "loss": 0.3466,
260
  "step": 320
261
  },
262
  {
263
- "epoch": 3.0516129032258066,
264
- "grad_norm": 3.5496809482574463,
265
- "learning_rate": 3.5842293906810036e-05,
266
- "loss": 0.3225,
267
  "step": 330
268
  },
269
  {
270
- "epoch": 3.0623655913978496,
271
- "grad_norm": 0.8877809047698975,
272
- "learning_rate": 3.524492234169653e-05,
273
- "loss": 0.513,
274
  "step": 340
275
  },
276
  {
277
- "epoch": 3.0731182795698926,
278
- "grad_norm": 7.285158634185791,
279
- "learning_rate": 3.4647550776583035e-05,
280
- "loss": 0.6786,
281
  "step": 350
282
  },
283
  {
284
- "epoch": 3.0838709677419356,
285
- "grad_norm": 9.199494361877441,
286
- "learning_rate": 3.405017921146954e-05,
287
- "loss": 0.575,
288
  "step": 360
289
  },
290
  {
291
- "epoch": 3.0946236559139786,
292
- "grad_norm": 2.362581253051758,
293
- "learning_rate": 3.3452807646356034e-05,
294
- "loss": 0.3918,
295
  "step": 370
296
  },
297
  {
298
- "epoch": 3.1010752688172043,
299
- "eval_accuracy": 0.7894736842105263,
300
- "eval_loss": 0.46907052397727966,
301
- "eval_runtime": 21.3364,
302
- "eval_samples_per_second": 3.562,
303
- "eval_steps_per_second": 0.469,
304
- "step": 376
305
  },
306
  {
307
- "epoch": 4.004301075268817,
308
- "grad_norm": 3.9972550868988037,
309
- "learning_rate": 3.285543608124254e-05,
310
- "loss": 0.4378,
311
  "step": 380
312
  },
313
  {
314
- "epoch": 4.01505376344086,
315
- "grad_norm": 6.704283237457275,
316
- "learning_rate": 3.2258064516129034e-05,
317
- "loss": 0.4316,
318
  "step": 390
319
  },
320
  {
321
- "epoch": 4.025806451612903,
322
- "grad_norm": 11.435125350952148,
323
- "learning_rate": 3.1660692951015537e-05,
324
- "loss": 0.5877,
325
  "step": 400
326
  },
327
  {
328
- "epoch": 4.036559139784946,
329
- "grad_norm": 7.740775108337402,
330
- "learning_rate": 3.106332138590203e-05,
331
- "loss": 0.3589,
332
  "step": 410
333
  },
334
  {
335
- "epoch": 4.047311827956989,
336
- "grad_norm": 2.672539710998535,
337
- "learning_rate": 3.046594982078853e-05,
338
- "loss": 0.3523,
339
  "step": 420
340
  },
341
  {
342
- "epoch": 4.058064516129032,
343
- "grad_norm": 3.296454429626465,
344
- "learning_rate": 2.9868578255675032e-05,
345
- "loss": 0.3457,
346
  "step": 430
347
  },
348
  {
349
- "epoch": 4.068817204301075,
350
- "grad_norm": 11.174155235290527,
351
- "learning_rate": 2.9271206690561532e-05,
352
- "loss": 0.4028,
353
  "step": 440
354
  },
355
  {
356
- "epoch": 4.079569892473118,
357
- "grad_norm": 2.355884313583374,
358
- "learning_rate": 2.8673835125448028e-05,
359
- "loss": 0.3813,
360
  "step": 450
361
  },
362
  {
363
- "epoch": 4.090322580645161,
364
- "grad_norm": 12.960975646972656,
365
- "learning_rate": 2.807646356033453e-05,
366
- "loss": 0.2315,
367
  "step": 460
368
  },
369
  {
370
- "epoch": 4.101075268817205,
371
- "grad_norm": 14.71526050567627,
372
- "learning_rate": 2.747909199522103e-05,
373
- "loss": 0.4756,
374
- "step": 470
375
- },
376
- {
377
- "epoch": 4.101075268817205,
378
- "eval_accuracy": 0.8421052631578947,
379
- "eval_loss": 0.44611287117004395,
380
- "eval_runtime": 27.2076,
381
- "eval_samples_per_second": 2.793,
382
- "eval_steps_per_second": 0.368,
383
  "step": 470
384
  },
385
  {
386
- "epoch": 5.010752688172043,
387
- "grad_norm": 14.609360694885254,
388
- "learning_rate": 2.6881720430107527e-05,
389
- "loss": 0.5645,
390
  "step": 480
391
  },
392
  {
393
- "epoch": 5.021505376344086,
394
- "grad_norm": 3.8693418502807617,
395
- "learning_rate": 2.628434886499403e-05,
396
- "loss": 0.3794,
397
  "step": 490
398
  },
399
  {
400
- "epoch": 5.032258064516129,
401
- "grad_norm": 1.618184208869934,
402
- "learning_rate": 2.5686977299880526e-05,
403
- "loss": 0.3139,
404
  "step": 500
405
  },
406
  {
407
- "epoch": 5.043010752688172,
408
- "grad_norm": 1.95063054561615,
409
- "learning_rate": 2.5089605734767026e-05,
410
- "loss": 0.3952,
411
  "step": 510
412
  },
413
  {
414
- "epoch": 5.053763440860215,
415
- "grad_norm": 9.131407737731934,
416
- "learning_rate": 2.4492234169653525e-05,
417
- "loss": 0.4587,
418
  "step": 520
419
  },
420
  {
421
- "epoch": 5.064516129032258,
422
- "grad_norm": 4.612659931182861,
423
- "learning_rate": 2.3894862604540025e-05,
424
- "loss": 0.2986,
425
  "step": 530
426
  },
427
  {
428
- "epoch": 5.075268817204301,
429
- "grad_norm": 3.612903594970703,
430
- "learning_rate": 2.3297491039426525e-05,
431
- "loss": 0.2318,
432
  "step": 540
433
  },
434
  {
435
- "epoch": 5.086021505376344,
436
- "grad_norm": 4.401362419128418,
437
- "learning_rate": 2.270011947431302e-05,
438
- "loss": 0.41,
439
  "step": 550
440
  },
441
  {
442
- "epoch": 5.096774193548387,
443
- "grad_norm": 12.280633926391602,
444
- "learning_rate": 2.2102747909199524e-05,
445
- "loss": 0.3794,
446
  "step": 560
447
  },
448
  {
449
- "epoch": 5.101075268817205,
450
- "eval_accuracy": 0.7368421052631579,
451
- "eval_loss": 0.8676896095275879,
452
- "eval_runtime": 21.0222,
453
- "eval_samples_per_second": 3.615,
454
- "eval_steps_per_second": 0.476,
455
- "step": 564
456
  },
457
  {
458
- "epoch": 6.006451612903226,
459
- "grad_norm": 1.297224998474121,
460
- "learning_rate": 2.1505376344086024e-05,
461
- "loss": 0.2989,
462
  "step": 570
463
  },
464
  {
465
- "epoch": 6.017204301075269,
466
- "grad_norm": 0.6060790419578552,
467
- "learning_rate": 2.0908004778972523e-05,
468
- "loss": 0.3388,
469
  "step": 580
470
  },
471
  {
472
- "epoch": 6.027956989247312,
473
- "grad_norm": 20.182498931884766,
474
- "learning_rate": 2.031063321385902e-05,
475
- "loss": 0.3357,
476
  "step": 590
477
  },
478
  {
479
- "epoch": 6.038709677419355,
480
- "grad_norm": 2.371579170227051,
481
- "learning_rate": 1.9713261648745522e-05,
482
- "loss": 0.2741,
483
  "step": 600
484
  },
485
  {
486
- "epoch": 6.049462365591398,
487
- "grad_norm": 11.418489456176758,
488
- "learning_rate": 1.9115890083632022e-05,
489
- "loss": 0.424,
490
  "step": 610
491
  },
492
  {
493
- "epoch": 6.060215053763441,
494
- "grad_norm": 2.740072727203369,
495
- "learning_rate": 1.8518518518518518e-05,
496
- "loss": 0.4774,
497
  "step": 620
498
  },
499
  {
500
- "epoch": 6.070967741935484,
501
- "grad_norm": 14.478816032409668,
502
- "learning_rate": 1.7921146953405018e-05,
503
- "loss": 0.2869,
504
  "step": 630
505
  },
506
  {
507
- "epoch": 6.081720430107527,
508
- "grad_norm": 19.758691787719727,
509
- "learning_rate": 1.7323775388291518e-05,
510
- "loss": 0.3443,
511
  "step": 640
512
  },
513
  {
514
- "epoch": 6.09247311827957,
515
- "grad_norm": 3.96628737449646,
516
- "learning_rate": 1.6726403823178017e-05,
517
- "loss": 0.3083,
518
  "step": 650
519
  },
520
  {
521
- "epoch": 6.101075268817205,
522
- "eval_accuracy": 0.868421052631579,
523
- "eval_loss": 0.39320939779281616,
524
- "eval_runtime": 25.7723,
525
- "eval_samples_per_second": 2.949,
526
- "eval_steps_per_second": 0.388,
527
- "step": 658
528
- },
529
- {
530
- "epoch": 7.002150537634408,
531
- "grad_norm": 14.803962707519531,
532
- "learning_rate": 1.6129032258064517e-05,
533
- "loss": 0.5458,
534
  "step": 660
535
  },
536
  {
537
- "epoch": 7.012903225806451,
538
- "grad_norm": 12.291862487792969,
539
- "learning_rate": 1.5531660692951016e-05,
540
- "loss": 0.3551,
541
  "step": 670
542
  },
543
  {
544
- "epoch": 7.023655913978494,
545
- "grad_norm": 16.88749885559082,
546
- "learning_rate": 1.4934289127837516e-05,
547
- "loss": 0.3371,
548
  "step": 680
549
  },
550
  {
551
- "epoch": 7.034408602150537,
552
- "grad_norm": 1.4436932802200317,
553
- "learning_rate": 1.4336917562724014e-05,
554
- "loss": 0.3117,
555
  "step": 690
556
  },
557
  {
558
- "epoch": 7.04516129032258,
559
- "grad_norm": 1.6931445598602295,
560
- "learning_rate": 1.3739545997610515e-05,
561
- "loss": 0.1534,
562
  "step": 700
563
  },
564
  {
565
- "epoch": 7.055913978494623,
566
- "grad_norm": 3.7684617042541504,
567
- "learning_rate": 1.3142174432497015e-05,
568
- "loss": 0.336,
569
  "step": 710
570
  },
571
  {
572
- "epoch": 7.066666666666666,
573
- "grad_norm": 11.843338966369629,
574
- "learning_rate": 1.2544802867383513e-05,
575
- "loss": 0.3442,
576
  "step": 720
577
  },
578
  {
579
- "epoch": 7.077419354838709,
580
- "grad_norm": 32.888648986816406,
581
- "learning_rate": 1.1947431302270013e-05,
582
- "loss": 0.6003,
583
  "step": 730
584
  },
585
  {
586
- "epoch": 7.088172043010752,
587
- "grad_norm": 13.602355003356934,
588
- "learning_rate": 1.135005973715651e-05,
589
- "loss": 0.411,
590
  "step": 740
591
  },
592
  {
593
- "epoch": 7.098924731182795,
594
- "grad_norm": 0.6173302531242371,
595
- "learning_rate": 1.0752688172043012e-05,
596
- "loss": 0.3336,
597
- "step": 750
598
  },
599
  {
600
- "epoch": 7.101075268817205,
601
- "eval_accuracy": 0.9078947368421053,
602
- "eval_loss": 0.32602888345718384,
603
- "eval_runtime": 22.1675,
604
- "eval_samples_per_second": 3.428,
605
- "eval_steps_per_second": 0.451,
606
- "step": 752
607
  },
608
  {
609
- "epoch": 8.008602150537634,
610
- "grad_norm": 9.76364803314209,
611
- "learning_rate": 1.015531660692951e-05,
612
- "loss": 0.2842,
613
  "step": 760
614
  },
615
  {
616
- "epoch": 8.019354838709678,
617
- "grad_norm": 9.685689926147461,
618
- "learning_rate": 9.557945041816011e-06,
619
- "loss": 0.2046,
620
  "step": 770
621
  },
622
  {
623
- "epoch": 8.03010752688172,
624
- "grad_norm": 4.679247856140137,
625
- "learning_rate": 8.960573476702509e-06,
626
- "loss": 0.2956,
627
  "step": 780
628
  },
629
  {
630
- "epoch": 8.040860215053764,
631
- "grad_norm": 4.2983078956604,
632
- "learning_rate": 8.363201911589009e-06,
633
- "loss": 0.1368,
634
  "step": 790
635
  },
636
  {
637
- "epoch": 8.051612903225806,
638
- "grad_norm": 4.975796699523926,
639
- "learning_rate": 7.765830346475508e-06,
640
- "loss": 0.3849,
641
  "step": 800
642
  },
643
  {
644
- "epoch": 8.06236559139785,
645
- "grad_norm": 23.385377883911133,
646
- "learning_rate": 7.168458781362007e-06,
647
- "loss": 0.3219,
648
  "step": 810
649
  },
650
  {
651
- "epoch": 8.073118279569892,
652
- "grad_norm": 9.758036613464355,
653
- "learning_rate": 6.5710872162485075e-06,
654
- "loss": 0.373,
655
  "step": 820
656
  },
657
  {
658
- "epoch": 8.083870967741936,
659
- "grad_norm": 5.094470977783203,
660
- "learning_rate": 5.973715651135006e-06,
661
- "loss": 0.2467,
662
  "step": 830
663
  },
664
  {
665
- "epoch": 8.094623655913978,
666
- "grad_norm": 9.906914710998535,
667
- "learning_rate": 5.376344086021506e-06,
668
- "loss": 0.2514,
669
  "step": 840
670
  },
671
  {
672
- "epoch": 8.101075268817205,
673
- "eval_accuracy": 0.9210526315789473,
674
- "eval_loss": 0.3206616938114166,
675
- "eval_runtime": 21.2555,
676
- "eval_samples_per_second": 3.576,
677
- "eval_steps_per_second": 0.47,
678
- "step": 846
679
- },
680
- {
681
- "epoch": 9.004301075268817,
682
- "grad_norm": 0.8976335525512695,
683
- "learning_rate": 4.7789725209080055e-06,
684
- "loss": 0.2381,
685
  "step": 850
686
  },
687
  {
688
- "epoch": 9.01505376344086,
689
- "grad_norm": 20.732986450195312,
690
- "learning_rate": 4.181600955794504e-06,
691
- "loss": 0.2403,
692
  "step": 860
693
  },
694
  {
695
- "epoch": 9.025806451612903,
696
- "grad_norm": 0.5518481731414795,
697
- "learning_rate": 3.5842293906810035e-06,
698
- "loss": 0.1456,
699
  "step": 870
700
  },
701
  {
702
- "epoch": 9.036559139784947,
703
- "grad_norm": 9.817909240722656,
704
- "learning_rate": 2.986857825567503e-06,
705
- "loss": 0.1722,
706
  "step": 880
707
  },
708
  {
709
- "epoch": 9.047311827956989,
710
- "grad_norm": 29.461748123168945,
711
- "learning_rate": 2.3894862604540028e-06,
712
- "loss": 0.1048,
713
  "step": 890
714
  },
715
  {
716
- "epoch": 9.058064516129033,
717
- "grad_norm": 8.807541847229004,
718
- "learning_rate": 1.7921146953405017e-06,
719
- "loss": 0.2389,
720
  "step": 900
721
  },
722
  {
723
- "epoch": 9.068817204301075,
724
- "grad_norm": 2.6277596950531006,
725
- "learning_rate": 1.1947431302270014e-06,
726
- "loss": 0.193,
727
  "step": 910
728
  },
729
  {
730
- "epoch": 9.079569892473119,
731
- "grad_norm": 16.283491134643555,
732
- "learning_rate": 5.973715651135007e-07,
733
- "loss": 0.3097,
734
  "step": 920
735
  },
736
  {
737
- "epoch": 9.09032258064516,
738
- "grad_norm": 21.258689880371094,
739
- "learning_rate": 0.0,
740
- "loss": 0.3523,
741
  "step": 930
742
  },
743
  {
744
- "epoch": 9.09032258064516,
745
- "eval_accuracy": 0.9473684210526315,
746
- "eval_loss": 0.2718675136566162,
747
- "eval_runtime": 25.0335,
748
- "eval_samples_per_second": 3.036,
749
- "eval_steps_per_second": 0.399,
750
- "step": 930
751
  },
752
  {
753
- "epoch": 9.09032258064516,
754
- "step": 930,
755
- "total_flos": 9.225862220448793e+18,
756
- "train_loss": 0.4273061529282601,
757
- "train_runtime": 3527.5997,
758
- "train_samples_per_second": 2.109,
759
- "train_steps_per_second": 0.264
760
  },
761
  {
762
- "epoch": 9.09032258064516,
763
- "eval_accuracy": 0.8,
764
- "eval_loss": 0.7193946838378906,
765
- "eval_runtime": 163.1453,
766
- "eval_samples_per_second": 2.452,
767
- "eval_steps_per_second": 0.306,
768
- "step": 930
769
  },
770
  {
771
- "epoch": 9.09032258064516,
772
- "eval_accuracy": 0.8,
773
- "eval_loss": 0.7193946838378906,
774
- "eval_runtime": 151.4396,
775
- "eval_samples_per_second": 2.641,
776
- "eval_steps_per_second": 0.33,
777
- "step": 930
  }
  ],
  "logging_steps": 10,
- "max_steps": 930,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9223372036854775807,
  "save_steps": 500,
@@ -794,7 +2141,7 @@
  "attributes": {}
  }
  },
- "total_flos": 9.225862220448793e+18,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
 
  {
+ "best_metric": 0.8782051282051282,
+ "best_model_checkpoint": "videomae-base-finetuned-rwf2000-subset/checkpoint-1870",
+ "epoch": 14.061648745519713,
  "eval_steps": 500,
+ "global_step": 2790,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
11
  {
12
+ "epoch": 0.0035842293906810036,
13
+ "grad_norm": 10.333197593688965,
14
+ "learning_rate": 1.7921146953405017e-06,
15
+ "loss": 0.4614,
16
  "step": 10
17
  },
18
  {
19
+ "epoch": 0.007168458781362007,
20
+ "grad_norm": 10.418760299682617,
21
+ "learning_rate": 3.5842293906810035e-06,
22
+ "loss": 0.5532,
23
  "step": 20
24
  },
25
  {
26
+ "epoch": 0.010752688172043012,
27
+ "grad_norm": 7.5556135177612305,
28
+ "learning_rate": 5.376344086021506e-06,
29
+ "loss": 0.5127,
30
  "step": 30
31
  },
32
  {
33
+ "epoch": 0.014336917562724014,
34
+ "grad_norm": 7.2956719398498535,
35
+ "learning_rate": 7.168458781362007e-06,
36
+ "loss": 0.4354,
37
  "step": 40
38
  },
39
  {
40
+ "epoch": 0.017921146953405017,
41
+ "grad_norm": 5.666509628295898,
42
+ "learning_rate": 8.960573476702509e-06,
43
+ "loss": 0.3259,
44
  "step": 50
45
  },
46
  {
47
+ "epoch": 0.021505376344086023,
48
+ "grad_norm": 11.845425605773926,
49
+ "learning_rate": 1.0752688172043012e-05,
50
+ "loss": 0.4544,
51
  "step": 60
52
  },
53
  {
54
+ "epoch": 0.025089605734767026,
55
+ "grad_norm": 4.353142738342285,
56
+ "learning_rate": 1.2544802867383513e-05,
57
+ "loss": 0.4147,
58
  "step": 70
59
  },
60
  {
61
+ "epoch": 0.02867383512544803,
62
+ "grad_norm": 8.852384567260742,
63
+ "learning_rate": 1.4336917562724014e-05,
64
+ "loss": 0.3996,
65
  "step": 80
66
  },
67
  {
68
+ "epoch": 0.03225806451612903,
69
+ "grad_norm": 8.823715209960938,
70
+ "learning_rate": 1.6129032258064517e-05,
71
+ "loss": 0.4497,
72
  "step": 90
73
  },
74
  {
75
+ "epoch": 0.035842293906810034,
76
+ "grad_norm": 6.714145660400391,
77
+ "learning_rate": 1.7921146953405018e-05,
78
+ "loss": 0.2831,
79
  "step": 100
80
  },
81
  {
82
+ "epoch": 0.03942652329749104,
83
+ "grad_norm": 10.755260467529297,
84
+ "learning_rate": 1.9713261648745522e-05,
85
+ "loss": 0.4612,
86
  "step": 110
87
  },
88
  {
89
+ "epoch": 0.043010752688172046,
90
+ "grad_norm": 11.755001068115234,
91
+ "learning_rate": 2.1505376344086024e-05,
92
+ "loss": 0.539,
93
  "step": 120
94
  },
95
  {
96
+ "epoch": 0.04659498207885305,
97
+ "grad_norm": 4.715596675872803,
98
+ "learning_rate": 2.3297491039426525e-05,
99
+ "loss": 0.4302,
100
  "step": 130
101
  },
102
  {
103
+ "epoch": 0.05017921146953405,
104
+ "grad_norm": 10.882904052734375,
105
+ "learning_rate": 2.5089605734767026e-05,
106
+ "loss": 0.429,
107
  "step": 140
108
  },
109
  {
110
+ "epoch": 0.053763440860215055,
111
+ "grad_norm": 17.894540786743164,
112
+ "learning_rate": 2.6881720430107527e-05,
113
+ "loss": 0.4703,
114
  "step": 150
115
  },
116
  {
117
+ "epoch": 0.05734767025089606,
118
+ "grad_norm": 9.539034843444824,
119
+ "learning_rate": 2.8673835125448028e-05,
120
+ "loss": 0.4972,
121
  "step": 160
122
  },
123
  {
124
+ "epoch": 0.06093189964157706,
125
+ "grad_norm": 6.870086193084717,
126
+ "learning_rate": 3.046594982078853e-05,
127
+ "loss": 0.37,
128
  "step": 170
129
  },
130
  {
131
+ "epoch": 0.06451612903225806,
132
+ "grad_norm": 7.155197620391846,
133
+ "learning_rate": 3.2258064516129034e-05,
134
+ "loss": 0.3956,
135
  "step": 180
136
  },
137
  {
138
+ "epoch": 0.06702508960573476,
139
+ "eval_accuracy": 0.717948717948718,
140
+ "eval_loss": 0.7066396474838257,
141
+ "eval_runtime": 51.1495,
142
+ "eval_samples_per_second": 3.05,
143
+ "eval_steps_per_second": 0.391,
144
+ "step": 187
145
  },
146
  {
147
+ "epoch": 1.0010752688172042,
148
+ "grad_norm": 23.43296241760254,
149
+ "learning_rate": 3.405017921146954e-05,
150
+ "loss": 0.3894,
151
  "step": 190
152
  },
153
  {
154
+ "epoch": 1.0046594982078854,
155
+ "grad_norm": 6.450241565704346,
156
+ "learning_rate": 3.5842293906810036e-05,
157
+ "loss": 0.5151,
158
  "step": 200
159
  },
160
  {
161
+ "epoch": 1.0082437275985663,
162
+ "grad_norm": 5.017886161804199,
163
+ "learning_rate": 3.763440860215054e-05,
164
+ "loss": 0.4517,
165
  "step": 210
166
  },
167
  {
168
+ "epoch": 1.0118279569892472,
169
+ "grad_norm": 20.295490264892578,
170
+ "learning_rate": 3.9426523297491045e-05,
171
+ "loss": 0.5027,
172
  "step": 220
173
  },
174
  {
175
+ "epoch": 1.0154121863799284,
176
+ "grad_norm": 14.873997688293457,
177
+ "learning_rate": 4.121863799283154e-05,
178
+ "loss": 0.6656,
179
  "step": 230
180
  },
181
  {
182
+ "epoch": 1.0189964157706093,
183
+ "grad_norm": 1.4430453777313232,
184
+ "learning_rate": 4.301075268817205e-05,
185
+ "loss": 0.3837,
186
  "step": 240
187
  },
188
  {
189
+ "epoch": 1.0225806451612902,
190
+ "grad_norm": 3.686954975128174,
191
+ "learning_rate": 4.4802867383512545e-05,
192
+ "loss": 0.4077,
193
  "step": 250
194
  },
195
  {
196
+ "epoch": 1.0261648745519714,
197
+ "grad_norm": 8.648886680603027,
198
+ "learning_rate": 4.659498207885305e-05,
199
+ "loss": 0.4828,
200
  "step": 260
201
  },
202
  {
203
+ "epoch": 1.0297491039426523,
204
+ "grad_norm": 13.670947074890137,
205
+ "learning_rate": 4.8387096774193554e-05,
206
+ "loss": 0.4882,
207
  "step": 270
208
  },
209
  {
210
+ "epoch": 1.0333333333333334,
211
+ "grad_norm": 1.407135248184204,
212
+ "learning_rate": 4.998008761449622e-05,
213
+ "loss": 0.4301,
214
  "step": 280
215
  },
216
  {
217
+ "epoch": 1.0369175627240144,
218
+ "grad_norm": 11.154088020324707,
219
+ "learning_rate": 4.978096375945839e-05,
220
+ "loss": 0.4001,
221
  "step": 290
222
  },
223
  {
224
+ "epoch": 1.0405017921146953,
225
+ "grad_norm": 6.446365833282471,
226
+ "learning_rate": 4.9581839904420555e-05,
227
+ "loss": 0.6634,
228
  "step": 300
229
  },
230
  {
231
+ "epoch": 1.0440860215053764,
232
+ "grad_norm": 8.026636123657227,
233
+ "learning_rate": 4.938271604938271e-05,
234
+ "loss": 0.2529,
235
  "step": 310
236
  },
237
  {
238
+ "epoch": 1.0476702508960574,
239
+ "grad_norm": 13.86358642578125,
240
+ "learning_rate": 4.9183592194344885e-05,
241
+ "loss": 0.6586,
242
  "step": 320
243
  },
244
  {
245
+ "epoch": 1.0512544802867383,
246
+ "grad_norm": 6.193373680114746,
247
+ "learning_rate": 4.898446833930705e-05,
248
+ "loss": 0.6053,
249
  "step": 330
250
  },
251
  {
252
+ "epoch": 1.0548387096774194,
253
+ "grad_norm": 3.7244551181793213,
254
+ "learning_rate": 4.8785344484269216e-05,
255
+ "loss": 0.4128,
256
  "step": 340
257
  },
258
  {
259
+ "epoch": 1.0584229390681004,
260
+ "grad_norm": 4.799228668212891,
261
+ "learning_rate": 4.858622062923138e-05,
262
+ "loss": 0.429,
263
  "step": 350
264
  },
265
  {
266
+ "epoch": 1.0620071684587813,
267
+ "grad_norm": 13.368525505065918,
268
+ "learning_rate": 4.8387096774193554e-05,
269
+ "loss": 0.5007,
270
  "step": 360
271
  },
272
  {
273
+ "epoch": 1.0655913978494624,
274
+ "grad_norm": 7.234383583068848,
275
+ "learning_rate": 4.818797291915572e-05,
276
+ "loss": 0.4019,
277
  "step": 370
278
  },
279
  {
280
+ "epoch": 1.0670250896057347,
281
+ "eval_accuracy": 0.7115384615384616,
282
+ "eval_loss": 0.6282246708869934,
283
+ "eval_runtime": 50.1248,
284
+ "eval_samples_per_second": 3.112,
285
+ "eval_steps_per_second": 0.399,
286
+ "step": 374
287
  },
288
  {
289
+ "epoch": 2.0021505376344084,
290
+ "grad_norm": 2.577439069747925,
291
+ "learning_rate": 4.7988849064117885e-05,
292
+ "loss": 0.494,
293
  "step": 380
294
  },
295
  {
296
+ "epoch": 2.0057347670250896,
297
+ "grad_norm": 4.06400203704834,
298
+ "learning_rate": 4.778972520908005e-05,
299
+ "loss": 0.3722,
300
  "step": 390
301
  },
302
  {
303
+ "epoch": 2.0093189964157707,
304
+ "grad_norm": 8.867179870605469,
305
+ "learning_rate": 4.759060135404222e-05,
306
+ "loss": 0.5412,
307
  "step": 400
308
  },
309
  {
310
+ "epoch": 2.0129032258064514,
311
+ "grad_norm": 2.0079333782196045,
312
+ "learning_rate": 4.739147749900438e-05,
313
+ "loss": 0.5593,
314
  "step": 410
315
  },
316
  {
317
+ "epoch": 2.0164874551971326,
318
+ "grad_norm": 9.170706748962402,
319
+ "learning_rate": 4.7192353643966546e-05,
320
+ "loss": 0.3964,
321
  "step": 420
322
  },
323
  {
324
+ "epoch": 2.0200716845878137,
325
+ "grad_norm": 9.581748962402344,
326
+ "learning_rate": 4.699322978892872e-05,
327
+ "loss": 0.5128,
328
  "step": 430
329
  },
330
  {
331
+ "epoch": 2.0236559139784944,
332
+ "grad_norm": 3.3790416717529297,
333
+ "learning_rate": 4.6794105933890884e-05,
334
+ "loss": 0.4637,
335
  "step": 440
336
  },
337
  {
338
+ "epoch": 2.0272401433691756,
339
+ "grad_norm": 9.568379402160645,
340
+ "learning_rate": 4.659498207885305e-05,
341
+ "loss": 0.4377,
342
  "step": 450
343
  },
344
  {
345
+ "epoch": 2.0308243727598567,
346
+ "grad_norm": 1.0472499132156372,
347
+ "learning_rate": 4.6395858223815215e-05,
348
+ "loss": 0.4241,
349
  "step": 460
350
  },
351
  {
352
+ "epoch": 2.0344086021505374,
353
+ "grad_norm": 21.286645889282227,
354
+ "learning_rate": 4.619673436877738e-05,
355
+ "loss": 0.7317,
356
  "step": 470
357
  },
358
  {
359
+ "epoch": 2.0379928315412186,
360
+ "grad_norm": 8.561319351196289,
361
+ "learning_rate": 4.5997610513739546e-05,
362
+ "loss": 0.5629,
363
  "step": 480
364
  },
365
  {
366
+ "epoch": 2.0415770609318997,
367
+ "grad_norm": 2.304678201675415,
368
+ "learning_rate": 4.579848665870172e-05,
369
+ "loss": 0.2492,
370
  "step": 490
371
  },
372
  {
373
+ "epoch": 2.0451612903225804,
374
+ "grad_norm": 13.397499084472656,
375
+ "learning_rate": 4.559936280366388e-05,
376
+ "loss": 0.4369,
377
  "step": 500
378
  },
379
  {
380
+ "epoch": 2.0487455197132616,
381
+ "grad_norm": 1.433841347694397,
382
+ "learning_rate": 4.540023894862604e-05,
383
+ "loss": 0.4091,
384
  "step": 510
385
  },
386
  {
387
+ "epoch": 2.0523297491039427,
388
+ "grad_norm": 4.154223918914795,
389
+ "learning_rate": 4.5201115093588214e-05,
390
+ "loss": 0.3906,
391
  "step": 520
392
  },
393
  {
394
+ "epoch": 2.055913978494624,
395
+ "grad_norm": 11.778128623962402,
396
+ "learning_rate": 4.500199123855038e-05,
397
+ "loss": 0.4366,
398
  "step": 530
399
  },
400
  {
401
+ "epoch": 2.0594982078853046,
402
+ "grad_norm": 0.3377319872379303,
403
+ "learning_rate": 4.4802867383512545e-05,
404
+ "loss": 0.464,
405
  "step": 540
406
  },
407
  {
408
+ "epoch": 2.0630824372759857,
409
+ "grad_norm": 6.4462103843688965,
410
+ "learning_rate": 4.460374352847471e-05,
411
+ "loss": 0.7451,
412
  "step": 550
413
  },
414
  {
415
+ "epoch": 2.066666666666667,
416
+ "grad_norm": 4.705774784088135,
417
+ "learning_rate": 4.440461967343688e-05,
418
+ "loss": 0.4473,
419
  "step": 560
420
  },
421
  {
422
+ "epoch": 2.067025089605735,
423
+ "eval_accuracy": 0.7692307692307693,
424
+ "eval_loss": 0.43944770097732544,
425
+ "eval_runtime": 51.1029,
426
+ "eval_samples_per_second": 3.053,
427
+ "eval_steps_per_second": 0.391,
428
+ "step": 561
429
  },
430
  {
431
+ "epoch": 3.003225806451613,
432
+ "grad_norm": 2.186035633087158,
433
+ "learning_rate": 4.420549581839905e-05,
434
+ "loss": 0.3464,
435
  "step": 570
436
  },
437
  {
438
+ "epoch": 3.006810035842294,
439
+ "grad_norm": 7.887436866760254,
440
+ "learning_rate": 4.400637196336121e-05,
441
+ "loss": 0.3988,
442
  "step": 580
443
  },
444
  {
445
+ "epoch": 3.010394265232975,
446
+ "grad_norm": 19.643508911132812,
447
+ "learning_rate": 4.380724810832338e-05,
448
+ "loss": 0.3721,
449
  "step": 590
450
  },
451
  {
452
+ "epoch": 3.013978494623656,
453
+ "grad_norm": 2.0498669147491455,
454
+ "learning_rate": 4.360812425328555e-05,
455
+ "loss": 0.5075,
456
  "step": 600
457
  },
458
  {
459
+ "epoch": 3.017562724014337,
460
+ "grad_norm": 2.9130897521972656,
461
+ "learning_rate": 4.340900039824771e-05,
462
+ "loss": 0.2018,
463
  "step": 610
464
  },
465
  {
466
+ "epoch": 3.021146953405018,
467
+ "grad_norm": 3.8146159648895264,
468
+ "learning_rate": 4.3209876543209875e-05,
469
+ "loss": 0.4042,
470
  "step": 620
471
  },
472
  {
473
+ "epoch": 3.024731182795699,
474
+ "grad_norm": 10.83116340637207,
475
+ "learning_rate": 4.301075268817205e-05,
476
+ "loss": 0.4721,
477
  "step": 630
478
  },
479
  {
480
+ "epoch": 3.02831541218638,
481
+ "grad_norm": 16.305315017700195,
482
+ "learning_rate": 4.281162883313421e-05,
483
+ "loss": 0.403,
484
  "step": 640
485
  },
486
  {
487
+ "epoch": 3.031899641577061,
488
+ "grad_norm": 2.8166565895080566,
489
+ "learning_rate": 4.261250497809638e-05,
490
+ "loss": 0.3613,
491
  "step": 650
492
  },
493
  {
494
+ "epoch": 3.035483870967742,
495
+ "grad_norm": 11.772737503051758,
496
+ "learning_rate": 4.241338112305854e-05,
497
+ "loss": 0.3021,
498
  "step": 660
499
  },
500
  {
501
+ "epoch": 3.039068100358423,
502
+ "grad_norm": 3.964470624923706,
503
+ "learning_rate": 4.2214257268020715e-05,
504
+ "loss": 0.4299,
505
  "step": 670
506
  },
507
  {
508
+ "epoch": 3.042652329749104,
509
+ "grad_norm": 12.679352760314941,
510
+ "learning_rate": 4.2015133412982874e-05,
511
+ "loss": 0.4539,
512
  "step": 680
513
  },
514
  {
515
+ "epoch": 3.046236559139785,
516
+ "grad_norm": 5.665447235107422,
517
+ "learning_rate": 4.1816009557945046e-05,
518
+ "loss": 0.4301,
519
  "step": 690
520
  },
521
  {
522
+ "epoch": 3.049820788530466,
523
+ "grad_norm": 9.17259693145752,
524
+ "learning_rate": 4.161688570290721e-05,
525
+ "loss": 0.3725,
526
  "step": 700
527
  },
528
  {
529
+ "epoch": 3.053405017921147,
530
+ "grad_norm": 10.35765266418457,
531
+ "learning_rate": 4.141776184786938e-05,
532
+ "loss": 0.3407,
533
  "step": 710
534
  },
535
  {
536
+ "epoch": 3.056989247311828,
537
+ "grad_norm": 7.472835063934326,
538
+ "learning_rate": 4.121863799283154e-05,
539
+ "loss": 0.2796,
540
  "step": 720
541
  },
542
  {
543
+ "epoch": 3.060573476702509,
544
+ "grad_norm": 17.262203216552734,
545
+ "learning_rate": 4.101951413779371e-05,
546
+ "loss": 0.5121,
547
  "step": 730
548
  },
549
  {
550
+ "epoch": 3.06415770609319,
551
+ "grad_norm": 5.791836261749268,
552
+ "learning_rate": 4.082039028275588e-05,
553
+ "loss": 0.3309,
554
  "step": 740
555
  },
556
  {
557
+ "epoch": 3.067025089605735,
558
+ "eval_accuracy": 0.782051282051282,
559
+ "eval_loss": 0.47816720604896545,
560
+ "eval_runtime": 51.9165,
561
+ "eval_samples_per_second": 3.005,
562
+ "eval_steps_per_second": 0.385,
563
+ "step": 748
564
  },
565
  {
566
+ "epoch": 4.000716845878136,
567
+ "grad_norm": 7.017057418823242,
568
+ "learning_rate": 4.062126642771804e-05,
569
+ "loss": 0.5454,
570
+ "step": 750
571
  },
572
  {
573
+ "epoch": 4.004301075268817,
574
+ "grad_norm": 3.8564045429229736,
575
+ "learning_rate": 4.042214257268021e-05,
576
+ "loss": 0.3336,
577
  "step": 760
578
  },
579
  {
580
+ "epoch": 4.007885304659498,
581
+ "grad_norm": 4.636725902557373,
582
+ "learning_rate": 4.0223018717642376e-05,
583
+ "loss": 0.2867,
584
  "step": 770
585
  },
586
  {
587
+ "epoch": 4.011469534050179,
588
+ "grad_norm": 8.09864616394043,
589
+ "learning_rate": 4.002389486260454e-05,
590
+ "loss": 0.2559,
591
  "step": 780
592
  },
593
  {
594
+ "epoch": 4.01505376344086,
595
+ "grad_norm": 8.088166236877441,
596
+ "learning_rate": 3.982477100756671e-05,
597
+ "loss": 0.3084,
598
  "step": 790
599
  },
600
  {
601
+ "epoch": 4.018637992831541,
602
+ "grad_norm": 13.00614070892334,
603
+ "learning_rate": 3.962564715252888e-05,
604
+ "loss": 0.3581,
605
  "step": 800
606
  },
607
  {
608
+ "epoch": 4.022222222222222,
609
+ "grad_norm": 4.3727641105651855,
610
+ "learning_rate": 3.9426523297491045e-05,
611
+ "loss": 0.3683,
612
  "step": 810
613
  },
614
  {
615
+ "epoch": 4.025806451612903,
616
+ "grad_norm": 10.826643943786621,
617
+ "learning_rate": 3.9227399442453203e-05,
618
+ "loss": 0.439,
619
  "step": 820
620
  },
621
  {
622
+ "epoch": 4.029390681003584,
623
+ "grad_norm": 8.039769172668457,
624
+ "learning_rate": 3.9028275587415376e-05,
625
+ "loss": 0.3645,
626
  "step": 830
627
  },
628
  {
629
+ "epoch": 4.032974910394265,
630
+ "grad_norm": 2.0027217864990234,
631
+ "learning_rate": 3.882915173237754e-05,
632
+ "loss": 0.3318,
633
  "step": 840
634
  },
635
  {
636
+ "epoch": 4.036559139784946,
637
+ "grad_norm": 6.5030198097229,
638
+ "learning_rate": 3.8630027877339706e-05,
639
+ "loss": 0.4075,
640
  "step": 850
641
  },
642
  {
643
+ "epoch": 4.040143369175627,
644
+ "grad_norm": 9.211589813232422,
645
+ "learning_rate": 3.843090402230187e-05,
646
+ "loss": 0.3196,
647
  "step": 860
648
  },
649
  {
650
+ "epoch": 4.043727598566308,
651
+ "grad_norm": 10.966934204101562,
652
+ "learning_rate": 3.8231780167264044e-05,
653
+ "loss": 0.1448,
654
  "step": 870
655
  },
656
  {
657
+ "epoch": 4.047311827956989,
658
+ "grad_norm": 12.808022499084473,
659
+ "learning_rate": 3.80326563122262e-05,
660
+ "loss": 0.3627,
661
  "step": 880
662
  },
663
  {
664
+ "epoch": 4.05089605734767,
665
+ "grad_norm": 6.971220016479492,
666
+ "learning_rate": 3.7833532457188375e-05,
667
+ "loss": 0.5394,
668
  "step": 890
669
  },
670
  {
671
+ "epoch": 4.054480286738351,
672
+ "grad_norm": 5.306809425354004,
673
+ "learning_rate": 3.763440860215054e-05,
674
+ "loss": 0.2983,
675
  "step": 900
676
  },
677
  {
678
+ "epoch": 4.058064516129032,
679
+ "grad_norm": 3.8580007553100586,
680
+ "learning_rate": 3.7435284747112706e-05,
681
+ "loss": 0.4265,
682
  "step": 910
683
  },
684
  {
685
+ "epoch": 4.061648745519713,
686
+ "grad_norm": 14.511212348937988,
687
+ "learning_rate": 3.723616089207487e-05,
688
+ "loss": 0.4302,
689
  "step": 920
690
  },
691
  {
692
+ "epoch": 4.065232974910394,
693
+ "grad_norm": 11.154169082641602,
694
+ "learning_rate": 3.7037037037037037e-05,
695
+ "loss": 0.4007,
696
  "step": 930
697
  },
698
  {
699
+ "epoch": 4.0670250896057345,
700
+ "eval_accuracy": 0.8461538461538461,
701
+ "eval_loss": 0.4134625196456909,
702
+ "eval_runtime": 50.528,
703
+ "eval_samples_per_second": 3.087,
704
+ "eval_steps_per_second": 0.396,
705
+ "step": 935
706
  },
707
  {
708
+ "epoch": 5.00179211469534,
709
+ "grad_norm": 1.4756416082382202,
710
+ "learning_rate": 3.683791318199921e-05,
711
+ "loss": 0.3005,
712
+ "step": 940
713
  },
714
  {
715
+ "epoch": 5.005376344086022,
716
+ "grad_norm": 7.019712924957275,
717
+ "learning_rate": 3.663878932696137e-05,
718
+ "loss": 0.3069,
719
+ "step": 950
720
  },
721
  {
722
+ "epoch": 5.008960573476703,
723
+ "grad_norm": 14.922005653381348,
724
+ "learning_rate": 3.643966547192354e-05,
725
+ "loss": 0.2685,
726
+ "step": 960
727
+ },
728
+ {
729
+ "epoch": 5.012544802867383,
730
+ "grad_norm": 17.716806411743164,
731
+ "learning_rate": 3.6240541616885705e-05,
732
+ "loss": 0.3368,
733
+ "step": 970
734
+ },
735
+ {
736
+ "epoch": 5.016129032258065,
737
+ "grad_norm": 30.634918212890625,
738
+ "learning_rate": 3.604141776184787e-05,
739
+ "loss": 0.4523,
740
+ "step": 980
741
+ },
742
+ {
743
+ "epoch": 5.019713261648746,
744
+ "grad_norm": 6.613483905792236,
745
+ "learning_rate": 3.5842293906810036e-05,
746
+ "loss": 0.4893,
747
+ "step": 990
748
+ },
749
+ {
750
+ "epoch": 5.023297491039426,
751
+ "grad_norm": 9.66140365600586,
752
+ "learning_rate": 3.564317005177221e-05,
753
+ "loss": 0.3979,
754
+ "step": 1000
755
+ },
756
+ {
757
+ "epoch": 5.026881720430108,
758
+ "grad_norm": 8.311437606811523,
759
+ "learning_rate": 3.5444046196734373e-05,
760
+ "loss": 0.2798,
761
+ "step": 1010
762
+ },
763
+ {
764
+ "epoch": 5.030465949820789,
765
+ "grad_norm": 3.4359304904937744,
766
+ "learning_rate": 3.524492234169653e-05,
767
+ "loss": 0.2007,
768
+ "step": 1020
769
+ },
770
+ {
771
+ "epoch": 5.034050179211469,
772
+ "grad_norm": 2.4346439838409424,
773
+ "learning_rate": 3.5045798486658704e-05,
774
+ "loss": 0.3485,
775
+ "step": 1030
776
+ },
777
+ {
778
+ "epoch": 5.037634408602151,
779
+ "grad_norm": 16.60686683654785,
780
+ "learning_rate": 3.484667463162087e-05,
781
+ "loss": 0.3642,
782
+ "step": 1040
783
+ },
784
+ {
785
+ "epoch": 5.041218637992832,
786
+ "grad_norm": 6.6109466552734375,
787
+ "learning_rate": 3.4647550776583035e-05,
788
+ "loss": 0.4869,
789
+ "step": 1050
790
+ },
791
+ {
792
+ "epoch": 5.044802867383512,
793
+ "grad_norm": 14.017187118530273,
794
+ "learning_rate": 3.44484269215452e-05,
795
+ "loss": 0.3117,
796
+ "step": 1060
797
+ },
798
+ {
799
+ "epoch": 5.048387096774194,
800
+ "grad_norm": 6.0544047355651855,
801
+ "learning_rate": 3.424930306650737e-05,
802
+ "loss": 0.3086,
803
+ "step": 1070
804
+ },
805
+ {
806
+ "epoch": 5.051971326164875,
807
+ "grad_norm": 12.737698554992676,
808
+ "learning_rate": 3.405017921146954e-05,
809
+ "loss": 0.3758,
810
+ "step": 1080
811
+ },
812
+ {
813
+ "epoch": 5.055555555555555,
814
+ "grad_norm": 3.5636813640594482,
815
+ "learning_rate": 3.3851055356431704e-05,
816
+ "loss": 0.3358,
817
+ "step": 1090
818
+ },
819
+ {
820
+ "epoch": 5.059139784946237,
821
+ "grad_norm": 15.545931816101074,
822
+ "learning_rate": 3.365193150139387e-05,
823
+ "loss": 0.442,
824
+ "step": 1100
825
+ },
826
+ {
827
+ "epoch": 5.062724014336918,
828
+ "grad_norm": 26.353408813476562,
829
+ "learning_rate": 3.3452807646356034e-05,
830
+ "loss": 0.2054,
831
+ "step": 1110
832
+ },
833
+ {
834
+ "epoch": 5.066308243727598,
835
+ "grad_norm": 28.000761032104492,
836
+ "learning_rate": 3.32536837913182e-05,
837
+ "loss": 0.3772,
838
+ "step": 1120
839
+ },
840
+ {
841
+ "epoch": 5.0670250896057345,
842
+ "eval_accuracy": 0.8461538461538461,
843
+ "eval_loss": 0.4329083263874054,
844
+ "eval_runtime": 50.4842,
845
+ "eval_samples_per_second": 3.09,
846
+ "eval_steps_per_second": 0.396,
847
+ "step": 1122
848
+ },
849
+ {
850
+ "epoch": 6.002867383512545,
851
+ "grad_norm": 6.335020065307617,
852
+ "learning_rate": 3.3054559936280365e-05,
853
+ "loss": 0.1473,
854
+ "step": 1130
855
+ },
856
+ {
857
+ "epoch": 6.006451612903226,
858
+ "grad_norm": 2.6890759468078613,
859
+ "learning_rate": 3.285543608124254e-05,
860
+ "loss": 0.4162,
861
+ "step": 1140
862
+ },
863
+ {
864
+ "epoch": 6.010035842293907,
865
+ "grad_norm": 11.375259399414062,
866
+ "learning_rate": 3.26563122262047e-05,
867
+ "loss": 0.3748,
868
+ "step": 1150
869
+ },
870
+ {
871
+ "epoch": 6.013620071684588,
872
+ "grad_norm": 8.632832527160645,
873
+ "learning_rate": 3.245718837116687e-05,
874
+ "loss": 0.2576,
875
+ "step": 1160
876
+ },
877
+ {
878
+ "epoch": 6.017204301075269,
879
+ "grad_norm": 6.899137496948242,
880
+ "learning_rate": 3.2258064516129034e-05,
881
+ "loss": 0.4902,
882
+ "step": 1170
883
+ },
884
+ {
885
+ "epoch": 6.02078853046595,
886
+ "grad_norm": 6.093740463256836,
887
+ "learning_rate": 3.2058940661091206e-05,
888
+ "loss": 0.3972,
889
+ "step": 1180
890
+ },
891
+ {
892
+ "epoch": 6.024372759856631,
893
+ "grad_norm": 8.65532398223877,
894
+ "learning_rate": 3.1859816806053364e-05,
895
+ "loss": 0.333,
896
+ "step": 1190
897
+ },
898
+ {
899
+ "epoch": 6.027956989247312,
900
+ "grad_norm": 4.51920747756958,
901
+ "learning_rate": 3.1660692951015537e-05,
902
+ "loss": 0.3342,
903
+ "step": 1200
904
+ },
905
+ {
906
+ "epoch": 6.031541218637993,
907
+ "grad_norm": 16.826913833618164,
908
+ "learning_rate": 3.14615690959777e-05,
909
+ "loss": 0.2567,
910
+ "step": 1210
911
+ },
912
+ {
913
+ "epoch": 6.035125448028674,
914
+ "grad_norm": 1.91347336769104,
915
+ "learning_rate": 3.126244524093986e-05,
916
+ "loss": 0.2524,
917
+ "step": 1220
918
+ },
919
+ {
920
+ "epoch": 6.038709677419355,
921
+ "grad_norm": 1.6974544525146484,
922
+ "learning_rate": 3.106332138590203e-05,
923
+ "loss": 0.4133,
924
+ "step": 1230
925
+ },
926
+ {
927
+ "epoch": 6.042293906810036,
928
+ "grad_norm": 6.3330864906311035,
929
+ "learning_rate": 3.08641975308642e-05,
930
+ "loss": 0.3822,
931
+ "step": 1240
932
+ },
933
+ {
934
+ "epoch": 6.045878136200717,
935
+ "grad_norm": 1.5088220834732056,
936
+ "learning_rate": 3.0665073675826364e-05,
937
+ "loss": 0.2523,
938
+ "step": 1250
939
+ },
940
+ {
941
+ "epoch": 6.049462365591398,
942
+ "grad_norm": 4.4250946044921875,
943
+ "learning_rate": 3.046594982078853e-05,
944
+ "loss": 0.2141,
945
+ "step": 1260
946
+ },
947
+ {
948
+ "epoch": 6.053046594982079,
949
+ "grad_norm": 0.5099472403526306,
950
+ "learning_rate": 3.02668259657507e-05,
951
+ "loss": 0.1307,
952
+ "step": 1270
953
+ },
954
+ {
955
+ "epoch": 6.05663082437276,
956
+ "grad_norm": 10.32909870147705,
957
+ "learning_rate": 3.0067702110712863e-05,
958
+ "loss": 0.5991,
959
+ "step": 1280
960
+ },
961
+ {
962
+ "epoch": 6.060215053763441,
963
+ "grad_norm": 5.504093647003174,
964
+ "learning_rate": 2.9868578255675032e-05,
965
+ "loss": 0.2848,
966
+ "step": 1290
967
+ },
968
+ {
969
+ "epoch": 6.063799283154122,
970
+ "grad_norm": 15.00183391571045,
971
+ "learning_rate": 2.9669454400637198e-05,
972
+ "loss": 0.4685,
973
+ "step": 1300
974
+ },
975
+ {
976
+ "epoch": 6.0670250896057345,
977
+ "eval_accuracy": 0.8653846153846154,
978
+ "eval_loss": 0.4190846085548401,
979
+ "eval_runtime": 52.3206,
980
+ "eval_samples_per_second": 2.982,
981
+ "eval_steps_per_second": 0.382,
982
+ "step": 1309
983
+ },
984
+ {
985
+ "epoch": 7.000358422939068,
986
+ "grad_norm": 20.213930130004883,
987
+ "learning_rate": 2.9470330545599363e-05,
988
+ "loss": 0.2573,
989
+ "step": 1310
990
+ },
991
+ {
992
+ "epoch": 7.003942652329749,
993
+ "grad_norm": 8.981283187866211,
994
+ "learning_rate": 2.9271206690561532e-05,
995
+ "loss": 0.2385,
996
+ "step": 1320
997
+ },
998
+ {
999
+ "epoch": 7.00752688172043,
1000
+ "grad_norm": 4.789116859436035,
1001
+ "learning_rate": 2.9072082835523694e-05,
1002
+ "loss": 0.4895,
1003
+ "step": 1330
1004
+ },
1005
+ {
1006
+ "epoch": 7.011111111111111,
1007
+ "grad_norm": 16.981555938720703,
1008
+ "learning_rate": 2.8872958980485866e-05,
1009
+ "loss": 0.2175,
1010
+ "step": 1340
1011
+ },
1012
+ {
1013
+ "epoch": 7.014695340501792,
1014
+ "grad_norm": 0.44299283623695374,
1015
+ "learning_rate": 2.8673835125448028e-05,
1016
+ "loss": 0.2241,
1017
+ "step": 1350
1018
+ },
1019
+ {
1020
+ "epoch": 7.018279569892473,
1021
+ "grad_norm": 18.93329429626465,
1022
+ "learning_rate": 2.8474711270410197e-05,
1023
+ "loss": 0.333,
1024
+ "step": 1360
1025
+ },
1026
+ {
1027
+ "epoch": 7.021863799283154,
1028
+ "grad_norm": 10.7378511428833,
1029
+ "learning_rate": 2.8275587415372362e-05,
1030
+ "loss": 0.3464,
1031
+ "step": 1370
1032
+ },
1033
+ {
1034
+ "epoch": 7.025448028673835,
1035
+ "grad_norm": 6.727279186248779,
1036
+ "learning_rate": 2.807646356033453e-05,
1037
+ "loss": 0.4513,
1038
+ "step": 1380
1039
+ },
1040
+ {
1041
+ "epoch": 7.029032258064516,
1042
+ "grad_norm": 0.6868817806243896,
1043
+ "learning_rate": 2.7877339705296696e-05,
1044
+ "loss": 0.3245,
1045
+ "step": 1390
1046
+ },
1047
+ {
1048
+ "epoch": 7.032616487455197,
1049
+ "grad_norm": 2.979949951171875,
1050
+ "learning_rate": 2.7678215850258865e-05,
1051
+ "loss": 0.3958,
1052
+ "step": 1400
1053
+ },
1054
+ {
1055
+ "epoch": 7.036200716845878,
1056
+ "grad_norm": 7.619071960449219,
1057
+ "learning_rate": 2.747909199522103e-05,
1058
+ "loss": 0.2745,
1059
+ "step": 1410
1060
+ },
1061
+ {
1062
+ "epoch": 7.039784946236559,
1063
+ "grad_norm": 6.652284145355225,
1064
+ "learning_rate": 2.7279968140183193e-05,
1065
+ "loss": 0.3702,
1066
+ "step": 1420
1067
+ },
1068
+ {
1069
+ "epoch": 7.04336917562724,
1070
+ "grad_norm": 7.907419204711914,
1071
+ "learning_rate": 2.708084428514536e-05,
1072
+ "loss": 0.2073,
1073
+ "step": 1430
1074
+ },
1075
+ {
1076
+ "epoch": 7.046953405017921,
1077
+ "grad_norm": 10.869131088256836,
1078
+ "learning_rate": 2.6881720430107527e-05,
1079
+ "loss": 0.2441,
1080
+ "step": 1440
1081
+ },
1082
+ {
1083
+ "epoch": 7.050537634408602,
1084
+ "grad_norm": 4.352448463439941,
1085
+ "learning_rate": 2.6682596575069696e-05,
1086
+ "loss": 0.2584,
1087
+ "step": 1450
1088
+ },
1089
+ {
1090
+ "epoch": 7.054121863799283,
1091
+ "grad_norm": 15.832218170166016,
1092
+ "learning_rate": 2.648347272003186e-05,
1093
+ "loss": 0.4251,
1094
+ "step": 1460
1095
+ },
1096
+ {
1097
+ "epoch": 7.057706093189964,
1098
+ "grad_norm": 4.763960361480713,
1099
+ "learning_rate": 2.628434886499403e-05,
1100
+ "loss": 0.1938,
1101
+ "step": 1470
1102
+ },
1103
+ {
1104
+ "epoch": 7.061290322580645,
1105
+ "grad_norm": 0.8033217191696167,
1106
+ "learning_rate": 2.6085225009956192e-05,
1107
+ "loss": 0.1949,
1108
+ "step": 1480
1109
+ },
1110
+ {
1111
+ "epoch": 7.064874551971326,
1112
+ "grad_norm": 18.37034797668457,
1113
+ "learning_rate": 2.5886101154918364e-05,
1114
+ "loss": 0.4056,
1115
+ "step": 1490
1116
+ },
1117
+ {
1118
+ "epoch": 7.0670250896057345,
1119
+ "eval_accuracy": 0.8012820512820513,
1120
+ "eval_loss": 0.5650048851966858,
1121
+ "eval_runtime": 60.9039,
1122
+ "eval_samples_per_second": 2.561,
1123
+ "eval_steps_per_second": 0.328,
1124
+ "step": 1496
1125
+ },
1126
+ {
1127
+ "epoch": 8.001433691756272,
1128
+ "grad_norm": 15.786755561828613,
1129
+ "learning_rate": 2.5686977299880526e-05,
1130
+ "loss": 0.2818,
1131
+ "step": 1500
1132
+ },
1133
+ {
1134
+ "epoch": 8.005017921146953,
1135
+ "grad_norm": 9.856786727905273,
1136
+ "learning_rate": 2.548785344484269e-05,
1137
+ "loss": 0.243,
1138
+ "step": 1510
1139
+ },
1140
+ {
1141
+ "epoch": 8.008602150537634,
1142
+ "grad_norm": 9.001197814941406,
1143
+ "learning_rate": 2.528872958980486e-05,
1144
+ "loss": 0.308,
1145
+ "step": 1520
1146
+ },
1147
+ {
1148
+ "epoch": 8.012186379928316,
1149
+ "grad_norm": 11.027730941772461,
1150
+ "learning_rate": 2.5089605734767026e-05,
1151
+ "loss": 0.1852,
1152
+ "step": 1530
1153
+ },
1154
+ {
1155
+ "epoch": 8.015770609318997,
1156
+ "grad_norm": 15.640365600585938,
1157
+ "learning_rate": 2.4890481879729195e-05,
1158
+ "loss": 0.3335,
1159
+ "step": 1540
1160
+ },
1161
+ {
1162
+ "epoch": 8.019354838709678,
1163
+ "grad_norm": 20.133060455322266,
1164
+ "learning_rate": 2.4691358024691357e-05,
1165
+ "loss": 0.3552,
1166
+ "step": 1550
1167
+ },
1168
+ {
1169
+ "epoch": 8.022939068100358,
1170
+ "grad_norm": 0.8335391283035278,
1171
+ "learning_rate": 2.4492234169653525e-05,
1172
+ "loss": 0.3064,
1173
+ "step": 1560
1174
+ },
1175
+ {
1176
+ "epoch": 8.026523297491039,
1177
+ "grad_norm": 0.4149135947227478,
1178
+ "learning_rate": 2.429311031461569e-05,
1179
+ "loss": 0.1344,
1180
+ "step": 1570
1181
+ },
1182
+ {
1183
+ "epoch": 8.03010752688172,
1184
+ "grad_norm": 0.058533698320388794,
1185
+ "learning_rate": 2.409398645957786e-05,
1186
+ "loss": 0.139,
1187
+ "step": 1580
1188
+ },
1189
+ {
1190
+ "epoch": 8.033691756272402,
1191
+ "grad_norm": 0.27755093574523926,
1192
+ "learning_rate": 2.3894862604540025e-05,
1193
+ "loss": 0.3311,
1194
+ "step": 1590
1195
+ },
1196
+ {
1197
+ "epoch": 8.037275985663083,
1198
+ "grad_norm": 3.6542258262634277,
1199
+ "learning_rate": 2.369573874950219e-05,
1200
+ "loss": 0.1689,
1201
+ "step": 1600
1202
+ },
1203
+ {
1204
+ "epoch": 8.040860215053764,
1205
+ "grad_norm": 0.14547933638095856,
1206
+ "learning_rate": 2.349661489446436e-05,
1207
+ "loss": 0.1994,
1208
+ "step": 1610
1209
+ },
1210
+ {
1211
+ "epoch": 8.044444444444444,
1212
+ "grad_norm": 0.17915168404579163,
1213
+ "learning_rate": 2.3297491039426525e-05,
1214
+ "loss": 0.3071,
1215
+ "step": 1620
1216
+ },
1217
+ {
1218
+ "epoch": 8.048028673835125,
1219
+ "grad_norm": 1.3044124841690063,
1220
+ "learning_rate": 2.309836718438869e-05,
1221
+ "loss": 0.2893,
1222
+ "step": 1630
1223
+ },
1224
+ {
1225
+ "epoch": 8.051612903225806,
1226
+ "grad_norm": 7.5866804122924805,
1227
+ "learning_rate": 2.289924332935086e-05,
1228
+ "loss": 0.245,
1229
+ "step": 1640
1230
+ },
1231
+ {
1232
+ "epoch": 8.055197132616488,
1233
+ "grad_norm": 16.545440673828125,
1234
+ "learning_rate": 2.270011947431302e-05,
1235
+ "loss": 0.1421,
1236
+ "step": 1650
1237
+ },
1238
+ {
1239
+ "epoch": 8.058781362007169,
1240
+ "grad_norm": 19.21019744873047,
1241
+ "learning_rate": 2.250099561927519e-05,
1242
+ "loss": 0.2807,
1243
+ "step": 1660
1244
+ },
1245
+ {
1246
+ "epoch": 8.06236559139785,
1247
+ "grad_norm": 7.801510334014893,
1248
+ "learning_rate": 2.2301871764237355e-05,
1249
+ "loss": 0.2407,
1250
+ "step": 1670
1251
+ },
1252
+ {
1253
+ "epoch": 8.06594982078853,
1254
+ "grad_norm": 9.918209075927734,
1255
+ "learning_rate": 2.2102747909199524e-05,
1256
+ "loss": 0.2306,
1257
+ "step": 1680
1258
+ },
1259
+ {
1260
+ "epoch": 8.067025089605735,
1261
+ "eval_accuracy": 0.8076923076923077,
1262
+ "eval_loss": 0.7092758417129517,
1263
+ "eval_runtime": 52.4717,
1264
+ "eval_samples_per_second": 2.973,
1265
+ "eval_steps_per_second": 0.381,
1266
+ "step": 1683
1267
+ },
1268
+ {
1269
+ "epoch": 9.002508960573477,
1270
+ "grad_norm": 9.930907249450684,
1271
+ "learning_rate": 2.190362405416169e-05,
1272
+ "loss": 0.2508,
1273
+ "step": 1690
1274
+ },
1275
+ {
1276
+ "epoch": 9.006093189964158,
1277
+ "grad_norm": 0.4230400621891022,
1278
+ "learning_rate": 2.1704500199123855e-05,
1279
+ "loss": 0.1182,
1280
+ "step": 1700
1281
+ },
1282
+ {
1283
+ "epoch": 9.009677419354839,
1284
+ "grad_norm": 19.38228416442871,
1285
+ "learning_rate": 2.1505376344086024e-05,
1286
+ "loss": 0.2058,
1287
+ "step": 1710
1288
+ },
1289
+ {
1290
+ "epoch": 9.01326164874552,
1291
+ "grad_norm": 0.1429557204246521,
1292
+ "learning_rate": 2.130625248904819e-05,
1293
+ "loss": 0.1859,
1294
+ "step": 1720
1295
+ },
1296
+ {
1297
+ "epoch": 9.0168458781362,
1298
+ "grad_norm": 14.09881591796875,
1299
+ "learning_rate": 2.1107128634010358e-05,
1300
+ "loss": 0.165,
1301
+ "step": 1730
1302
+ },
1303
+ {
1304
+ "epoch": 9.020430107526881,
1305
+ "grad_norm": 13.956668853759766,
1306
+ "learning_rate": 2.0908004778972523e-05,
1307
+ "loss": 0.5844,
1308
+ "step": 1740
1309
+ },
1310
+ {
1311
+ "epoch": 9.024014336917563,
1312
+ "grad_norm": 1.542794108390808,
1313
+ "learning_rate": 2.070888092393469e-05,
1314
+ "loss": 0.2968,
1315
+ "step": 1750
1316
+ },
1317
+ {
1318
+ "epoch": 9.027598566308244,
1319
+ "grad_norm": 17.280027389526367,
1320
+ "learning_rate": 2.0509757068896854e-05,
1321
+ "loss": 0.2715,
1322
+ "step": 1760
1323
+ },
1324
+ {
1325
+ "epoch": 9.031182795698925,
1326
+ "grad_norm": 0.40804412961006165,
1327
+ "learning_rate": 2.031063321385902e-05,
1328
+ "loss": 0.2193,
1329
+ "step": 1770
1330
+ },
1331
+ {
1332
+ "epoch": 9.034767025089605,
1333
+ "grad_norm": 0.2087012380361557,
1334
+ "learning_rate": 2.0111509358821188e-05,
1335
+ "loss": 0.3301,
1336
+ "step": 1780
1337
+ },
1338
+ {
1339
+ "epoch": 9.038351254480286,
1340
+ "grad_norm": 12.486738204956055,
1341
+ "learning_rate": 1.9912385503783354e-05,
1342
+ "loss": 0.3881,
1343
+ "step": 1790
1344
+ },
1345
+ {
1346
+ "epoch": 9.041935483870967,
1347
+ "grad_norm": 9.798795700073242,
1348
+ "learning_rate": 1.9713261648745522e-05,
1349
+ "loss": 0.366,
1350
+ "step": 1800
1351
+ },
1352
+ {
1353
+ "epoch": 9.04551971326165,
1354
+ "grad_norm": 25.9205379486084,
1355
+ "learning_rate": 1.9514137793707688e-05,
1356
+ "loss": 0.3944,
1357
+ "step": 1810
1358
+ },
1359
+ {
1360
+ "epoch": 9.04910394265233,
1361
+ "grad_norm": 14.600288391113281,
1362
+ "learning_rate": 1.9315013938669853e-05,
1363
+ "loss": 0.0899,
1364
+ "step": 1820
1365
+ },
1366
+ {
1367
+ "epoch": 9.05268817204301,
1368
+ "grad_norm": 0.17624612152576447,
1369
+ "learning_rate": 1.9115890083632022e-05,
1370
+ "loss": 0.4656,
1371
+ "step": 1830
1372
+ },
1373
+ {
1374
+ "epoch": 9.056272401433691,
1375
+ "grad_norm": 7.002784252166748,
1376
+ "learning_rate": 1.8916766228594187e-05,
1377
+ "loss": 0.2776,
1378
+ "step": 1840
1379
+ },
1380
+ {
1381
+ "epoch": 9.059856630824372,
1382
+ "grad_norm": 6.815585136413574,
1383
+ "learning_rate": 1.8717642373556353e-05,
1384
+ "loss": 0.1246,
1385
+ "step": 1850
1386
+ },
1387
+ {
1388
+ "epoch": 9.063440860215053,
1389
+ "grad_norm": 25.917802810668945,
1390
+ "learning_rate": 1.8518518518518518e-05,
1391
+ "loss": 0.1041,
1392
+ "step": 1860
1393
+ },
1394
+ {
1395
+ "epoch": 9.067025089605735,
1396
+ "grad_norm": 0.36158832907676697,
1397
+ "learning_rate": 1.8319394663480684e-05,
1398
+ "loss": 0.304,
1399
+ "step": 1870
1400
+ },
1401
+ {
1402
+ "epoch": 9.067025089605735,
1403
+ "eval_accuracy": 0.8782051282051282,
1404
+ "eval_loss": 0.39389485120773315,
1405
+ "eval_runtime": 50.2949,
1406
+ "eval_samples_per_second": 3.102,
1407
+ "eval_steps_per_second": 0.398,
1408
+ "step": 1870
1409
+ },
1410
+ {
1411
+ "epoch": 10.00358422939068,
1412
+ "grad_norm": 12.316838264465332,
1413
+ "learning_rate": 1.8120270808442852e-05,
1414
+ "loss": 0.2954,
1415
+ "step": 1880
1416
+ },
1417
+ {
1418
+ "epoch": 10.007168458781361,
1419
+ "grad_norm": 8.945836067199707,
1420
+ "learning_rate": 1.7921146953405018e-05,
1421
+ "loss": 0.3398,
1422
+ "step": 1890
1423
+ },
1424
+ {
1425
+ "epoch": 10.010752688172044,
1426
+ "grad_norm": 9.732643127441406,
1427
+ "learning_rate": 1.7722023098367187e-05,
1428
+ "loss": 0.3115,
1429
+ "step": 1900
1430
+ },
1431
+ {
1432
+ "epoch": 10.014336917562725,
1433
+ "grad_norm": 19.322099685668945,
1434
+ "learning_rate": 1.7522899243329352e-05,
1435
+ "loss": 0.1925,
1436
+ "step": 1910
1437
+ },
1438
+ {
1439
+ "epoch": 10.017921146953405,
1440
+ "grad_norm": 17.751829147338867,
1441
+ "learning_rate": 1.7323775388291518e-05,
1442
+ "loss": 0.1247,
1443
+ "step": 1920
1444
+ },
1445
+ {
1446
+ "epoch": 10.021505376344086,
1447
+ "grad_norm": 0.4507576525211334,
1448
+ "learning_rate": 1.7124651533253686e-05,
1449
+ "loss": 0.183,
1450
+ "step": 1930
1451
+ },
1452
+ {
1453
+ "epoch": 10.025089605734767,
1454
+ "grad_norm": 2.866403341293335,
1455
+ "learning_rate": 1.6925527678215852e-05,
1456
+ "loss": 0.2288,
1457
+ "step": 1940
1458
+ },
1459
+ {
1460
+ "epoch": 10.028673835125447,
1461
+ "grad_norm": 1.3826611042022705,
1462
+ "learning_rate": 1.6726403823178017e-05,
1463
+ "loss": 0.2216,
1464
+ "step": 1950
1465
+ },
1466
+ {
1467
+ "epoch": 10.03225806451613,
1468
+ "grad_norm": 0.2175895720720291,
1469
+ "learning_rate": 1.6527279968140183e-05,
1470
+ "loss": 0.3319,
1471
+ "step": 1960
1472
+ },
1473
+ {
1474
+ "epoch": 10.03584229390681,
1475
+ "grad_norm": 0.07152961939573288,
1476
+ "learning_rate": 1.632815611310235e-05,
1477
+ "loss": 0.1087,
1478
+ "step": 1970
1479
+ },
1480
+ {
1481
+ "epoch": 10.039426523297491,
1482
+ "grad_norm": 12.132889747619629,
1483
+ "learning_rate": 1.6129032258064517e-05,
1484
+ "loss": 0.2449,
1485
+ "step": 1980
1486
+ },
1487
+ {
1488
+ "epoch": 10.043010752688172,
1489
+ "grad_norm": 25.55087661743164,
1490
+ "learning_rate": 1.5929908403026682e-05,
1491
+ "loss": 0.1797,
1492
+ "step": 1990
1493
+ },
1494
+ {
1495
+ "epoch": 10.046594982078853,
1496
+ "grad_norm": 1.101890206336975,
1497
+ "learning_rate": 1.573078454798885e-05,
1498
+ "loss": 0.1244,
1499
+ "step": 2000
1500
+ },
1501
+ {
1502
+ "epoch": 10.050179211469533,
1503
+ "grad_norm": 2.2634036540985107,
1504
+ "learning_rate": 1.5531660692951016e-05,
1505
+ "loss": 0.2921,
1506
+ "step": 2010
1507
+ },
1508
+ {
1509
+ "epoch": 10.053763440860216,
1510
+ "grad_norm": 0.08446018397808075,
1511
+ "learning_rate": 1.5332536837913182e-05,
1512
+ "loss": 0.3367,
1513
+ "step": 2020
1514
+ },
1515
+ {
1516
+ "epoch": 10.057347670250897,
1517
+ "grad_norm": 18.109861373901367,
1518
+ "learning_rate": 1.513341298287535e-05,
1519
+ "loss": 0.1402,
1520
+ "step": 2030
1521
+ },
1522
+ {
1523
+ "epoch": 10.060931899641577,
1524
+ "grad_norm": 4.889894485473633,
1525
+ "learning_rate": 1.4934289127837516e-05,
1526
+ "loss": 0.3135,
1527
+ "step": 2040
1528
+ },
1529
+ {
1530
+ "epoch": 10.064516129032258,
1531
+ "grad_norm": 0.5321334004402161,
1532
+ "learning_rate": 1.4735165272799681e-05,
1533
+ "loss": 0.2418,
1534
+ "step": 2050
1535
+ },
1536
+ {
1537
+ "epoch": 10.067025089605735,
1538
+ "eval_accuracy": 0.8333333333333334,
1539
+ "eval_loss": 0.5524674654006958,
1540
+ "eval_runtime": 49.8947,
1541
+ "eval_samples_per_second": 3.127,
1542
+ "eval_steps_per_second": 0.401,
1543
+ "step": 2057
1544
+ },
1545
+ {
1546
+ "epoch": 11.001075268817205,
1547
+ "grad_norm": 8.704042434692383,
1548
+ "learning_rate": 1.4536041417761847e-05,
1549
+ "loss": 0.3554,
1550
+ "step": 2060
1551
+ },
1552
+ {
1553
+ "epoch": 11.004659498207886,
1554
+ "grad_norm": 2.6651906967163086,
1555
+ "learning_rate": 1.4336917562724014e-05,
1556
+ "loss": 0.1467,
1557
+ "step": 2070
1558
+ },
1559
+ {
1560
+ "epoch": 11.008243727598567,
1561
+ "grad_norm": 8.130305290222168,
1562
+ "learning_rate": 1.4137793707686181e-05,
1563
+ "loss": 0.1653,
1564
+ "step": 2080
1565
+ },
1566
+ {
1567
+ "epoch": 11.011827956989247,
1568
+ "grad_norm": 0.1315879374742508,
1569
+ "learning_rate": 1.3938669852648348e-05,
1570
+ "loss": 0.2511,
1571
+ "step": 2090
1572
+ },
1573
+ {
1574
+ "epoch": 11.015412186379928,
1575
+ "grad_norm": 0.27568188309669495,
1576
+ "learning_rate": 1.3739545997610515e-05,
1577
+ "loss": 0.1472,
1578
+ "step": 2100
1579
+ },
1580
+ {
1581
+ "epoch": 11.018996415770609,
1582
+ "grad_norm": 0.6477882862091064,
1583
+ "learning_rate": 1.354042214257268e-05,
1584
+ "loss": 0.2646,
1585
+ "step": 2110
1586
+ },
1587
+ {
1588
+ "epoch": 11.022580645161291,
1589
+ "grad_norm": 18.025819778442383,
1590
+ "learning_rate": 1.3341298287534848e-05,
1591
+ "loss": 0.1733,
1592
+ "step": 2120
1593
+ },
1594
+ {
1595
+ "epoch": 11.026164874551972,
1596
+ "grad_norm": 4.365772724151611,
1597
+ "learning_rate": 1.3142174432497015e-05,
1598
+ "loss": 0.3097,
1599
+ "step": 2130
1600
+ },
1601
+ {
1602
+ "epoch": 11.029749103942653,
1603
+ "grad_norm": 36.36382293701172,
1604
+ "learning_rate": 1.2943050577459182e-05,
1605
+ "loss": 0.3242,
1606
+ "step": 2140
1607
+ },
1608
+ {
1609
+ "epoch": 11.033333333333333,
1610
+ "grad_norm": 0.6666226983070374,
1611
+ "learning_rate": 1.2743926722421346e-05,
1612
+ "loss": 0.0725,
1613
+ "step": 2150
1614
+ },
1615
+ {
1616
+ "epoch": 11.036917562724014,
1617
+ "grad_norm": 17.053176879882812,
1618
+ "learning_rate": 1.2544802867383513e-05,
1619
+ "loss": 0.207,
1620
+ "step": 2160
1621
+ },
1622
+ {
1623
+ "epoch": 11.040501792114695,
1624
+ "grad_norm": 24.50665283203125,
1625
+ "learning_rate": 1.2345679012345678e-05,
1626
+ "loss": 0.1859,
1627
+ "step": 2170
1628
+ },
1629
+ {
1630
+ "epoch": 11.044086021505377,
1631
+ "grad_norm": 15.833036422729492,
1632
+ "learning_rate": 1.2146555157307845e-05,
1633
+ "loss": 0.2055,
1634
+ "step": 2180
1635
+ },
1636
+ {
1637
+ "epoch": 11.047670250896058,
1638
+ "grad_norm": 4.489932537078857,
1639
+ "learning_rate": 1.1947431302270013e-05,
1640
+ "loss": 0.1705,
1641
+ "step": 2190
1642
+ },
1643
+ {
1644
+ "epoch": 11.051254480286739,
1645
+ "grad_norm": 1.9370172023773193,
1646
+ "learning_rate": 1.174830744723218e-05,
1647
+ "loss": 0.2044,
1648
+ "step": 2200
1649
+ },
1650
+ {
1651
+ "epoch": 11.05483870967742,
1652
+ "grad_norm": 12.393142700195312,
1653
+ "learning_rate": 1.1549183592194345e-05,
1654
+ "loss": 0.1342,
1655
+ "step": 2210
1656
+ },
1657
+ {
1658
+ "epoch": 11.0584229390681,
1659
+ "grad_norm": 16.235197067260742,
1660
+ "learning_rate": 1.135005973715651e-05,
1661
+ "loss": 0.2124,
1662
+ "step": 2220
1663
+ },
1664
+ {
1665
+ "epoch": 11.06200716845878,
1666
+ "grad_norm": 0.11374145746231079,
1667
+ "learning_rate": 1.1150935882118678e-05,
1668
+ "loss": 0.235,
1669
+ "step": 2230
1670
+ },
1671
+ {
1672
+ "epoch": 11.065591397849463,
1673
+ "grad_norm": 41.60282897949219,
1674
+ "learning_rate": 1.0951812027080845e-05,
1675
+ "loss": 0.2089,
1676
+ "step": 2240
1677
+ },
1678
+ {
1679
+ "epoch": 11.067025089605735,
1680
+ "eval_accuracy": 0.8589743589743589,
1681
+ "eval_loss": 0.5139220356941223,
1682
+ "eval_runtime": 50.5308,
1683
+ "eval_samples_per_second": 3.087,
1684
+ "eval_steps_per_second": 0.396,
1685
+ "step": 2244
1686
+ },
1687
+ {
1688
+ "epoch": 12.002150537634408,
1689
+ "grad_norm": 24.850767135620117,
1690
+ "learning_rate": 1.0752688172043012e-05,
1691
+ "loss": 0.258,
1692
+ "step": 2250
1693
+ },
1694
+ {
1695
+ "epoch": 12.00573476702509,
1696
+ "grad_norm": 2.8716249465942383,
1697
+ "learning_rate": 1.0553564317005179e-05,
1698
+ "loss": 0.2319,
1699
+ "step": 2260
1700
+ },
1701
+ {
1702
+ "epoch": 12.00931899641577,
1703
+ "grad_norm": 1.9340592622756958,
1704
+ "learning_rate": 1.0354440461967344e-05,
1705
+ "loss": 0.1552,
1706
+ "step": 2270
1707
+ },
1708
+ {
1709
+ "epoch": 12.012903225806452,
1710
+ "grad_norm": 0.8818116784095764,
1711
+ "learning_rate": 1.015531660692951e-05,
1712
+ "loss": 0.0837,
1713
+ "step": 2280
1714
+ },
1715
+ {
1716
+ "epoch": 12.016487455197133,
1717
+ "grad_norm": 0.1528209149837494,
1718
+ "learning_rate": 9.956192751891677e-06,
1719
+ "loss": 0.0903,
1720
+ "step": 2290
1721
+ },
1722
+ {
1723
+ "epoch": 12.020071684587814,
1724
+ "grad_norm": 0.201262429356575,
1725
+ "learning_rate": 9.757068896853844e-06,
1726
+ "loss": 0.137,
1727
+ "step": 2300
1728
+ },
1729
+ {
1730
+ "epoch": 12.023655913978494,
1731
+ "grad_norm": 23.355026245117188,
1732
+ "learning_rate": 9.557945041816011e-06,
1733
+ "loss": 0.2225,
1734
+ "step": 2310
1735
+ },
1736
+ {
1737
+ "epoch": 12.027240143369175,
1738
+ "grad_norm": 22.52867889404297,
1739
+ "learning_rate": 9.358821186778176e-06,
1740
+ "loss": 0.1936,
1741
+ "step": 2320
1742
+ },
1743
+ {
1744
+ "epoch": 12.030824372759856,
1745
+ "grad_norm": 5.563544273376465,
1746
+ "learning_rate": 9.159697331740342e-06,
1747
+ "loss": 0.3059,
1748
+ "step": 2330
1749
+ },
1750
+ {
1751
+ "epoch": 12.034408602150538,
1752
+ "grad_norm": 26.484102249145508,
1753
+ "learning_rate": 8.960573476702509e-06,
1754
+ "loss": 0.2575,
1755
+ "step": 2340
1756
+ },
1757
+ {
1758
+ "epoch": 12.037992831541219,
1759
+ "grad_norm": 0.5328412055969238,
1760
+ "learning_rate": 8.761449621664676e-06,
1761
+ "loss": 0.2211,
1762
+ "step": 2350
1763
+ },
1764
+ {
1765
+ "epoch": 12.0415770609319,
1766
+ "grad_norm": 29.48992156982422,
1767
+ "learning_rate": 8.562325766626843e-06,
1768
+ "loss": 0.2132,
1769
+ "step": 2360
1770
+ },
1771
+ {
1772
+ "epoch": 12.04516129032258,
1773
+ "grad_norm": 0.5142113566398621,
1774
+ "learning_rate": 8.363201911589009e-06,
1775
+ "loss": 0.1998,
1776
+ "step": 2370
1777
+ },
1778
+ {
1779
+ "epoch": 12.048745519713261,
1780
+ "grad_norm": 22.66319465637207,
1781
+ "learning_rate": 8.164078056551176e-06,
1782
+ "loss": 0.2969,
1783
+ "step": 2380
1784
+ },
1785
+ {
1786
+ "epoch": 12.052329749103942,
1787
+ "grad_norm": 27.633180618286133,
1788
+ "learning_rate": 7.964954201513341e-06,
1789
+ "loss": 0.1352,
1790
+ "step": 2390
1791
+ },
1792
+ {
1793
+ "epoch": 12.055913978494624,
1794
+ "grad_norm": 8.562397956848145,
1795
+ "learning_rate": 7.765830346475508e-06,
1796
+ "loss": 0.1831,
1797
+ "step": 2400
1798
+ },
1799
+ {
1800
+ "epoch": 12.059498207885305,
1801
+ "grad_norm": 8.322550773620605,
1802
+ "learning_rate": 7.566706491437675e-06,
1803
+ "loss": 0.138,
1804
+ "step": 2410
1805
+ },
1806
+ {
1807
+ "epoch": 12.063082437275986,
1808
+ "grad_norm": 24.926652908325195,
1809
+ "learning_rate": 7.367582636399841e-06,
1810
+ "loss": 0.1709,
1811
+ "step": 2420
1812
+ },
1813
+ {
1814
+ "epoch": 12.066666666666666,
1815
+ "grad_norm": 17.362070083618164,
1816
+ "learning_rate": 7.168458781362007e-06,
1817
+ "loss": 0.3158,
1818
+ "step": 2430
1819
+ },
1820
+ {
1821
+ "epoch": 12.067025089605735,
1822
+ "eval_accuracy": 0.8589743589743589,
1823
+ "eval_loss": 0.5392084717750549,
1824
+ "eval_runtime": 50.6612,
1825
+ "eval_samples_per_second": 3.079,
1826
+ "eval_steps_per_second": 0.395,
1827
+ "step": 2431
1828
+ },
1829
+ {
1830
+ "epoch": 13.003225806451614,
1831
+ "grad_norm": 17.38275146484375,
1832
+ "learning_rate": 6.969334926324174e-06,
1833
+ "loss": 0.0979,
1834
+ "step": 2440
1835
+ },
1836
+ {
1837
+ "epoch": 13.006810035842294,
1838
+ "grad_norm": 0.16219447553157806,
1839
+ "learning_rate": 6.77021107128634e-06,
1840
+ "loss": 0.1422,
1841
+ "step": 2450
1842
+ },
1843
+ {
1844
+ "epoch": 13.010394265232975,
1845
+ "grad_norm": 0.48416832089424133,
1846
+ "learning_rate": 6.5710872162485075e-06,
1847
+ "loss": 0.0831,
1848
+ "step": 2460
1849
+ },
1850
+ {
1851
+ "epoch": 13.013978494623656,
1852
+ "grad_norm": 3.7526910305023193,
1853
+ "learning_rate": 6.371963361210673e-06,
1854
+ "loss": 0.2188,
1855
+ "step": 2470
1856
+ },
1857
+ {
1858
+ "epoch": 13.017562724014336,
1859
+ "grad_norm": 0.4946393072605133,
1860
+ "learning_rate": 6.172839506172839e-06,
1861
+ "loss": 0.0438,
1862
+ "step": 2480
1863
+ },
1864
+ {
1865
+ "epoch": 13.021146953405017,
1866
+ "grad_norm": 14.273146629333496,
1867
+ "learning_rate": 5.973715651135006e-06,
1868
+ "loss": 0.1903,
1869
+ "step": 2490
1870
+ },
1871
+ {
1872
+ "epoch": 13.0247311827957,
1873
+ "grad_norm": 0.20919331908226013,
1874
+ "learning_rate": 5.7745917960971725e-06,
1875
+ "loss": 0.1862,
1876
+ "step": 2500
1877
+ },
1878
+ {
1879
+ "epoch": 13.02831541218638,
1880
+ "grad_norm": 44.509796142578125,
1881
+ "learning_rate": 5.575467941059339e-06,
1882
+ "loss": 0.3252,
1883
+ "step": 2510
1884
+ },
1885
+ {
1886
+ "epoch": 13.031899641577061,
1887
+ "grad_norm": 1.4209870100021362,
1888
+ "learning_rate": 5.376344086021506e-06,
1889
+ "loss": 0.1379,
1890
+ "step": 2520
1891
+ },
1892
+ {
1893
+ "epoch": 13.035483870967742,
1894
+ "grad_norm": 32.848148345947266,
1895
+ "learning_rate": 5.177220230983672e-06,
1896
+ "loss": 0.1815,
1897
+ "step": 2530
1898
+ },
1899
+ {
1900
+ "epoch": 13.039068100358422,
1901
+ "grad_norm": 8.186372756958008,
1902
+ "learning_rate": 4.978096375945838e-06,
1903
+ "loss": 0.1547,
1904
+ "step": 2540
1905
+ },
1906
+ {
1907
+ "epoch": 13.042652329749105,
1908
+ "grad_norm": 2.0492045879364014,
1909
+ "learning_rate": 4.7789725209080055e-06,
1910
+ "loss": 0.1371,
1911
+ "step": 2550
1912
+ },
1913
+ {
1914
+ "epoch": 13.046236559139786,
1915
+ "grad_norm": 0.1944122165441513,
1916
+ "learning_rate": 4.579848665870171e-06,
1917
+ "loss": 0.0394,
1918
+ "step": 2560
1919
+ },
1920
+ {
1921
+ "epoch": 13.049820788530466,
1922
+ "grad_norm": 0.04899689927697182,
1923
+ "learning_rate": 4.380724810832338e-06,
1924
+ "loss": 0.0782,
1925
+ "step": 2570
1926
+ },
1927
+ {
1928
+ "epoch": 13.053405017921147,
1929
+ "grad_norm": 2.100203275680542,
1930
+ "learning_rate": 4.181600955794504e-06,
1931
+ "loss": 0.2894,
1932
+ "step": 2580
1933
+ },
1934
+ {
1935
+ "epoch": 13.056989247311828,
1936
+ "grad_norm": 8.302490234375,
1937
+ "learning_rate": 3.9824771007566706e-06,
1938
+ "loss": 0.4249,
1939
+ "step": 2590
1940
+ },
1941
+ {
1942
+ "epoch": 13.060573476702508,
1943
+ "grad_norm": 0.5646193027496338,
1944
+ "learning_rate": 3.7833532457188377e-06,
1945
+ "loss": 0.1387,
1946
+ "step": 2600
1947
+ },
1948
+ {
1949
+ "epoch": 13.06415770609319,
1950
+ "grad_norm": 19.096433639526367,
1951
+ "learning_rate": 3.5842293906810035e-06,
1952
+ "loss": 0.1726,
1953
+ "step": 2610
1954
+ },
1955
+ {
1956
+ "epoch": 13.067025089605735,
1957
+ "eval_accuracy": 0.8333333333333334,
1958
+ "eval_loss": 0.5430171489715576,
1959
+ "eval_runtime": 52.1786,
1960
+ "eval_samples_per_second": 2.99,
1961
+ "eval_steps_per_second": 0.383,
1962
+ "step": 2618
1963
+ },
1964
+ {
1965
+ "epoch": 14.000716845878136,
1966
+ "grad_norm": 0.11462392657995224,
1967
+ "learning_rate": 3.38510553564317e-06,
1968
+ "loss": 0.1894,
1969
+ "step": 2620
1970
+ },
1971
+ {
1972
+ "epoch": 14.004301075268817,
1973
+ "grad_norm": 0.08781033009290695,
1974
+ "learning_rate": 3.1859816806053364e-06,
1975
+ "loss": 0.2443,
1976
+ "step": 2630
1977
+ },
1978
+ {
1979
+ "epoch": 14.007885304659498,
1980
+ "grad_norm": 54.70309066772461,
1981
+ "learning_rate": 2.986857825567503e-06,
1982
+ "loss": 0.1809,
1983
+ "step": 2640
1984
+ },
1985
+ {
1986
+ "epoch": 14.01146953405018,
1987
+ "grad_norm": 0.0693989247083664,
1988
+ "learning_rate": 2.7877339705296694e-06,
1989
+ "loss": 0.0067,
1990
+ "step": 2650
1991
+ },
1992
+ {
1993
+ "epoch": 14.01505376344086,
1994
+ "grad_norm": 0.1357724964618683,
1995
+ "learning_rate": 2.588610115491836e-06,
1996
+ "loss": 0.0544,
1997
+ "step": 2660
1998
+ },
1999
+ {
2000
+ "epoch": 14.018637992831541,
2001
+ "grad_norm": 16.85883331298828,
2002
+ "learning_rate": 2.3894862604540028e-06,
2003
+ "loss": 0.1216,
2004
+ "step": 2670
2005
+ },
2006
+ {
2007
+ "epoch": 14.022222222222222,
2008
+ "grad_norm": 7.547858715057373,
2009
+ "learning_rate": 2.190362405416169e-06,
2010
+ "loss": 0.2463,
2011
+ "step": 2680
2012
+ },
2013
+ {
2014
+ "epoch": 14.025806451612903,
2015
+ "grad_norm": 2.388237714767456,
2016
+ "learning_rate": 1.9912385503783353e-06,
2017
+ "loss": 0.1674,
2018
+ "step": 2690
2019
+ },
2020
+ {
2021
+ "epoch": 14.029390681003584,
2022
+ "grad_norm": 0.4825267493724823,
2023
+ "learning_rate": 1.7921146953405017e-06,
2024
+ "loss": 0.1415,
2025
+ "step": 2700
2026
+ },
2027
+ {
2028
+ "epoch": 14.032974910394266,
2029
+ "grad_norm": 15.646313667297363,
2030
+ "learning_rate": 1.5929908403026682e-06,
2031
+ "loss": 0.2143,
2032
+ "step": 2710
2033
+ },
2034
+ {
2035
+ "epoch": 14.036559139784947,
2036
+ "grad_norm": 0.05193038657307625,
2037
+ "learning_rate": 1.3938669852648347e-06,
2038
+ "loss": 0.1399,
2039
+ "step": 2720
2040
+ },
2041
+ {
2042
+ "epoch": 14.040143369175627,
2043
+ "grad_norm": 10.611710548400879,
2044
+ "learning_rate": 1.1947431302270014e-06,
2045
+ "loss": 0.1608,
2046
+ "step": 2730
2047
+ },
2048
+ {
2049
+ "epoch": 14.043727598566308,
2050
+ "grad_norm": 28.249521255493164,
2051
+ "learning_rate": 9.956192751891676e-07,
2052
+ "loss": 0.3168,
2053
+ "step": 2740
2054
+ },
2055
+ {
2056
+ "epoch": 14.047311827956989,
2057
+ "grad_norm": 0.169923797249794,
2058
+ "learning_rate": 7.964954201513341e-07,
2059
+ "loss": 0.2273,
2060
+ "step": 2750
2061
+ },
2062
+ {
2063
+ "epoch": 14.05089605734767,
2064
+ "grad_norm": 0.6709271669387817,
2065
+ "learning_rate": 5.973715651135007e-07,
2066
+ "loss": 0.0479,
2067
+ "step": 2760
2068
+ },
2069
+ {
2070
+ "epoch": 14.054480286738352,
2071
+ "grad_norm": 23.617549896240234,
2072
+ "learning_rate": 3.9824771007566706e-07,
2073
+ "loss": 0.1528,
2074
+ "step": 2770
2075
+ },
2076
+ {
2077
+ "epoch": 14.058064516129033,
2078
+ "grad_norm": 0.37845826148986816,
2079
+ "learning_rate": 1.9912385503783353e-07,
2080
+ "loss": 0.2054,
2081
+ "step": 2780
2082
+ },
2083
+ {
2084
+ "epoch": 14.061648745519713,
2085
+ "grad_norm": 0.18122656643390656,
2086
+ "learning_rate": 0.0,
2087
+ "loss": 0.2543,
2088
+ "step": 2790
2089
+ },
2090
+ {
2091
+ "epoch": 14.061648745519713,
2092
+ "eval_accuracy": 0.8717948717948718,
2093
+ "eval_loss": 0.49779054522514343,
2094
+ "eval_runtime": 59.5535,
2095
+ "eval_samples_per_second": 2.619,
2096
+ "eval_steps_per_second": 0.336,
2097
+ "step": 2790
2098
+ },
2099
+ {
2100
+ "epoch": 14.061648745519713,
2101
+ "step": 2790,
2102
+ "total_flos": 2.7788486409809363e+19,
2103
+ "train_loss": 0.30916261622234914,
2104
+ "train_runtime": 10278.2383,
2105
+ "train_samples_per_second": 2.172,
2106
+ "train_steps_per_second": 0.271
2107
+ },
2108
+ {
2109
+ "epoch": 14.061648745519713,
2110
+ "eval_accuracy": 0.81875,
2111
+ "eval_loss": 0.6550884246826172,
2112
+ "eval_runtime": 302.2934,
2113
+ "eval_samples_per_second": 2.646,
2114
+ "eval_steps_per_second": 0.331,
2115
+ "step": 2790
2116
+ },
2117
+ {
2118
+ "epoch": 14.061648745519713,
2119
+ "eval_accuracy": 0.81875,
2120
+ "eval_loss": 0.6550884246826172,
2121
+ "eval_runtime": 306.835,
2122
+ "eval_samples_per_second": 2.607,
2123
+ "eval_steps_per_second": 0.326,
2124
+ "step": 2790
2125
  }
2126
  ],
2127
  "logging_steps": 10,
2128
+ "max_steps": 2790,
2129
  "num_input_tokens_seen": 0,
2130
  "num_train_epochs": 9223372036854775807,
2131
  "save_steps": 500,
 
2141
  "attributes": {}
2142
  }
2143
  },
2144
+ "total_flos": 2.7788486409809363e+19,
2145
  "train_batch_size": 8,
2146
  "trial_name": null,
2147
  "trial_params": null