Awais1718 committed on
Commit 69f34ad · verified · 1 Parent(s): 530c742

End of training

Files changed (3)
  1. all_results.json +8 -0
  2. test_results.json +8 -0
  3. trainer_state.json +766 -0
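The training script itself is not part of this commit, so the sketch below is only a hedged illustration (hypothetical helper name finish_run, assumed trainer/test_dataset handles) of the transformers Trainer calls that typically emit the three files added here: save_state() writes trainer_state.json, while log_metrics()/save_metrics() write test_results.json and the combined all_results.json.

from transformers import Trainer

def finish_run(trainer: Trainer, test_dataset) -> None:
    """Hypothetical end-of-training helper; not taken from this repository."""
    trainer.save_model()   # model weights and config
    trainer.save_state()   # -> trainer_state.json (log_history, best checkpoint, ...)

    # Final evaluation; metric keys keep the default "eval_" prefix,
    # matching the eval_f1 / eval_loss keys in the files below.
    metrics = trainer.evaluate(eval_dataset=test_dataset)
    trainer.log_metrics("test", metrics)   # console/log output
    trainer.save_metrics("test", metrics)  # -> test_results.json and (combined) all_results.json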
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 9.098863636363637,
+ "eval_f1": 0.719188596491228,
+ "eval_loss": 0.9494466781616211,
+ "eval_runtime": 72.4119,
+ "eval_samples_per_second": 2.651,
+ "eval_steps_per_second": 0.331
+ }
test_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 9.098863636363637,
+ "eval_f1": 0.719188596491228,
+ "eval_loss": 0.9494466781616211,
+ "eval_runtime": 72.4119,
+ "eval_samples_per_second": 2.651,
+ "eval_steps_per_second": 0.331
+ }
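all_results.json and test_results.json carry identical numbers (eval_f1 ≈ 0.7192, eval_loss ≈ 0.9494), and they match the best_metric recorded in trainer_state.json below, which suggests the final test pass was run with the best checkpoint loaded. A minimal sanity check, assuming the two JSON files from this commit sit in the working directory:

import json

with open("all_results.json") as f:
    all_results = json.load(f)
with open("test_results.json") as f:
    test_results = json.load(f)

# For this commit the two files should carry the same final metrics.
assert all_results == test_results
print(f"eval_f1   = {test_results['eval_f1']:.4f}")    # ~0.7192
print(f"eval_loss = {test_results['eval_loss']:.4f}")  # ~0.9494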
trainer_state.json ADDED
@@ -0,0 +1,766 @@
+ {
+ "best_metric": 0.719188596491228,
+ "best_model_checkpoint": "videomae-base-finetuned-kinetics-finetuned-shoplifting-dataset/checkpoint-353",
+ "epoch": 9.098863636363637,
+ "eval_steps": 500,
+ "global_step": 880,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.011363636363636364,
+ "grad_norm": 0.7822037935256958,
+ "learning_rate": 5.681818181818182e-06,
+ "loss": 0.1251,
+ "step": 10
+ },
+ {
+ "epoch": 0.022727272727272728,
+ "grad_norm": 6.924298286437988,
+ "learning_rate": 1.1363636363636365e-05,
+ "loss": 0.2906,
+ "step": 20
+ },
+ {
+ "epoch": 0.03409090909090909,
+ "grad_norm": 0.3597455322742462,
+ "learning_rate": 1.7045454545454546e-05,
+ "loss": 0.1046,
+ "step": 30
+ },
+ {
+ "epoch": 0.045454545454545456,
+ "grad_norm": 20.039451599121094,
+ "learning_rate": 2.272727272727273e-05,
+ "loss": 0.3495,
+ "step": 40
+ },
+ {
+ "epoch": 0.056818181818181816,
+ "grad_norm": 19.18282127380371,
+ "learning_rate": 2.8409090909090912e-05,
+ "loss": 0.3138,
+ "step": 50
+ },
+ {
+ "epoch": 0.06818181818181818,
+ "grad_norm": 6.543797016143799,
+ "learning_rate": 3.409090909090909e-05,
+ "loss": 0.2203,
+ "step": 60
+ },
+ {
+ "epoch": 0.07954545454545454,
+ "grad_norm": 9.147849082946777,
+ "learning_rate": 3.9772727272727275e-05,
+ "loss": 0.3068,
+ "step": 70
+ },
+ {
+ "epoch": 0.09090909090909091,
+ "grad_norm": 1.336655616760254,
+ "learning_rate": 4.545454545454546e-05,
+ "loss": 0.1824,
+ "step": 80
+ },
+ {
+ "epoch": 0.10113636363636364,
+ "eval_f1": 0.6832431989511635,
+ "eval_loss": 1.0067527294158936,
+ "eval_runtime": 76.7216,
+ "eval_samples_per_second": 2.503,
+ "eval_steps_per_second": 0.313,
+ "step": 89
+ },
+ {
+ "epoch": 1.0011363636363637,
+ "grad_norm": 8.309718132019043,
+ "learning_rate": 4.9873737373737375e-05,
+ "loss": 0.2712,
+ "step": 90
+ },
+ {
+ "epoch": 1.0125,
+ "grad_norm": 1.4587355852127075,
+ "learning_rate": 4.9242424242424245e-05,
+ "loss": 0.1706,
+ "step": 100
+ },
+ {
+ "epoch": 1.0238636363636364,
+ "grad_norm": 24.91333770751953,
+ "learning_rate": 4.8611111111111115e-05,
+ "loss": 0.2993,
+ "step": 110
+ },
+ {
+ "epoch": 1.0352272727272727,
+ "grad_norm": 2.1657192707061768,
+ "learning_rate": 4.797979797979798e-05,
+ "loss": 0.1297,
+ "step": 120
+ },
+ {
+ "epoch": 1.0465909090909091,
+ "grad_norm": 0.5277124643325806,
+ "learning_rate": 4.7348484848484855e-05,
+ "loss": 0.2617,
+ "step": 130
+ },
+ {
+ "epoch": 1.0579545454545454,
+ "grad_norm": 1.7243237495422363,
+ "learning_rate": 4.671717171717172e-05,
+ "loss": 0.1424,
+ "step": 140
+ },
+ {
+ "epoch": 1.0693181818181818,
+ "grad_norm": 1.7403205633163452,
+ "learning_rate": 4.608585858585859e-05,
+ "loss": 0.5873,
+ "step": 150
+ },
+ {
+ "epoch": 1.080681818181818,
+ "grad_norm": 2.7504332065582275,
+ "learning_rate": 4.545454545454546e-05,
+ "loss": 0.2966,
+ "step": 160
+ },
+ {
+ "epoch": 1.0920454545454545,
+ "grad_norm": 6.052179336547852,
+ "learning_rate": 4.482323232323233e-05,
+ "loss": 0.334,
+ "step": 170
+ },
+ {
+ "epoch": 1.1,
+ "eval_f1": 0.6804519337964154,
+ "eval_loss": 0.9260269999504089,
+ "eval_runtime": 73.8445,
+ "eval_samples_per_second": 2.6,
+ "eval_steps_per_second": 0.325,
+ "step": 177
+ },
+ {
+ "epoch": 2.003409090909091,
+ "grad_norm": 0.761626124382019,
+ "learning_rate": 4.41919191919192e-05,
+ "loss": 0.6221,
+ "step": 180
+ },
+ {
+ "epoch": 2.014772727272727,
+ "grad_norm": 0.5840346813201904,
+ "learning_rate": 4.356060606060606e-05,
+ "loss": 0.1877,
+ "step": 190
+ },
+ {
+ "epoch": 2.026136363636364,
+ "grad_norm": 1.2796881198883057,
+ "learning_rate": 4.292929292929293e-05,
+ "loss": 0.1515,
+ "step": 200
+ },
+ {
+ "epoch": 2.0375,
+ "grad_norm": 14.390783309936523,
+ "learning_rate": 4.2297979797979795e-05,
+ "loss": 0.1208,
+ "step": 210
+ },
+ {
+ "epoch": 2.0488636363636363,
+ "grad_norm": 8.565354347229004,
+ "learning_rate": 4.166666666666667e-05,
+ "loss": 0.1364,
+ "step": 220
+ },
+ {
+ "epoch": 2.0602272727272726,
+ "grad_norm": 39.79494094848633,
+ "learning_rate": 4.1035353535353535e-05,
+ "loss": 0.2243,
+ "step": 230
+ },
+ {
+ "epoch": 2.0715909090909093,
+ "grad_norm": 11.125761985778809,
+ "learning_rate": 4.0404040404040405e-05,
+ "loss": 0.1289,
+ "step": 240
+ },
+ {
+ "epoch": 2.0829545454545455,
+ "grad_norm": 0.10636977106332779,
+ "learning_rate": 3.9772727272727275e-05,
+ "loss": 0.2634,
+ "step": 250
+ },
+ {
+ "epoch": 2.0943181818181817,
+ "grad_norm": 6.6989617347717285,
+ "learning_rate": 3.9141414141414145e-05,
+ "loss": 0.2202,
+ "step": 260
+ },
+ {
+ "epoch": 2.1,
+ "eval_f1": 0.7139075032998273,
+ "eval_loss": 0.9855768084526062,
+ "eval_runtime": 80.7383,
+ "eval_samples_per_second": 2.378,
+ "eval_steps_per_second": 0.297,
+ "step": 265
+ },
+ {
+ "epoch": 3.0056818181818183,
+ "grad_norm": 7.529367446899414,
+ "learning_rate": 3.8510101010101015e-05,
+ "loss": 0.1857,
+ "step": 270
+ },
+ {
+ "epoch": 3.0170454545454546,
+ "grad_norm": 1.7394098043441772,
+ "learning_rate": 3.787878787878788e-05,
+ "loss": 0.1633,
+ "step": 280
+ },
+ {
+ "epoch": 3.028409090909091,
+ "grad_norm": 35.92728805541992,
+ "learning_rate": 3.724747474747475e-05,
+ "loss": 0.147,
+ "step": 290
+ },
+ {
+ "epoch": 3.039772727272727,
+ "grad_norm": 6.699644565582275,
+ "learning_rate": 3.661616161616162e-05,
+ "loss": 0.1791,
+ "step": 300
+ },
+ {
+ "epoch": 3.0511363636363638,
+ "grad_norm": 7.0385894775390625,
+ "learning_rate": 3.598484848484849e-05,
+ "loss": 0.148,
+ "step": 310
+ },
+ {
+ "epoch": 3.0625,
+ "grad_norm": 3.4319474697113037,
+ "learning_rate": 3.535353535353535e-05,
+ "loss": 0.1414,
+ "step": 320
+ },
+ {
+ "epoch": 3.0738636363636362,
+ "grad_norm": 1.3963415622711182,
+ "learning_rate": 3.472222222222222e-05,
+ "loss": 0.0888,
+ "step": 330
+ },
+ {
+ "epoch": 3.085227272727273,
+ "grad_norm": 0.17751815915107727,
+ "learning_rate": 3.409090909090909e-05,
+ "loss": 0.1007,
+ "step": 340
+ },
+ {
+ "epoch": 3.096590909090909,
+ "grad_norm": 8.843611717224121,
+ "learning_rate": 3.345959595959596e-05,
+ "loss": 0.2074,
+ "step": 350
+ },
+ {
+ "epoch": 3.1,
+ "eval_f1": 0.719188596491228,
+ "eval_loss": 0.9494466781616211,
+ "eval_runtime": 66.0371,
+ "eval_samples_per_second": 2.907,
+ "eval_steps_per_second": 0.363,
+ "step": 353
+ },
+ {
+ "epoch": 4.007954545454545,
+ "grad_norm": 0.11681011319160461,
+ "learning_rate": 3.282828282828283e-05,
+ "loss": 0.1654,
+ "step": 360
+ },
+ {
+ "epoch": 4.019318181818182,
+ "grad_norm": 0.03579840064048767,
+ "learning_rate": 3.2196969696969696e-05,
+ "loss": 0.0781,
+ "step": 370
+ },
+ {
+ "epoch": 4.030681818181818,
+ "grad_norm": 0.28152555227279663,
+ "learning_rate": 3.1565656565656566e-05,
+ "loss": 0.0961,
+ "step": 380
+ },
+ {
+ "epoch": 4.0420454545454545,
+ "grad_norm": 0.36765220761299133,
+ "learning_rate": 3.0934343434343436e-05,
+ "loss": 0.0591,
+ "step": 390
+ },
+ {
+ "epoch": 4.053409090909091,
+ "grad_norm": 13.338018417358398,
+ "learning_rate": 3.0303030303030306e-05,
+ "loss": 0.0945,
+ "step": 400
+ },
+ {
+ "epoch": 4.064772727272727,
+ "grad_norm": 9.662626266479492,
+ "learning_rate": 2.9671717171717172e-05,
+ "loss": 0.1343,
+ "step": 410
+ },
+ {
+ "epoch": 4.076136363636364,
+ "grad_norm": 1.916236400604248,
+ "learning_rate": 2.904040404040404e-05,
+ "loss": 0.3043,
+ "step": 420
+ },
+ {
+ "epoch": 4.0875,
+ "grad_norm": 12.770139694213867,
+ "learning_rate": 2.8409090909090912e-05,
+ "loss": 0.2391,
+ "step": 430
+ },
+ {
+ "epoch": 4.098863636363636,
+ "grad_norm": 7.032412052154541,
+ "learning_rate": 2.777777777777778e-05,
+ "loss": 0.0916,
+ "step": 440
+ },
+ {
+ "epoch": 4.1,
+ "eval_f1": 0.671078431372549,
+ "eval_loss": 1.3867233991622925,
+ "eval_runtime": 69.8609,
+ "eval_samples_per_second": 2.748,
+ "eval_steps_per_second": 0.344,
+ "step": 441
+ },
+ {
+ "epoch": 5.010227272727272,
+ "grad_norm": 9.611908912658691,
+ "learning_rate": 2.714646464646465e-05,
+ "loss": 0.1568,
+ "step": 450
+ },
+ {
+ "epoch": 5.021590909090909,
+ "grad_norm": 3.7834885120391846,
+ "learning_rate": 2.6515151515151516e-05,
+ "loss": 0.3173,
+ "step": 460
+ },
+ {
+ "epoch": 5.032954545454546,
+ "grad_norm": 0.057657867670059204,
+ "learning_rate": 2.5883838383838382e-05,
+ "loss": 0.1004,
+ "step": 470
+ },
+ {
+ "epoch": 5.0443181818181815,
+ "grad_norm": 1.133195161819458,
+ "learning_rate": 2.5252525252525256e-05,
+ "loss": 0.0869,
+ "step": 480
+ },
+ {
+ "epoch": 5.055681818181818,
+ "grad_norm": 6.4852166175842285,
+ "learning_rate": 2.4621212121212123e-05,
+ "loss": 0.0964,
+ "step": 490
+ },
+ {
+ "epoch": 5.067045454545455,
+ "grad_norm": 0.01637670025229454,
+ "learning_rate": 2.398989898989899e-05,
+ "loss": 0.0511,
+ "step": 500
+ },
+ {
+ "epoch": 5.078409090909091,
+ "grad_norm": 20.669986724853516,
+ "learning_rate": 2.335858585858586e-05,
+ "loss": 0.2204,
+ "step": 510
+ },
+ {
+ "epoch": 5.089772727272727,
+ "grad_norm": 0.07228785008192062,
+ "learning_rate": 2.272727272727273e-05,
+ "loss": 0.1092,
+ "step": 520
+ },
+ {
+ "epoch": 5.1,
+ "eval_f1": 0.6919812909064125,
+ "eval_loss": 1.3757562637329102,
+ "eval_runtime": 68.2311,
+ "eval_samples_per_second": 2.814,
+ "eval_steps_per_second": 0.352,
+ "step": 529
+ },
+ {
+ "epoch": 6.0011363636363635,
+ "grad_norm": 0.03453134745359421,
+ "learning_rate": 2.20959595959596e-05,
+ "loss": 0.1523,
+ "step": 530
+ },
+ {
+ "epoch": 6.0125,
+ "grad_norm": 0.4955722689628601,
+ "learning_rate": 2.1464646464646466e-05,
+ "loss": 0.1371,
+ "step": 540
+ },
+ {
+ "epoch": 6.023863636363636,
+ "grad_norm": 0.6915594339370728,
+ "learning_rate": 2.0833333333333336e-05,
+ "loss": 0.0278,
+ "step": 550
+ },
+ {
+ "epoch": 6.035227272727273,
+ "grad_norm": 0.030776426196098328,
+ "learning_rate": 2.0202020202020203e-05,
+ "loss": 0.0441,
+ "step": 560
+ },
+ {
+ "epoch": 6.046590909090909,
+ "grad_norm": 0.008703567087650299,
+ "learning_rate": 1.9570707070707073e-05,
+ "loss": 0.0117,
+ "step": 570
+ },
+ {
+ "epoch": 6.057954545454545,
+ "grad_norm": 12.471619606018066,
+ "learning_rate": 1.893939393939394e-05,
+ "loss": 0.0576,
+ "step": 580
+ },
+ {
+ "epoch": 6.069318181818182,
+ "grad_norm": 0.023786788806319237,
+ "learning_rate": 1.830808080808081e-05,
+ "loss": 0.0282,
+ "step": 590
+ },
+ {
+ "epoch": 6.0806818181818185,
+ "grad_norm": 0.24424441158771515,
+ "learning_rate": 1.7676767676767676e-05,
+ "loss": 0.1136,
+ "step": 600
+ },
+ {
+ "epoch": 6.092045454545454,
+ "grad_norm": 0.014285405166447163,
+ "learning_rate": 1.7045454545454546e-05,
+ "loss": 0.0804,
+ "step": 610
+ },
+ {
+ "epoch": 6.1,
+ "eval_f1": 0.6967864271457085,
+ "eval_loss": 1.3787533044815063,
+ "eval_runtime": 70.4409,
+ "eval_samples_per_second": 2.726,
+ "eval_steps_per_second": 0.341,
+ "step": 617
+ },
+ {
+ "epoch": 7.0034090909090905,
+ "grad_norm": 0.14964497089385986,
+ "learning_rate": 1.6414141414141416e-05,
+ "loss": 0.0217,
+ "step": 620
+ },
+ {
+ "epoch": 7.014772727272727,
+ "grad_norm": 22.6522274017334,
+ "learning_rate": 1.5782828282828283e-05,
+ "loss": 0.2281,
+ "step": 630
+ },
+ {
+ "epoch": 7.026136363636364,
+ "grad_norm": 0.24736760556697845,
+ "learning_rate": 1.5151515151515153e-05,
+ "loss": 0.0425,
+ "step": 640
+ },
+ {
+ "epoch": 7.0375,
+ "grad_norm": 0.11569665372371674,
+ "learning_rate": 1.452020202020202e-05,
+ "loss": 0.004,
+ "step": 650
+ },
+ {
+ "epoch": 7.048863636363636,
+ "grad_norm": 7.235473155975342,
+ "learning_rate": 1.388888888888889e-05,
+ "loss": 0.0268,
+ "step": 660
+ },
+ {
+ "epoch": 7.060227272727273,
+ "grad_norm": 0.006639318075031042,
+ "learning_rate": 1.3257575757575758e-05,
+ "loss": 0.0387,
+ "step": 670
+ },
+ {
+ "epoch": 7.071590909090909,
+ "grad_norm": 0.07593043893575668,
+ "learning_rate": 1.2626262626262628e-05,
+ "loss": 0.0685,
+ "step": 680
+ },
+ {
+ "epoch": 7.0829545454545455,
+ "grad_norm": 0.02903686836361885,
+ "learning_rate": 1.1994949494949495e-05,
+ "loss": 0.0889,
+ "step": 690
+ },
+ {
+ "epoch": 7.094318181818182,
+ "grad_norm": 0.10437527298927307,
+ "learning_rate": 1.1363636363636365e-05,
+ "loss": 0.0654,
+ "step": 700
+ },
+ {
+ "epoch": 7.1,
+ "eval_f1": 0.697334455667789,
+ "eval_loss": 1.2969599962234497,
+ "eval_runtime": 75.4359,
+ "eval_samples_per_second": 2.545,
+ "eval_steps_per_second": 0.318,
+ "step": 705
+ },
+ {
+ "epoch": 8.005681818181818,
+ "grad_norm": 0.3526234030723572,
+ "learning_rate": 1.0732323232323233e-05,
+ "loss": 0.1004,
+ "step": 710
+ },
+ {
+ "epoch": 8.017045454545455,
+ "grad_norm": 0.544172465801239,
+ "learning_rate": 1.0101010101010101e-05,
+ "loss": 0.0323,
+ "step": 720
+ },
+ {
+ "epoch": 8.028409090909092,
+ "grad_norm": 27.582374572753906,
+ "learning_rate": 9.46969696969697e-06,
+ "loss": 0.0817,
+ "step": 730
+ },
+ {
+ "epoch": 8.039772727272727,
+ "grad_norm": 0.03466358035802841,
+ "learning_rate": 8.838383838383838e-06,
+ "loss": 0.033,
+ "step": 740
+ },
+ {
+ "epoch": 8.051136363636363,
+ "grad_norm": 0.2677808105945587,
+ "learning_rate": 8.207070707070708e-06,
+ "loss": 0.0078,
+ "step": 750
+ },
+ {
+ "epoch": 8.0625,
+ "grad_norm": 0.026267457753419876,
+ "learning_rate": 7.5757575757575764e-06,
+ "loss": 0.1072,
+ "step": 760
+ },
+ {
+ "epoch": 8.073863636363637,
+ "grad_norm": 0.036878038197755814,
+ "learning_rate": 6.944444444444445e-06,
+ "loss": 0.175,
+ "step": 770
+ },
+ {
+ "epoch": 8.085227272727273,
+ "grad_norm": 3.7212748527526855,
+ "learning_rate": 6.313131313131314e-06,
+ "loss": 0.0646,
+ "step": 780
+ },
+ {
+ "epoch": 8.096590909090908,
+ "grad_norm": 0.06381477415561676,
+ "learning_rate": 5.681818181818182e-06,
+ "loss": 0.0065,
+ "step": 790
+ },
+ {
+ "epoch": 8.1,
+ "eval_f1": 0.7006284557943339,
+ "eval_loss": 1.4780327081680298,
+ "eval_runtime": 71.4359,
+ "eval_samples_per_second": 2.688,
+ "eval_steps_per_second": 0.336,
+ "step": 793
+ },
+ {
+ "epoch": 9.007954545454545,
+ "grad_norm": 0.14042355120182037,
+ "learning_rate": 5.050505050505051e-06,
+ "loss": 0.0604,
+ "step": 800
+ },
+ {
+ "epoch": 9.019318181818182,
+ "grad_norm": 0.007114021107554436,
+ "learning_rate": 4.419191919191919e-06,
+ "loss": 0.1103,
+ "step": 810
+ },
+ {
+ "epoch": 9.030681818181819,
+ "grad_norm": 3.3601884841918945,
+ "learning_rate": 3.7878787878787882e-06,
+ "loss": 0.1139,
+ "step": 820
+ },
+ {
+ "epoch": 9.042045454545455,
+ "grad_norm": 0.0043687219731509686,
+ "learning_rate": 3.156565656565657e-06,
+ "loss": 0.0034,
+ "step": 830
+ },
+ {
+ "epoch": 9.05340909090909,
+ "grad_norm": 0.04162032902240753,
+ "learning_rate": 2.5252525252525253e-06,
+ "loss": 0.0108,
+ "step": 840
+ },
+ {
+ "epoch": 9.064772727272727,
+ "grad_norm": 1.4939607381820679,
+ "learning_rate": 1.8939393939393941e-06,
+ "loss": 0.0889,
+ "step": 850
+ },
+ {
+ "epoch": 9.076136363636364,
+ "grad_norm": 0.6243687868118286,
+ "learning_rate": 1.2626262626262627e-06,
+ "loss": 0.0082,
+ "step": 860
+ },
+ {
+ "epoch": 9.0875,
+ "grad_norm": 0.010640038177371025,
+ "learning_rate": 6.313131313131313e-07,
+ "loss": 0.0318,
+ "step": 870
+ },
+ {
+ "epoch": 9.098863636363637,
+ "grad_norm": 0.6547775864601135,
+ "learning_rate": 0.0,
+ "loss": 0.0024,
+ "step": 880
+ },
+ {
+ "epoch": 9.098863636363637,
+ "eval_f1": 0.7006284557943339,
+ "eval_loss": 1.4463977813720703,
+ "eval_runtime": 70.5842,
+ "eval_samples_per_second": 2.72,
+ "eval_steps_per_second": 0.34,
+ "step": 880
+ },
+ {
+ "epoch": 9.098863636363637,
+ "step": 880,
+ "total_flos": 8.763572257754776e+18,
+ "train_loss": 0.14181382804474033,
+ "train_runtime": 4562.3352,
+ "train_samples_per_second": 1.543,
+ "train_steps_per_second": 0.193
+ },
+ {
+ "epoch": 9.098863636363637,
+ "eval_f1": 0.719188596491228,
+ "eval_loss": 0.9494466781616211,
+ "eval_runtime": 71.753,
+ "eval_samples_per_second": 2.676,
+ "eval_steps_per_second": 0.334,
+ "step": 880
+ },
+ {
+ "epoch": 9.098863636363637,
+ "eval_f1": 0.719188596491228,
+ "eval_loss": 0.9494466781616211,
+ "eval_runtime": 72.4119,
+ "eval_samples_per_second": 2.651,
+ "eval_steps_per_second": 0.331,
+ "step": 880
+ }
+ ],
+ "logging_steps": 10,
+ "max_steps": 880,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 9223372036854775807,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ },
+ "total_flos": 8.763572257754776e+18,
+ "train_batch_size": 8,
+ "trial_name": null,
+ "trial_params": null
+ }
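The log_history above holds a training entry every 10 steps (logging_steps) and one evaluation per epoch chunk; the best validation F1, 0.7192 at step 353, is what best_model_checkpoint points to. A short script to recover that from the file, assuming trainer_state.json has been downloaded to the working directory:

import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the records that contain evaluation metrics.
evals = [e for e in state["log_history"] if "eval_f1" in e]
best = max(evals, key=lambda e: e["eval_f1"])

print(f"{len(evals)} evaluation records")
print(f"best eval_f1 = {best['eval_f1']:.4f} at step {best['step']}")  # expected: step 353
print("best_model_checkpoint:", state["best_model_checkpoint"])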