{
"best_metric": 0.6728110599078341,
"best_model_checkpoint": "convnextv2-base-22k-224-finetuned-tekno24-highdata-90/checkpoint-1553",
"epoch": 29.724770642201836,
"eval_steps": 500,
"global_step": 2430,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.12232415902140673,
"grad_norm": 15.331552505493164,
"learning_rate": 2.05761316872428e-06,
"loss": 1.3954,
"step": 10
},
{
"epoch": 0.24464831804281345,
"grad_norm": 13.410693168640137,
"learning_rate": 4.11522633744856e-06,
"loss": 1.3826,
"step": 20
},
{
"epoch": 0.3669724770642202,
"grad_norm": 14.4244966506958,
"learning_rate": 5.967078189300412e-06,
"loss": 1.3692,
"step": 30
},
{
"epoch": 0.4892966360856269,
"grad_norm": 13.09442138671875,
"learning_rate": 8.02469135802469e-06,
"loss": 1.3596,
"step": 40
},
{
"epoch": 0.6116207951070336,
"grad_norm": 14.526571273803711,
"learning_rate": 1.008230452674897e-05,
"loss": 1.3519,
"step": 50
},
{
"epoch": 0.7339449541284404,
"grad_norm": 21.34126091003418,
"learning_rate": 1.2139917695473252e-05,
"loss": 1.3338,
"step": 60
},
{
"epoch": 0.8562691131498471,
"grad_norm": 17.52889060974121,
"learning_rate": 1.3991769547325103e-05,
"loss": 1.3236,
"step": 70
},
{
"epoch": 0.9785932721712538,
"grad_norm": 17.072633743286133,
"learning_rate": 1.604938271604938e-05,
"loss": 1.3277,
"step": 80
},
{
"epoch": 0.9908256880733946,
"eval_accuracy": 0.4147465437788018,
"eval_f1": 0.32804846883348265,
"eval_loss": 1.2870396375656128,
"eval_precision": 0.3713606931606389,
"eval_recall": 0.4147465437788018,
"eval_runtime": 2.1883,
"eval_samples_per_second": 99.163,
"eval_steps_per_second": 6.398,
"step": 81
},
{
"epoch": 1.1009174311926606,
"grad_norm": 22.833921432495117,
"learning_rate": 1.8106995884773663e-05,
"loss": 1.3091,
"step": 90
},
{
"epoch": 1.2232415902140672,
"grad_norm": 10.650703430175781,
"learning_rate": 2.016460905349794e-05,
"loss": 1.3123,
"step": 100
},
{
"epoch": 1.345565749235474,
"grad_norm": 9.893655776977539,
"learning_rate": 2.2222222222222223e-05,
"loss": 1.2944,
"step": 110
},
{
"epoch": 1.4678899082568808,
"grad_norm": 54.886756896972656,
"learning_rate": 2.3868312757201647e-05,
"loss": 1.2842,
"step": 120
},
{
"epoch": 1.5902140672782874,
"grad_norm": 16.818227767944336,
"learning_rate": 2.5925925925925925e-05,
"loss": 1.2572,
"step": 130
},
{
"epoch": 1.7125382262996942,
"grad_norm": 9.368868827819824,
"learning_rate": 2.7983539094650207e-05,
"loss": 1.2222,
"step": 140
},
{
"epoch": 1.834862385321101,
"grad_norm": 12.728253364562988,
"learning_rate": 3.0041152263374488e-05,
"loss": 1.2346,
"step": 150
},
{
"epoch": 1.9571865443425076,
"grad_norm": 13.562764167785645,
"learning_rate": 3.209876543209876e-05,
"loss": 1.2024,
"step": 160
},
{
"epoch": 1.9938837920489296,
"eval_accuracy": 0.47465437788018433,
"eval_f1": 0.3906684622928812,
"eval_loss": 1.0890263319015503,
"eval_precision": 0.49441907661085743,
"eval_recall": 0.47465437788018433,
"eval_runtime": 2.1621,
"eval_samples_per_second": 100.363,
"eval_steps_per_second": 6.475,
"step": 163
},
{
"epoch": 2.079510703363914,
"grad_norm": 10.129838943481445,
"learning_rate": 3.4156378600823045e-05,
"loss": 1.2231,
"step": 170
},
{
"epoch": 2.2018348623853212,
"grad_norm": 10.82052230834961,
"learning_rate": 3.6213991769547327e-05,
"loss": 1.2082,
"step": 180
},
{
"epoch": 2.324159021406728,
"grad_norm": 15.055948257446289,
"learning_rate": 3.82716049382716e-05,
"loss": 1.1827,
"step": 190
},
{
"epoch": 2.4464831804281344,
"grad_norm": 14.08010196685791,
"learning_rate": 4.032921810699588e-05,
"loss": 1.1564,
"step": 200
},
{
"epoch": 2.5688073394495414,
"grad_norm": 19.530576705932617,
"learning_rate": 4.2386831275720165e-05,
"loss": 1.1508,
"step": 210
},
{
"epoch": 2.691131498470948,
"grad_norm": 17.710391998291016,
"learning_rate": 4.4444444444444447e-05,
"loss": 1.2407,
"step": 220
},
{
"epoch": 2.8134556574923546,
"grad_norm": 19.007694244384766,
"learning_rate": 4.650205761316873e-05,
"loss": 1.1333,
"step": 230
},
{
"epoch": 2.9357798165137616,
"grad_norm": 11.404845237731934,
"learning_rate": 4.855967078189301e-05,
"loss": 1.2067,
"step": 240
},
{
"epoch": 2.996941896024465,
"eval_accuracy": 0.543778801843318,
"eval_f1": 0.49651048113106194,
"eval_loss": 1.060142993927002,
"eval_precision": 0.5083802142557903,
"eval_recall": 0.543778801843318,
"eval_runtime": 2.1941,
"eval_samples_per_second": 98.904,
"eval_steps_per_second": 6.381,
"step": 245
},
{
"epoch": 3.058103975535168,
"grad_norm": 7.823658466339111,
"learning_rate": 4.993141289437586e-05,
"loss": 1.1512,
"step": 250
},
{
"epoch": 3.180428134556575,
"grad_norm": 7.272976875305176,
"learning_rate": 4.970278920896205e-05,
"loss": 1.2064,
"step": 260
},
{
"epoch": 3.302752293577982,
"grad_norm": 8.453497886657715,
"learning_rate": 4.9474165523548245e-05,
"loss": 1.1758,
"step": 270
},
{
"epoch": 3.4250764525993884,
"grad_norm": 6.063063144683838,
"learning_rate": 4.924554183813443e-05,
"loss": 1.1481,
"step": 280
},
{
"epoch": 3.547400611620795,
"grad_norm": 11.22547435760498,
"learning_rate": 4.901691815272063e-05,
"loss": 1.1818,
"step": 290
},
{
"epoch": 3.669724770642202,
"grad_norm": 8.580599784851074,
"learning_rate": 4.8788294467306815e-05,
"loss": 1.1345,
"step": 300
},
{
"epoch": 3.7920489296636086,
"grad_norm": 16.203880310058594,
"learning_rate": 4.855967078189301e-05,
"loss": 1.1451,
"step": 310
},
{
"epoch": 3.914373088685015,
"grad_norm": 7.94223165512085,
"learning_rate": 4.83310470964792e-05,
"loss": 1.206,
"step": 320
},
{
"epoch": 4.0,
"eval_accuracy": 0.5391705069124424,
"eval_f1": 0.515930279628824,
"eval_loss": 1.0142838954925537,
"eval_precision": 0.5180022054326838,
"eval_recall": 0.5391705069124424,
"eval_runtime": 2.1844,
"eval_samples_per_second": 99.339,
"eval_steps_per_second": 6.409,
"step": 327
},
{
"epoch": 4.036697247706422,
"grad_norm": 7.936139106750488,
"learning_rate": 4.810242341106539e-05,
"loss": 1.1749,
"step": 330
},
{
"epoch": 4.159021406727828,
"grad_norm": 6.124804496765137,
"learning_rate": 4.787379972565158e-05,
"loss": 1.1473,
"step": 340
},
{
"epoch": 4.281345565749236,
"grad_norm": 9.12104320526123,
"learning_rate": 4.764517604023777e-05,
"loss": 1.1809,
"step": 350
},
{
"epoch": 4.4036697247706424,
"grad_norm": 6.582239627838135,
"learning_rate": 4.741655235482396e-05,
"loss": 1.1728,
"step": 360
},
{
"epoch": 4.525993883792049,
"grad_norm": 7.613648891448975,
"learning_rate": 4.718792866941015e-05,
"loss": 1.1315,
"step": 370
},
{
"epoch": 4.648318042813456,
"grad_norm": 11.376667976379395,
"learning_rate": 4.6959304983996346e-05,
"loss": 1.1481,
"step": 380
},
{
"epoch": 4.770642201834862,
"grad_norm": 6.794886112213135,
"learning_rate": 4.6730681298582534e-05,
"loss": 1.0927,
"step": 390
},
{
"epoch": 4.892966360856269,
"grad_norm": 7.452205657958984,
"learning_rate": 4.650205761316873e-05,
"loss": 1.1049,
"step": 400
},
{
"epoch": 4.990825688073395,
"eval_accuracy": 0.576036866359447,
"eval_f1": 0.5450544928742728,
"eval_loss": 0.968787431716919,
"eval_precision": 0.5467442569565754,
"eval_recall": 0.576036866359447,
"eval_runtime": 2.1812,
"eval_samples_per_second": 99.488,
"eval_steps_per_second": 6.419,
"step": 408
},
{
"epoch": 5.015290519877676,
"grad_norm": 6.982997417449951,
"learning_rate": 4.6273433927754916e-05,
"loss": 1.1165,
"step": 410
},
{
"epoch": 5.137614678899083,
"grad_norm": 7.05279541015625,
"learning_rate": 4.604481024234111e-05,
"loss": 1.0755,
"step": 420
},
{
"epoch": 5.259938837920489,
"grad_norm": 9.04000186920166,
"learning_rate": 4.58161865569273e-05,
"loss": 1.1144,
"step": 430
},
{
"epoch": 5.382262996941896,
"grad_norm": 5.530284881591797,
"learning_rate": 4.558756287151349e-05,
"loss": 1.088,
"step": 440
},
{
"epoch": 5.504587155963303,
"grad_norm": 11.77010440826416,
"learning_rate": 4.535893918609968e-05,
"loss": 1.0805,
"step": 450
},
{
"epoch": 5.626911314984709,
"grad_norm": 9.45374870300293,
"learning_rate": 4.513031550068587e-05,
"loss": 1.1098,
"step": 460
},
{
"epoch": 5.749235474006117,
"grad_norm": 9.706932067871094,
"learning_rate": 4.4901691815272064e-05,
"loss": 1.0726,
"step": 470
},
{
"epoch": 5.871559633027523,
"grad_norm": 9.147931098937988,
"learning_rate": 4.467306812985825e-05,
"loss": 1.0934,
"step": 480
},
{
"epoch": 5.99388379204893,
"grad_norm": 6.612918376922607,
"learning_rate": 4.4444444444444447e-05,
"loss": 1.0931,
"step": 490
},
{
"epoch": 5.99388379204893,
"eval_accuracy": 0.5622119815668203,
"eval_f1": 0.5562044726083856,
"eval_loss": 1.0351340770721436,
"eval_precision": 0.5939199765927876,
"eval_recall": 0.5622119815668203,
"eval_runtime": 2.1678,
"eval_samples_per_second": 100.104,
"eval_steps_per_second": 6.458,
"step": 490
},
{
"epoch": 6.116207951070336,
"grad_norm": 10.358129501342773,
"learning_rate": 4.4215820759030634e-05,
"loss": 1.0469,
"step": 500
},
{
"epoch": 6.238532110091743,
"grad_norm": 5.924838542938232,
"learning_rate": 4.398719707361683e-05,
"loss": 1.0814,
"step": 510
},
{
"epoch": 6.36085626911315,
"grad_norm": 7.012035369873047,
"learning_rate": 4.3758573388203024e-05,
"loss": 1.1091,
"step": 520
},
{
"epoch": 6.483180428134556,
"grad_norm": 6.860468864440918,
"learning_rate": 4.352994970278921e-05,
"loss": 1.091,
"step": 530
},
{
"epoch": 6.605504587155964,
"grad_norm": 6.236204624176025,
"learning_rate": 4.3301326017375406e-05,
"loss": 1.1033,
"step": 540
},
{
"epoch": 6.72782874617737,
"grad_norm": 11.546574592590332,
"learning_rate": 4.3072702331961594e-05,
"loss": 1.067,
"step": 550
},
{
"epoch": 6.850152905198777,
"grad_norm": 10.157209396362305,
"learning_rate": 4.284407864654779e-05,
"loss": 1.054,
"step": 560
},
{
"epoch": 6.972477064220183,
"grad_norm": 7.421886444091797,
"learning_rate": 4.261545496113398e-05,
"loss": 1.0752,
"step": 570
},
{
"epoch": 6.996941896024465,
"eval_accuracy": 0.5898617511520737,
"eval_f1": 0.5592428280600323,
"eval_loss": 0.9370155930519104,
"eval_precision": 0.5730448203692986,
"eval_recall": 0.5898617511520737,
"eval_runtime": 2.1864,
"eval_samples_per_second": 99.25,
"eval_steps_per_second": 6.403,
"step": 572
},
{
"epoch": 7.09480122324159,
"grad_norm": 8.062494277954102,
"learning_rate": 4.2386831275720165e-05,
"loss": 1.0298,
"step": 580
},
{
"epoch": 7.217125382262997,
"grad_norm": 7.170814514160156,
"learning_rate": 4.215820759030636e-05,
"loss": 1.0451,
"step": 590
},
{
"epoch": 7.339449541284404,
"grad_norm": 7.2061662673950195,
"learning_rate": 4.192958390489255e-05,
"loss": 1.0586,
"step": 600
},
{
"epoch": 7.461773700305811,
"grad_norm": 5.97224760055542,
"learning_rate": 4.170096021947874e-05,
"loss": 1.0543,
"step": 610
},
{
"epoch": 7.584097859327217,
"grad_norm": 7.366073131561279,
"learning_rate": 4.147233653406493e-05,
"loss": 1.0903,
"step": 620
},
{
"epoch": 7.706422018348624,
"grad_norm": 6.186879634857178,
"learning_rate": 4.1243712848651125e-05,
"loss": 1.0919,
"step": 630
},
{
"epoch": 7.82874617737003,
"grad_norm": 8.208704948425293,
"learning_rate": 4.101508916323731e-05,
"loss": 1.1254,
"step": 640
},
{
"epoch": 7.951070336391437,
"grad_norm": 13.736848831176758,
"learning_rate": 4.078646547782351e-05,
"loss": 1.03,
"step": 650
},
{
"epoch": 8.0,
"eval_accuracy": 0.576036866359447,
"eval_f1": 0.5509975224553819,
"eval_loss": 0.9416872262954712,
"eval_precision": 0.5414011052041612,
"eval_recall": 0.576036866359447,
"eval_runtime": 2.1964,
"eval_samples_per_second": 98.796,
"eval_steps_per_second": 6.374,
"step": 654
},
{
"epoch": 8.073394495412844,
"grad_norm": 7.8366546630859375,
"learning_rate": 4.0557841792409695e-05,
"loss": 1.0581,
"step": 660
},
{
"epoch": 8.19571865443425,
"grad_norm": 6.876219272613525,
"learning_rate": 4.032921810699588e-05,
"loss": 1.04,
"step": 670
},
{
"epoch": 8.318042813455657,
"grad_norm": 7.931809902191162,
"learning_rate": 4.010059442158208e-05,
"loss": 1.0319,
"step": 680
},
{
"epoch": 8.440366972477065,
"grad_norm": 11.545510292053223,
"learning_rate": 3.9871970736168266e-05,
"loss": 1.0027,
"step": 690
},
{
"epoch": 8.562691131498472,
"grad_norm": 7.059225559234619,
"learning_rate": 3.964334705075446e-05,
"loss": 1.0164,
"step": 700
},
{
"epoch": 8.685015290519878,
"grad_norm": 6.870743751525879,
"learning_rate": 3.941472336534065e-05,
"loss": 1.0063,
"step": 710
},
{
"epoch": 8.807339449541285,
"grad_norm": 7.4264631271362305,
"learning_rate": 3.918609967992684e-05,
"loss": 1.0359,
"step": 720
},
{
"epoch": 8.929663608562691,
"grad_norm": 8.050666809082031,
"learning_rate": 3.895747599451303e-05,
"loss": 0.988,
"step": 730
},
{
"epoch": 8.990825688073395,
"eval_accuracy": 0.5990783410138248,
"eval_f1": 0.5772079574092954,
"eval_loss": 0.8942155838012695,
"eval_precision": 0.5818760258445811,
"eval_recall": 0.5990783410138248,
"eval_runtime": 2.178,
"eval_samples_per_second": 99.631,
"eval_steps_per_second": 6.428,
"step": 735
},
{
"epoch": 9.051987767584098,
"grad_norm": 6.86763858795166,
"learning_rate": 3.8728852309099226e-05,
"loss": 0.9927,
"step": 740
},
{
"epoch": 9.174311926605505,
"grad_norm": 7.786592483520508,
"learning_rate": 3.8500228623685414e-05,
"loss": 0.976,
"step": 750
},
{
"epoch": 9.296636085626911,
"grad_norm": 9.186685562133789,
"learning_rate": 3.82716049382716e-05,
"loss": 1.0239,
"step": 760
},
{
"epoch": 9.418960244648318,
"grad_norm": 8.555779457092285,
"learning_rate": 3.8042981252857796e-05,
"loss": 0.9526,
"step": 770
},
{
"epoch": 9.541284403669724,
"grad_norm": 9.215676307678223,
"learning_rate": 3.7814357567443984e-05,
"loss": 1.0258,
"step": 780
},
{
"epoch": 9.663608562691131,
"grad_norm": 7.625224590301514,
"learning_rate": 3.758573388203018e-05,
"loss": 1.0042,
"step": 790
},
{
"epoch": 9.785932721712538,
"grad_norm": 9.265899658203125,
"learning_rate": 3.7357110196616373e-05,
"loss": 1.008,
"step": 800
},
{
"epoch": 9.908256880733944,
"grad_norm": 7.059952735900879,
"learning_rate": 3.712848651120257e-05,
"loss": 0.9692,
"step": 810
},
{
"epoch": 9.99388379204893,
"eval_accuracy": 0.6082949308755761,
"eval_f1": 0.5936874933582452,
"eval_loss": 0.9091479182243347,
"eval_precision": 0.5980988666361812,
"eval_recall": 0.6082949308755761,
"eval_runtime": 2.1969,
"eval_samples_per_second": 98.777,
"eval_steps_per_second": 6.373,
"step": 817
},
{
"epoch": 10.030581039755353,
"grad_norm": 8.655425071716309,
"learning_rate": 3.6899862825788756e-05,
"loss": 1.0144,
"step": 820
},
{
"epoch": 10.15290519877676,
"grad_norm": 7.1922607421875,
"learning_rate": 3.6671239140374944e-05,
"loss": 0.9994,
"step": 830
},
{
"epoch": 10.275229357798166,
"grad_norm": 7.687134265899658,
"learning_rate": 3.644261545496114e-05,
"loss": 0.9879,
"step": 840
},
{
"epoch": 10.397553516819572,
"grad_norm": 9.036182403564453,
"learning_rate": 3.6213991769547327e-05,
"loss": 0.9218,
"step": 850
},
{
"epoch": 10.519877675840979,
"grad_norm": 11.018427848815918,
"learning_rate": 3.598536808413352e-05,
"loss": 1.0079,
"step": 860
},
{
"epoch": 10.642201834862385,
"grad_norm": 9.172164916992188,
"learning_rate": 3.575674439871971e-05,
"loss": 0.9795,
"step": 870
},
{
"epoch": 10.764525993883792,
"grad_norm": 10.210972785949707,
"learning_rate": 3.5528120713305904e-05,
"loss": 0.9675,
"step": 880
},
{
"epoch": 10.886850152905199,
"grad_norm": 9.860912322998047,
"learning_rate": 3.529949702789209e-05,
"loss": 0.9896,
"step": 890
},
{
"epoch": 10.996941896024465,
"eval_accuracy": 0.6036866359447005,
"eval_f1": 0.5904668698923761,
"eval_loss": 0.8689674139022827,
"eval_precision": 0.5936795435008436,
"eval_recall": 0.6036866359447005,
"eval_runtime": 2.1973,
"eval_samples_per_second": 98.756,
"eval_steps_per_second": 6.371,
"step": 899
},
{
"epoch": 11.009174311926605,
"grad_norm": 9.831244468688965,
"learning_rate": 3.5070873342478286e-05,
"loss": 0.9261,
"step": 900
},
{
"epoch": 11.131498470948012,
"grad_norm": 11.262879371643066,
"learning_rate": 3.4842249657064474e-05,
"loss": 0.9191,
"step": 910
},
{
"epoch": 11.253822629969418,
"grad_norm": 9.582681655883789,
"learning_rate": 3.461362597165066e-05,
"loss": 0.9026,
"step": 920
},
{
"epoch": 11.376146788990825,
"grad_norm": 9.12176513671875,
"learning_rate": 3.438500228623686e-05,
"loss": 0.9882,
"step": 930
},
{
"epoch": 11.498470948012232,
"grad_norm": 10.87016773223877,
"learning_rate": 3.4156378600823045e-05,
"loss": 0.9379,
"step": 940
},
{
"epoch": 11.62079510703364,
"grad_norm": 8.557551383972168,
"learning_rate": 3.392775491540924e-05,
"loss": 0.9312,
"step": 950
},
{
"epoch": 11.743119266055047,
"grad_norm": 8.361717224121094,
"learning_rate": 3.369913122999543e-05,
"loss": 0.9085,
"step": 960
},
{
"epoch": 11.865443425076453,
"grad_norm": 7.925179958343506,
"learning_rate": 3.347050754458162e-05,
"loss": 1.0008,
"step": 970
},
{
"epoch": 11.98776758409786,
"grad_norm": 9.740839004516602,
"learning_rate": 3.324188385916781e-05,
"loss": 0.9479,
"step": 980
},
{
"epoch": 12.0,
"eval_accuracy": 0.6405529953917051,
"eval_f1": 0.6268359763707001,
"eval_loss": 0.8705018162727356,
"eval_precision": 0.6307341542199126,
"eval_recall": 0.6405529953917051,
"eval_runtime": 2.1984,
"eval_samples_per_second": 98.71,
"eval_steps_per_second": 6.368,
"step": 981
},
{
"epoch": 12.110091743119266,
"grad_norm": 9.437625885009766,
"learning_rate": 3.3013260173754005e-05,
"loss": 0.8172,
"step": 990
},
{
"epoch": 12.232415902140673,
"grad_norm": 8.812919616699219,
"learning_rate": 3.278463648834019e-05,
"loss": 0.8996,
"step": 1000
},
{
"epoch": 12.35474006116208,
"grad_norm": 8.750353813171387,
"learning_rate": 3.255601280292638e-05,
"loss": 0.9655,
"step": 1010
},
{
"epoch": 12.477064220183486,
"grad_norm": 7.671780586242676,
"learning_rate": 3.2327389117512575e-05,
"loss": 0.9139,
"step": 1020
},
{
"epoch": 12.599388379204893,
"grad_norm": 9.657299041748047,
"learning_rate": 3.209876543209876e-05,
"loss": 0.9269,
"step": 1030
},
{
"epoch": 12.7217125382263,
"grad_norm": 9.090998649597168,
"learning_rate": 3.187014174668496e-05,
"loss": 0.8867,
"step": 1040
},
{
"epoch": 12.844036697247706,
"grad_norm": 9.977676391601562,
"learning_rate": 3.1641518061271146e-05,
"loss": 0.89,
"step": 1050
},
{
"epoch": 12.966360856269112,
"grad_norm": 10.456171035766602,
"learning_rate": 3.141289437585734e-05,
"loss": 0.898,
"step": 1060
},
{
"epoch": 12.990825688073395,
"eval_accuracy": 0.6497695852534562,
"eval_f1": 0.6439767898106951,
"eval_loss": 0.8568853735923767,
"eval_precision": 0.6465264959675692,
"eval_recall": 0.6497695852534562,
"eval_runtime": 2.1871,
"eval_samples_per_second": 99.216,
"eval_steps_per_second": 6.401,
"step": 1062
},
{
"epoch": 13.08868501529052,
"grad_norm": 9.029878616333008,
"learning_rate": 3.118427069044353e-05,
"loss": 0.9008,
"step": 1070
},
{
"epoch": 13.211009174311927,
"grad_norm": 11.082198143005371,
"learning_rate": 3.095564700502972e-05,
"loss": 0.8678,
"step": 1080
},
{
"epoch": 13.333333333333334,
"grad_norm": 8.106987953186035,
"learning_rate": 3.072702331961592e-05,
"loss": 0.8724,
"step": 1090
},
{
"epoch": 13.45565749235474,
"grad_norm": 9.417874336242676,
"learning_rate": 3.0498399634202106e-05,
"loss": 0.8435,
"step": 1100
},
{
"epoch": 13.577981651376147,
"grad_norm": 10.039156913757324,
"learning_rate": 3.0269775948788297e-05,
"loss": 0.7984,
"step": 1110
},
{
"epoch": 13.700305810397554,
"grad_norm": 10.906753540039062,
"learning_rate": 3.0041152263374488e-05,
"loss": 0.8578,
"step": 1120
},
{
"epoch": 13.82262996941896,
"grad_norm": 9.698974609375,
"learning_rate": 2.981252857796068e-05,
"loss": 0.8699,
"step": 1130
},
{
"epoch": 13.944954128440367,
"grad_norm": 9.868678092956543,
"learning_rate": 2.958390489254687e-05,
"loss": 0.9101,
"step": 1140
},
{
"epoch": 13.99388379204893,
"eval_accuracy": 0.6129032258064516,
"eval_f1": 0.6090804206647874,
"eval_loss": 0.8736193180084229,
"eval_precision": 0.6179152160718983,
"eval_recall": 0.6129032258064516,
"eval_runtime": 2.1919,
"eval_samples_per_second": 99.002,
"eval_steps_per_second": 6.387,
"step": 1144
},
{
"epoch": 14.067278287461773,
"grad_norm": 8.79443645477295,
"learning_rate": 2.9355281207133062e-05,
"loss": 0.8439,
"step": 1150
},
{
"epoch": 14.18960244648318,
"grad_norm": 9.417186737060547,
"learning_rate": 2.9126657521719253e-05,
"loss": 0.8763,
"step": 1160
},
{
"epoch": 14.311926605504587,
"grad_norm": 11.611254692077637,
"learning_rate": 2.8898033836305445e-05,
"loss": 0.8305,
"step": 1170
},
{
"epoch": 14.434250764525993,
"grad_norm": 10.025824546813965,
"learning_rate": 2.8669410150891636e-05,
"loss": 0.7743,
"step": 1180
},
{
"epoch": 14.5565749235474,
"grad_norm": 10.79056453704834,
"learning_rate": 2.8440786465477824e-05,
"loss": 0.8414,
"step": 1190
},
{
"epoch": 14.678899082568808,
"grad_norm": 8.00452709197998,
"learning_rate": 2.8212162780064015e-05,
"loss": 0.8934,
"step": 1200
},
{
"epoch": 14.801223241590215,
"grad_norm": 11.359430313110352,
"learning_rate": 2.7983539094650207e-05,
"loss": 0.8844,
"step": 1210
},
{
"epoch": 14.923547400611621,
"grad_norm": 10.003506660461426,
"learning_rate": 2.7754915409236398e-05,
"loss": 0.8431,
"step": 1220
},
{
"epoch": 14.996941896024465,
"eval_accuracy": 0.6451612903225806,
"eval_f1": 0.6418579664744979,
"eval_loss": 0.8684141039848328,
"eval_precision": 0.6447143927349024,
"eval_recall": 0.6451612903225806,
"eval_runtime": 2.1833,
"eval_samples_per_second": 99.392,
"eval_steps_per_second": 6.412,
"step": 1226
},
{
"epoch": 15.045871559633028,
"grad_norm": 8.596139907836914,
"learning_rate": 2.752629172382259e-05,
"loss": 0.8224,
"step": 1230
},
{
"epoch": 15.168195718654435,
"grad_norm": 11.239164352416992,
"learning_rate": 2.729766803840878e-05,
"loss": 0.8407,
"step": 1240
},
{
"epoch": 15.290519877675841,
"grad_norm": 13.581149101257324,
"learning_rate": 2.7069044352994972e-05,
"loss": 0.7794,
"step": 1250
},
{
"epoch": 15.412844036697248,
"grad_norm": 10.948393821716309,
"learning_rate": 2.6840420667581163e-05,
"loss": 0.8199,
"step": 1260
},
{
"epoch": 15.535168195718654,
"grad_norm": 9.145750999450684,
"learning_rate": 2.6611796982167354e-05,
"loss": 0.7257,
"step": 1270
},
{
"epoch": 15.65749235474006,
"grad_norm": 11.599444389343262,
"learning_rate": 2.6383173296753542e-05,
"loss": 0.7847,
"step": 1280
},
{
"epoch": 15.779816513761467,
"grad_norm": 11.259026527404785,
"learning_rate": 2.6154549611339734e-05,
"loss": 0.8404,
"step": 1290
},
{
"epoch": 15.902140672782874,
"grad_norm": 11.746826171875,
"learning_rate": 2.5925925925925925e-05,
"loss": 0.8187,
"step": 1300
},
{
"epoch": 16.0,
"eval_accuracy": 0.6221198156682027,
"eval_f1": 0.6198809621231663,
"eval_loss": 0.9032032489776611,
"eval_precision": 0.6206784143654135,
"eval_recall": 0.6221198156682027,
"eval_runtime": 2.1909,
"eval_samples_per_second": 99.047,
"eval_steps_per_second": 6.39,
"step": 1308
},
{
"epoch": 16.02446483180428,
"grad_norm": 9.55320930480957,
"learning_rate": 2.5697302240512116e-05,
"loss": 0.7986,
"step": 1310
},
{
"epoch": 16.146788990825687,
"grad_norm": 10.655790328979492,
"learning_rate": 2.5468678555098308e-05,
"loss": 0.7589,
"step": 1320
},
{
"epoch": 16.269113149847094,
"grad_norm": 10.707428932189941,
"learning_rate": 2.52400548696845e-05,
"loss": 0.7315,
"step": 1330
},
{
"epoch": 16.3914373088685,
"grad_norm": 12.252084732055664,
"learning_rate": 2.501143118427069e-05,
"loss": 0.7892,
"step": 1340
},
{
"epoch": 16.513761467889907,
"grad_norm": 9.307701110839844,
"learning_rate": 2.4782807498856885e-05,
"loss": 0.7698,
"step": 1350
},
{
"epoch": 16.636085626911314,
"grad_norm": 10.571149826049805,
"learning_rate": 2.4554183813443073e-05,
"loss": 0.7547,
"step": 1360
},
{
"epoch": 16.75840978593272,
"grad_norm": 11.456555366516113,
"learning_rate": 2.4325560128029264e-05,
"loss": 0.7723,
"step": 1370
},
{
"epoch": 16.88073394495413,
"grad_norm": 11.944501876831055,
"learning_rate": 2.4096936442615455e-05,
"loss": 0.7614,
"step": 1380
},
{
"epoch": 16.990825688073393,
"eval_accuracy": 0.6359447004608295,
"eval_f1": 0.6305311465399625,
"eval_loss": 0.9012994170188904,
"eval_precision": 0.6433955302551128,
"eval_recall": 0.6359447004608295,
"eval_runtime": 2.1782,
"eval_samples_per_second": 99.624,
"eval_steps_per_second": 6.427,
"step": 1389
},
{
"epoch": 17.003058103975537,
"grad_norm": 11.058562278747559,
"learning_rate": 2.3868312757201647e-05,
"loss": 0.7566,
"step": 1390
},
{
"epoch": 17.125382262996943,
"grad_norm": 12.137784004211426,
"learning_rate": 2.3639689071787838e-05,
"loss": 0.6842,
"step": 1400
},
{
"epoch": 17.24770642201835,
"grad_norm": 12.046640396118164,
"learning_rate": 2.341106538637403e-05,
"loss": 0.7126,
"step": 1410
},
{
"epoch": 17.370030581039757,
"grad_norm": 13.35624885559082,
"learning_rate": 2.318244170096022e-05,
"loss": 0.721,
"step": 1420
},
{
"epoch": 17.492354740061163,
"grad_norm": 10.566619873046875,
"learning_rate": 2.2953818015546412e-05,
"loss": 0.7424,
"step": 1430
},
{
"epoch": 17.61467889908257,
"grad_norm": 11.238773345947266,
"learning_rate": 2.2725194330132603e-05,
"loss": 0.7375,
"step": 1440
},
{
"epoch": 17.737003058103976,
"grad_norm": 11.583772659301758,
"learning_rate": 2.2496570644718794e-05,
"loss": 0.7177,
"step": 1450
},
{
"epoch": 17.859327217125383,
"grad_norm": 11.983872413635254,
"learning_rate": 2.2267946959304986e-05,
"loss": 0.7393,
"step": 1460
},
{
"epoch": 17.98165137614679,
"grad_norm": 9.852224349975586,
"learning_rate": 2.2039323273891177e-05,
"loss": 0.725,
"step": 1470
},
{
"epoch": 17.99388379204893,
"eval_accuracy": 0.5990783410138248,
"eval_f1": 0.5975030533834751,
"eval_loss": 0.9701642394065857,
"eval_precision": 0.6072043560801691,
"eval_recall": 0.5990783410138248,
"eval_runtime": 2.1744,
"eval_samples_per_second": 99.798,
"eval_steps_per_second": 6.439,
"step": 1471
},
{
"epoch": 18.103975535168196,
"grad_norm": 10.969733238220215,
"learning_rate": 2.1810699588477368e-05,
"loss": 0.6728,
"step": 1480
},
{
"epoch": 18.226299694189603,
"grad_norm": 11.671346664428711,
"learning_rate": 2.158207590306356e-05,
"loss": 0.6974,
"step": 1490
},
{
"epoch": 18.34862385321101,
"grad_norm": 8.975910186767578,
"learning_rate": 2.135345221764975e-05,
"loss": 0.6503,
"step": 1500
},
{
"epoch": 18.470948012232416,
"grad_norm": 13.026418685913086,
"learning_rate": 2.1124828532235942e-05,
"loss": 0.6879,
"step": 1510
},
{
"epoch": 18.593272171253822,
"grad_norm": 15.196861267089844,
"learning_rate": 2.089620484682213e-05,
"loss": 0.6912,
"step": 1520
},
{
"epoch": 18.71559633027523,
"grad_norm": 8.060693740844727,
"learning_rate": 2.066758116140832e-05,
"loss": 0.7287,
"step": 1530
},
{
"epoch": 18.837920489296636,
"grad_norm": 10.259760856628418,
"learning_rate": 2.0438957475994513e-05,
"loss": 0.6657,
"step": 1540
},
{
"epoch": 18.960244648318042,
"grad_norm": 10.083440780639648,
"learning_rate": 2.0210333790580704e-05,
"loss": 0.6938,
"step": 1550
},
{
"epoch": 18.996941896024463,
"eval_accuracy": 0.6728110599078341,
"eval_f1": 0.6659537595021466,
"eval_loss": 0.9598256349563599,
"eval_precision": 0.683992662820187,
"eval_recall": 0.6728110599078341,
"eval_runtime": 2.1821,
"eval_samples_per_second": 99.445,
"eval_steps_per_second": 6.416,
"step": 1553
},
{
"epoch": 19.08256880733945,
"grad_norm": 13.321717262268066,
"learning_rate": 1.9981710105166895e-05,
"loss": 0.6849,
"step": 1560
},
{
"epoch": 19.204892966360855,
"grad_norm": 9.200907707214355,
"learning_rate": 1.9753086419753087e-05,
"loss": 0.6397,
"step": 1570
},
{
"epoch": 19.327217125382262,
"grad_norm": 12.469816207885742,
"learning_rate": 1.9524462734339278e-05,
"loss": 0.6554,
"step": 1580
},
{
"epoch": 19.44954128440367,
"grad_norm": 10.692110061645508,
"learning_rate": 1.929583904892547e-05,
"loss": 0.656,
"step": 1590
},
{
"epoch": 19.571865443425075,
"grad_norm": 11.429883003234863,
"learning_rate": 1.906721536351166e-05,
"loss": 0.6959,
"step": 1600
},
{
"epoch": 19.69418960244648,
"grad_norm": 10.087126731872559,
"learning_rate": 1.8838591678097852e-05,
"loss": 0.6632,
"step": 1610
},
{
"epoch": 19.81651376146789,
"grad_norm": 16.846824645996094,
"learning_rate": 1.8609967992684043e-05,
"loss": 0.6307,
"step": 1620
},
{
"epoch": 19.938837920489295,
"grad_norm": 12.659772872924805,
"learning_rate": 1.8381344307270234e-05,
"loss": 0.6761,
"step": 1630
},
{
"epoch": 20.0,
"eval_accuracy": 0.6082949308755761,
"eval_f1": 0.6112076266773331,
"eval_loss": 0.9886434078216553,
"eval_precision": 0.6242090052989822,
"eval_recall": 0.6082949308755761,
"eval_runtime": 2.1998,
"eval_samples_per_second": 98.647,
"eval_steps_per_second": 6.364,
"step": 1635
},
{
"epoch": 20.061162079510705,
"grad_norm": 11.294504165649414,
"learning_rate": 1.8152720621856426e-05,
"loss": 0.6726,
"step": 1640
},
{
"epoch": 20.18348623853211,
"grad_norm": 10.856670379638672,
"learning_rate": 1.7924096936442617e-05,
"loss": 0.626,
"step": 1650
},
{
"epoch": 20.30581039755352,
"grad_norm": 13.345258712768555,
"learning_rate": 1.769547325102881e-05,
"loss": 0.6446,
"step": 1660
},
{
"epoch": 20.428134556574925,
"grad_norm": 9.459267616271973,
"learning_rate": 1.7466849565615e-05,
"loss": 0.6125,
"step": 1670
},
{
"epoch": 20.55045871559633,
"grad_norm": 14.87169361114502,
"learning_rate": 1.723822588020119e-05,
"loss": 0.575,
"step": 1680
},
{
"epoch": 20.672782874617738,
"grad_norm": 14.521281242370605,
"learning_rate": 1.700960219478738e-05,
"loss": 0.6999,
"step": 1690
},
{
"epoch": 20.795107033639145,
"grad_norm": 12.60116195678711,
"learning_rate": 1.678097850937357e-05,
"loss": 0.5862,
"step": 1700
},
{
"epoch": 20.91743119266055,
"grad_norm": 10.943224906921387,
"learning_rate": 1.655235482395976e-05,
"loss": 0.5865,
"step": 1710
},
{
"epoch": 20.990825688073393,
"eval_accuracy": 0.6497695852534562,
"eval_f1": 0.6428003832696357,
"eval_loss": 0.9367409348487854,
"eval_precision": 0.6431866677852993,
"eval_recall": 0.6497695852534562,
"eval_runtime": 2.1726,
"eval_samples_per_second": 99.881,
"eval_steps_per_second": 6.444,
"step": 1716
},
{
"epoch": 21.039755351681958,
"grad_norm": 11.365513801574707,
"learning_rate": 1.6323731138545953e-05,
"loss": 0.6488,
"step": 1720
},
{
"epoch": 21.162079510703364,
"grad_norm": 8.919206619262695,
"learning_rate": 1.6095107453132147e-05,
"loss": 0.6201,
"step": 1730
},
{
"epoch": 21.28440366972477,
"grad_norm": 12.377992630004883,
"learning_rate": 1.586648376771834e-05,
"loss": 0.5806,
"step": 1740
},
{
"epoch": 21.406727828746178,
"grad_norm": 12.146440505981445,
"learning_rate": 1.563786008230453e-05,
"loss": 0.5506,
"step": 1750
},
{
"epoch": 21.529051987767584,
"grad_norm": 11.200637817382812,
"learning_rate": 1.5409236396890718e-05,
"loss": 0.5667,
"step": 1760
},
{
"epoch": 21.65137614678899,
"grad_norm": 14.65993595123291,
"learning_rate": 1.5180612711476911e-05,
"loss": 0.5936,
"step": 1770
},
{
"epoch": 21.773700305810397,
"grad_norm": 14.228142738342285,
"learning_rate": 1.49519890260631e-05,
"loss": 0.5791,
"step": 1780
},
{
"epoch": 21.896024464831804,
"grad_norm": 12.14127254486084,
"learning_rate": 1.4723365340649292e-05,
"loss": 0.5857,
"step": 1790
},
{
"epoch": 21.99388379204893,
"eval_accuracy": 0.631336405529954,
"eval_f1": 0.6321523990020061,
"eval_loss": 0.9693921208381653,
"eval_precision": 0.6331329862312,
"eval_recall": 0.631336405529954,
"eval_runtime": 2.1772,
"eval_samples_per_second": 99.669,
"eval_steps_per_second": 6.43,
"step": 1798
},
{
"epoch": 22.01834862385321,
"grad_norm": 8.9346342086792,
"learning_rate": 1.4494741655235483e-05,
"loss": 0.553,
"step": 1800
},
{
"epoch": 22.140672782874617,
"grad_norm": 15.74846363067627,
"learning_rate": 1.4266117969821674e-05,
"loss": 0.5693,
"step": 1810
},
{
"epoch": 22.262996941896024,
"grad_norm": 17.549697875976562,
"learning_rate": 1.4037494284407866e-05,
"loss": 0.5955,
"step": 1820
},
{
"epoch": 22.38532110091743,
"grad_norm": 12.935697555541992,
"learning_rate": 1.3808870598994055e-05,
"loss": 0.5184,
"step": 1830
},
{
"epoch": 22.507645259938837,
"grad_norm": 14.531293869018555,
"learning_rate": 1.3580246913580247e-05,
"loss": 0.5873,
"step": 1840
},
{
"epoch": 22.629969418960243,
"grad_norm": 11.71330451965332,
"learning_rate": 1.3351623228166438e-05,
"loss": 0.538,
"step": 1850
},
{
"epoch": 22.75229357798165,
"grad_norm": 11.269133567810059,
"learning_rate": 1.312299954275263e-05,
"loss": 0.5819,
"step": 1860
},
{
"epoch": 22.874617737003057,
"grad_norm": 9.762799263000488,
"learning_rate": 1.2894375857338819e-05,
"loss": 0.5612,
"step": 1870
},
{
"epoch": 22.996941896024463,
"grad_norm": 9.858174324035645,
"learning_rate": 1.2665752171925014e-05,
"loss": 0.556,
"step": 1880
},
{
"epoch": 22.996941896024463,
"eval_accuracy": 0.6359447004608295,
"eval_f1": 0.629638753332476,
"eval_loss": 1.021164059638977,
"eval_precision": 0.6574294916995147,
"eval_recall": 0.6359447004608295,
"eval_runtime": 2.1738,
"eval_samples_per_second": 99.824,
"eval_steps_per_second": 6.44,
"step": 1880
},
{
"epoch": 23.119266055045873,
"grad_norm": 18.760498046875,
"learning_rate": 1.2437128486511203e-05,
"loss": 0.5428,
"step": 1890
},
{
"epoch": 23.24159021406728,
"grad_norm": 12.281718254089355,
"learning_rate": 1.2208504801097394e-05,
"loss": 0.5557,
"step": 1900
},
{
"epoch": 23.363914373088686,
"grad_norm": 11.702547073364258,
"learning_rate": 1.1979881115683586e-05,
"loss": 0.547,
"step": 1910
},
{
"epoch": 23.486238532110093,
"grad_norm": 9.12270450592041,
"learning_rate": 1.1751257430269777e-05,
"loss": 0.4957,
"step": 1920
},
{
"epoch": 23.6085626911315,
"grad_norm": 12.279609680175781,
"learning_rate": 1.1522633744855968e-05,
"loss": 0.5025,
"step": 1930
},
{
"epoch": 23.730886850152906,
"grad_norm": 12.506765365600586,
"learning_rate": 1.129401005944216e-05,
"loss": 0.5863,
"step": 1940
},
{
"epoch": 23.853211009174313,
"grad_norm": 13.289384841918945,
"learning_rate": 1.106538637402835e-05,
"loss": 0.5533,
"step": 1950
},
{
"epoch": 23.97553516819572,
"grad_norm": 12.350972175598145,
"learning_rate": 1.083676268861454e-05,
"loss": 0.4871,
"step": 1960
},
{
"epoch": 24.0,
"eval_accuracy": 0.5944700460829493,
"eval_f1": 0.587885178704199,
"eval_loss": 1.032782793045044,
"eval_precision": 0.5950577616931705,
"eval_recall": 0.5944700460829493,
"eval_runtime": 2.183,
"eval_samples_per_second": 99.404,
"eval_steps_per_second": 6.413,
"step": 1962
},
{
"epoch": 24.097859327217126,
"grad_norm": 9.447975158691406,
"learning_rate": 1.0608139003200732e-05,
"loss": 0.4881,
"step": 1970
},
{
"epoch": 24.220183486238533,
"grad_norm": 11.92766284942627,
"learning_rate": 1.0379515317786923e-05,
"loss": 0.5066,
"step": 1980
},
{
"epoch": 24.34250764525994,
"grad_norm": 11.550183296203613,
"learning_rate": 1.0150891632373114e-05,
"loss": 0.4547,
"step": 1990
},
{
"epoch": 24.464831804281346,
"grad_norm": 10.570334434509277,
"learning_rate": 9.922267946959306e-06,
"loss": 0.504,
"step": 2000
},
{
"epoch": 24.587155963302752,
"grad_norm": 12.56131362915039,
"learning_rate": 9.693644261545497e-06,
"loss": 0.4765,
"step": 2010
},
{
"epoch": 24.70948012232416,
"grad_norm": 10.12960433959961,
"learning_rate": 9.465020576131688e-06,
"loss": 0.4862,
"step": 2020
},
{
"epoch": 24.831804281345565,
"grad_norm": 14.820987701416016,
"learning_rate": 9.236396890717878e-06,
"loss": 0.4748,
"step": 2030
},
{
"epoch": 24.954128440366972,
"grad_norm": 13.520421981811523,
"learning_rate": 9.00777320530407e-06,
"loss": 0.5254,
"step": 2040
},
{
"epoch": 24.990825688073393,
"eval_accuracy": 0.5944700460829493,
"eval_f1": 0.5917457657549822,
"eval_loss": 1.0132023096084595,
"eval_precision": 0.5968091048736209,
"eval_recall": 0.5944700460829493,
"eval_runtime": 2.1697,
"eval_samples_per_second": 100.012,
"eval_steps_per_second": 6.452,
"step": 2043
},
{
"epoch": 25.07645259938838,
"grad_norm": 11.470428466796875,
"learning_rate": 8.77914951989026e-06,
"loss": 0.4879,
"step": 2050
},
{
"epoch": 25.198776758409785,
"grad_norm": 12.04905891418457,
"learning_rate": 8.550525834476454e-06,
"loss": 0.4719,
"step": 2060
},
{
"epoch": 25.321100917431192,
"grad_norm": 14.935491561889648,
"learning_rate": 8.321902149062643e-06,
"loss": 0.4989,
"step": 2070
},
{
"epoch": 25.4434250764526,
"grad_norm": 13.884613037109375,
"learning_rate": 8.093278463648834e-06,
"loss": 0.4718,
"step": 2080
},
{
"epoch": 25.565749235474005,
"grad_norm": 14.0990629196167,
"learning_rate": 7.864654778235026e-06,
"loss": 0.4833,
"step": 2090
},
{
"epoch": 25.68807339449541,
"grad_norm": 15.498763084411621,
"learning_rate": 7.636031092821217e-06,
"loss": 0.4719,
"step": 2100
},
{
"epoch": 25.810397553516818,
"grad_norm": 17.733688354492188,
"learning_rate": 7.4074074074074075e-06,
"loss": 0.5214,
"step": 2110
},
{
"epoch": 25.932721712538225,
"grad_norm": 12.92156982421875,
"learning_rate": 7.178783721993598e-06,
"loss": 0.5054,
"step": 2120
},
{
"epoch": 25.99388379204893,
"eval_accuracy": 0.5944700460829493,
"eval_f1": 0.5943911153011756,
"eval_loss": 1.038478136062622,
"eval_precision": 0.5987861045709855,
"eval_recall": 0.5944700460829493,
"eval_runtime": 2.2069,
"eval_samples_per_second": 98.326,
"eval_steps_per_second": 6.344,
"step": 2125
},
{
"epoch": 26.05504587155963,
"grad_norm": 12.785786628723145,
"learning_rate": 6.950160036579791e-06,
"loss": 0.5009,
"step": 2130
},
{
"epoch": 26.17737003058104,
"grad_norm": 12.252779006958008,
"learning_rate": 6.721536351165981e-06,
"loss": 0.4896,
"step": 2140
},
{
"epoch": 26.299694189602448,
"grad_norm": 8.970731735229492,
"learning_rate": 6.492912665752173e-06,
"loss": 0.4409,
"step": 2150
},
{
"epoch": 26.422018348623855,
"grad_norm": 14.119239807128906,
"learning_rate": 6.264288980338363e-06,
"loss": 0.4685,
"step": 2160
},
{
"epoch": 26.54434250764526,
"grad_norm": 13.817703247070312,
"learning_rate": 6.0356652949245544e-06,
"loss": 0.423,
"step": 2170
},
{
"epoch": 26.666666666666668,
"grad_norm": 11.813767433166504,
"learning_rate": 5.807041609510746e-06,
"loss": 0.4338,
"step": 2180
},
{
"epoch": 26.788990825688074,
"grad_norm": 10.11328411102295,
"learning_rate": 5.578417924096937e-06,
"loss": 0.4317,
"step": 2190
},
{
"epoch": 26.91131498470948,
"grad_norm": 12.501155853271484,
"learning_rate": 5.3497942386831275e-06,
"loss": 0.4706,
"step": 2200
},
{
"epoch": 26.996941896024463,
"eval_accuracy": 0.6036866359447005,
"eval_f1": 0.5983142033282286,
"eval_loss": 1.0625784397125244,
"eval_precision": 0.6100467719653185,
"eval_recall": 0.6036866359447005,
"eval_runtime": 2.1672,
"eval_samples_per_second": 100.128,
"eval_steps_per_second": 6.46,
"step": 2207
},
{
"epoch": 27.033639143730888,
"grad_norm": 12.064492225646973,
"learning_rate": 5.121170553269319e-06,
"loss": 0.4762,
"step": 2210
},
{
"epoch": 27.155963302752294,
"grad_norm": 12.347169876098633,
"learning_rate": 4.89254686785551e-06,
"loss": 0.4493,
"step": 2220
},
{
"epoch": 27.2782874617737,
"grad_norm": 12.065176010131836,
"learning_rate": 4.663923182441701e-06,
"loss": 0.4341,
"step": 2230
},
{
"epoch": 27.400611620795107,
"grad_norm": 12.744647979736328,
"learning_rate": 4.435299497027892e-06,
"loss": 0.4373,
"step": 2240
},
{
"epoch": 27.522935779816514,
"grad_norm": 14.091771125793457,
"learning_rate": 4.206675811614083e-06,
"loss": 0.4286,
"step": 2250
},
{
"epoch": 27.64525993883792,
"grad_norm": 11.384113311767578,
"learning_rate": 3.9780521262002744e-06,
"loss": 0.4338,
"step": 2260
},
{
"epoch": 27.767584097859327,
"grad_norm": 12.185340881347656,
"learning_rate": 3.7494284407864657e-06,
"loss": 0.4482,
"step": 2270
},
{
"epoch": 27.889908256880734,
"grad_norm": 10.328293800354004,
"learning_rate": 3.5208047553726566e-06,
"loss": 0.418,
"step": 2280
},
{
"epoch": 28.0,
"eval_accuracy": 0.5806451612903226,
"eval_f1": 0.5774461884155286,
"eval_loss": 1.053132176399231,
"eval_precision": 0.5829576016453093,
"eval_recall": 0.5806451612903226,
"eval_runtime": 2.1931,
"eval_samples_per_second": 98.948,
"eval_steps_per_second": 6.384,
"step": 2289
},
{
"epoch": 28.01223241590214,
"grad_norm": 13.196370124816895,
"learning_rate": 3.2921810699588483e-06,
"loss": 0.4523,
"step": 2290
},
{
"epoch": 28.134556574923547,
"grad_norm": 12.735855102539062,
"learning_rate": 3.063557384545039e-06,
"loss": 0.3987,
"step": 2300
},
{
"epoch": 28.256880733944953,
"grad_norm": 13.810506820678711,
"learning_rate": 2.83493369913123e-06,
"loss": 0.4244,
"step": 2310
},
{
"epoch": 28.37920489296636,
"grad_norm": 14.41588306427002,
"learning_rate": 2.6063100137174214e-06,
"loss": 0.4746,
"step": 2320
},
{
"epoch": 28.501529051987767,
"grad_norm": 13.99752426147461,
"learning_rate": 2.3776863283036123e-06,
"loss": 0.4344,
"step": 2330
},
{
"epoch": 28.623853211009173,
"grad_norm": 13.782439231872559,
"learning_rate": 2.1490626428898036e-06,
"loss": 0.4018,
"step": 2340
},
{
"epoch": 28.74617737003058,
"grad_norm": 10.79996109008789,
"learning_rate": 1.920438957475995e-06,
"loss": 0.3779,
"step": 2350
},
{
"epoch": 28.868501529051986,
"grad_norm": 12.254060745239258,
"learning_rate": 1.6918152720621857e-06,
"loss": 0.4671,
"step": 2360
},
{
"epoch": 28.990825688073393,
"grad_norm": 12.211686134338379,
"learning_rate": 1.4631915866483768e-06,
"loss": 0.455,
"step": 2370
},
{
"epoch": 28.990825688073393,
"eval_accuracy": 0.6082949308755761,
"eval_f1": 0.6039494237985392,
"eval_loss": 1.0340049266815186,
"eval_precision": 0.615058251488602,
"eval_recall": 0.6082949308755761,
"eval_runtime": 2.21,
"eval_samples_per_second": 98.191,
"eval_steps_per_second": 6.335,
"step": 2370
},
{
"epoch": 29.1131498470948,
"grad_norm": 12.376998901367188,
"learning_rate": 1.234567901234568e-06,
"loss": 0.405,
"step": 2380
},
{
"epoch": 29.235474006116206,
"grad_norm": 13.317730903625488,
"learning_rate": 1.0059442158207592e-06,
"loss": 0.3908,
"step": 2390
},
{
"epoch": 29.357798165137616,
"grad_norm": 14.004073143005371,
"learning_rate": 7.773205304069502e-07,
"loss": 0.4006,
"step": 2400
},
{
"epoch": 29.480122324159023,
"grad_norm": 11.803933143615723,
"learning_rate": 5.486968449931413e-07,
"loss": 0.4255,
"step": 2410
},
{
"epoch": 29.60244648318043,
"grad_norm": 13.087397575378418,
"learning_rate": 3.200731595793324e-07,
"loss": 0.4354,
"step": 2420
},
{
"epoch": 29.724770642201836,
"grad_norm": 11.753482818603516,
"learning_rate": 9.144947416552355e-08,
"loss": 0.4414,
"step": 2430
},
{
"epoch": 29.724770642201836,
"eval_accuracy": 0.6129032258064516,
"eval_f1": 0.6087354657244137,
"eval_loss": 1.0280050039291382,
"eval_precision": 0.6161055592910982,
"eval_recall": 0.6129032258064516,
"eval_runtime": 2.2362,
"eval_samples_per_second": 97.04,
"eval_steps_per_second": 6.261,
"step": 2430
},
{
"epoch": 29.724770642201836,
"step": 2430,
"total_flos": 1.2301888231484006e+19,
"train_loss": 0.8217329954414211,
"train_runtime": 6060.3586,
"train_samples_per_second": 25.865,
"train_steps_per_second": 0.401
}
],
"logging_steps": 10,
"max_steps": 2430,
"num_input_tokens_seen": 0,
"num_train_epochs": 30,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.2301888231484006e+19,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}