vit-finetune-scrap / trainer_state.json
{
"best_metric": 0.11155818402767181,
"best_model_checkpoint": "./vit-finetune-scrap/checkpoint-1000",
"epoch": 4.0,
"eval_steps": 1000,
"global_step": 1556,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03,
"grad_norm": 2.721052646636963,
"learning_rate": 0.0001987146529562982,
"loss": 2.3309,
"step": 10
},
{
"epoch": 0.05,
"grad_norm": 2.6302196979522705,
"learning_rate": 0.0001974293059125964,
"loss": 2.1693,
"step": 20
},
{
"epoch": 0.08,
"grad_norm": 3.2131187915802,
"learning_rate": 0.0001961439588688946,
"loss": 1.914,
"step": 30
},
{
"epoch": 0.1,
"grad_norm": 3.520822525024414,
"learning_rate": 0.00019485861182519281,
"loss": 1.6374,
"step": 40
},
{
"epoch": 0.13,
"grad_norm": 4.047000408172607,
"learning_rate": 0.000193573264781491,
"loss": 1.6214,
"step": 50
},
{
"epoch": 0.15,
"grad_norm": 3.1879024505615234,
"learning_rate": 0.0001922879177377892,
"loss": 1.6056,
"step": 60
},
{
"epoch": 0.18,
"grad_norm": 3.4971909523010254,
"learning_rate": 0.00019100257069408743,
"loss": 1.4073,
"step": 70
},
{
"epoch": 0.21,
"grad_norm": 5.953548908233643,
"learning_rate": 0.00018971722365038562,
"loss": 1.2913,
"step": 80
},
{
"epoch": 0.23,
"grad_norm": 3.639462471008301,
"learning_rate": 0.00018843187660668383,
"loss": 1.1548,
"step": 90
},
{
"epoch": 0.26,
"grad_norm": 5.055682182312012,
"learning_rate": 0.00018714652956298202,
"loss": 1.3169,
"step": 100
},
{
"epoch": 0.28,
"grad_norm": 2.787602186203003,
"learning_rate": 0.0001858611825192802,
"loss": 1.2729,
"step": 110
},
{
"epoch": 0.31,
"grad_norm": 5.495873928070068,
"learning_rate": 0.00018457583547557842,
"loss": 1.1431,
"step": 120
},
{
"epoch": 0.33,
"grad_norm": 2.6707160472869873,
"learning_rate": 0.0001832904884318766,
"loss": 1.0606,
"step": 130
},
{
"epoch": 0.36,
"grad_norm": 5.753376483917236,
"learning_rate": 0.00018200514138817483,
"loss": 1.0528,
"step": 140
},
{
"epoch": 0.39,
"grad_norm": 4.965968132019043,
"learning_rate": 0.000180719794344473,
"loss": 1.3918,
"step": 150
},
{
"epoch": 0.41,
"grad_norm": 4.477539539337158,
"learning_rate": 0.0001794344473007712,
"loss": 1.2681,
"step": 160
},
{
"epoch": 0.44,
"grad_norm": 6.264174938201904,
"learning_rate": 0.00017814910025706942,
"loss": 1.0713,
"step": 170
},
{
"epoch": 0.46,
"grad_norm": 3.289985179901123,
"learning_rate": 0.0001768637532133676,
"loss": 1.1408,
"step": 180
},
{
"epoch": 0.49,
"grad_norm": 4.64877986907959,
"learning_rate": 0.00017557840616966582,
"loss": 1.3037,
"step": 190
},
{
"epoch": 0.51,
"grad_norm": 3.4218943119049072,
"learning_rate": 0.000174293059125964,
"loss": 0.9621,
"step": 200
},
{
"epoch": 0.54,
"grad_norm": 5.507615566253662,
"learning_rate": 0.00017300771208226222,
"loss": 1.0243,
"step": 210
},
{
"epoch": 0.57,
"grad_norm": 4.309627532958984,
"learning_rate": 0.00017172236503856043,
"loss": 0.9739,
"step": 220
},
{
"epoch": 0.59,
"grad_norm": 4.056205749511719,
"learning_rate": 0.00017043701799485862,
"loss": 0.9158,
"step": 230
},
{
"epoch": 0.62,
"grad_norm": 3.1590564250946045,
"learning_rate": 0.00016915167095115684,
"loss": 0.8557,
"step": 240
},
{
"epoch": 0.64,
"grad_norm": 1.6367921829223633,
"learning_rate": 0.00016786632390745502,
"loss": 1.0898,
"step": 250
},
{
"epoch": 0.67,
"grad_norm": 5.508506774902344,
"learning_rate": 0.0001665809768637532,
"loss": 1.0173,
"step": 260
},
{
"epoch": 0.69,
"grad_norm": 5.602323532104492,
"learning_rate": 0.00016529562982005143,
"loss": 0.9706,
"step": 270
},
{
"epoch": 0.72,
"grad_norm": 8.27458381652832,
"learning_rate": 0.00016401028277634961,
"loss": 1.1064,
"step": 280
},
{
"epoch": 0.75,
"grad_norm": 3.5698864459991455,
"learning_rate": 0.00016272493573264783,
"loss": 0.9979,
"step": 290
},
{
"epoch": 0.77,
"grad_norm": 5.842220783233643,
"learning_rate": 0.00016143958868894602,
"loss": 1.0221,
"step": 300
},
{
"epoch": 0.8,
"grad_norm": 3.458761692047119,
"learning_rate": 0.00016015424164524423,
"loss": 0.9931,
"step": 310
},
{
"epoch": 0.82,
"grad_norm": 5.971825122833252,
"learning_rate": 0.00015886889460154242,
"loss": 1.1686,
"step": 320
},
{
"epoch": 0.85,
"grad_norm": 5.68731689453125,
"learning_rate": 0.0001575835475578406,
"loss": 0.9805,
"step": 330
},
{
"epoch": 0.87,
"grad_norm": 5.103214263916016,
"learning_rate": 0.00015629820051413882,
"loss": 0.8668,
"step": 340
},
{
"epoch": 0.9,
"grad_norm": 4.177506923675537,
"learning_rate": 0.00015501285347043704,
"loss": 1.1952,
"step": 350
},
{
"epoch": 0.93,
"grad_norm": 1.9655299186706543,
"learning_rate": 0.00015372750642673522,
"loss": 0.8981,
"step": 360
},
{
"epoch": 0.95,
"grad_norm": 4.982448577880859,
"learning_rate": 0.00015244215938303344,
"loss": 0.7721,
"step": 370
},
{
"epoch": 0.98,
"grad_norm": 5.1874775886535645,
"learning_rate": 0.00015115681233933163,
"loss": 0.97,
"step": 380
},
{
"epoch": 1.0,
"grad_norm": 4.942078590393066,
"learning_rate": 0.00014987146529562984,
"loss": 0.8415,
"step": 390
},
{
"epoch": 1.03,
"grad_norm": 3.160961389541626,
"learning_rate": 0.00014858611825192803,
"loss": 0.5367,
"step": 400
},
{
"epoch": 1.05,
"grad_norm": 5.394630432128906,
"learning_rate": 0.00014730077120822622,
"loss": 0.561,
"step": 410
},
{
"epoch": 1.08,
"grad_norm": 2.2095775604248047,
"learning_rate": 0.00014601542416452443,
"loss": 0.5548,
"step": 420
},
{
"epoch": 1.11,
"grad_norm": 4.9532575607299805,
"learning_rate": 0.00014473007712082262,
"loss": 0.6005,
"step": 430
},
{
"epoch": 1.13,
"grad_norm": 5.503066062927246,
"learning_rate": 0.00014344473007712083,
"loss": 0.514,
"step": 440
},
{
"epoch": 1.16,
"grad_norm": 5.952071189880371,
"learning_rate": 0.00014215938303341902,
"loss": 0.5386,
"step": 450
},
{
"epoch": 1.18,
"grad_norm": 4.198409557342529,
"learning_rate": 0.00014087403598971724,
"loss": 0.5937,
"step": 460
},
{
"epoch": 1.21,
"grad_norm": 4.768213272094727,
"learning_rate": 0.00013958868894601542,
"loss": 0.6504,
"step": 470
},
{
"epoch": 1.23,
"grad_norm": 4.068699359893799,
"learning_rate": 0.0001383033419023136,
"loss": 0.421,
"step": 480
},
{
"epoch": 1.26,
"grad_norm": 4.887763500213623,
"learning_rate": 0.00013701799485861185,
"loss": 0.5566,
"step": 490
},
{
"epoch": 1.29,
"grad_norm": 5.322113037109375,
"learning_rate": 0.00013573264781491004,
"loss": 0.514,
"step": 500
},
{
"epoch": 1.31,
"grad_norm": 3.480942726135254,
"learning_rate": 0.00013444730077120823,
"loss": 0.5012,
"step": 510
},
{
"epoch": 1.34,
"grad_norm": 3.2392122745513916,
"learning_rate": 0.00013316195372750644,
"loss": 0.5065,
"step": 520
},
{
"epoch": 1.36,
"grad_norm": 1.8148912191390991,
"learning_rate": 0.00013187660668380463,
"loss": 0.4932,
"step": 530
},
{
"epoch": 1.39,
"grad_norm": 1.9780988693237305,
"learning_rate": 0.00013059125964010284,
"loss": 0.6036,
"step": 540
},
{
"epoch": 1.41,
"grad_norm": 5.625373840332031,
"learning_rate": 0.00012930591259640103,
"loss": 0.564,
"step": 550
},
{
"epoch": 1.44,
"grad_norm": 9.524807929992676,
"learning_rate": 0.00012802056555269925,
"loss": 0.5695,
"step": 560
},
{
"epoch": 1.47,
"grad_norm": 1.463976263999939,
"learning_rate": 0.00012673521850899743,
"loss": 0.3198,
"step": 570
},
{
"epoch": 1.49,
"grad_norm": 6.108857154846191,
"learning_rate": 0.00012544987146529562,
"loss": 0.6759,
"step": 580
},
{
"epoch": 1.52,
"grad_norm": 1.5109316110610962,
"learning_rate": 0.00012416452442159384,
"loss": 0.4468,
"step": 590
},
{
"epoch": 1.54,
"grad_norm": 1.9603605270385742,
"learning_rate": 0.00012287917737789202,
"loss": 0.3569,
"step": 600
},
{
"epoch": 1.57,
"grad_norm": 7.527422904968262,
"learning_rate": 0.00012159383033419023,
"loss": 0.6518,
"step": 610
},
{
"epoch": 1.59,
"grad_norm": 5.3868255615234375,
"learning_rate": 0.00012030848329048843,
"loss": 0.5278,
"step": 620
},
{
"epoch": 1.62,
"grad_norm": 8.257445335388184,
"learning_rate": 0.00011902313624678665,
"loss": 0.6488,
"step": 630
},
{
"epoch": 1.65,
"grad_norm": 8.786994934082031,
"learning_rate": 0.00011773778920308484,
"loss": 0.6637,
"step": 640
},
{
"epoch": 1.67,
"grad_norm": 11.612885475158691,
"learning_rate": 0.00011645244215938304,
"loss": 0.5637,
"step": 650
},
{
"epoch": 1.7,
"grad_norm": 4.953100204467773,
"learning_rate": 0.00011516709511568124,
"loss": 0.3346,
"step": 660
},
{
"epoch": 1.72,
"grad_norm": 8.756507873535156,
"learning_rate": 0.00011388174807197945,
"loss": 0.5318,
"step": 670
},
{
"epoch": 1.75,
"grad_norm": 5.3309760093688965,
"learning_rate": 0.00011259640102827765,
"loss": 0.433,
"step": 680
},
{
"epoch": 1.77,
"grad_norm": 0.4981166422367096,
"learning_rate": 0.00011131105398457585,
"loss": 0.4548,
"step": 690
},
{
"epoch": 1.8,
"grad_norm": 7.036471366882324,
"learning_rate": 0.00011002570694087404,
"loss": 0.6301,
"step": 700
},
{
"epoch": 1.83,
"grad_norm": 5.0402021408081055,
"learning_rate": 0.00010874035989717224,
"loss": 0.6178,
"step": 710
},
{
"epoch": 1.85,
"grad_norm": 0.2094542682170868,
"learning_rate": 0.00010745501285347044,
"loss": 0.3818,
"step": 720
},
{
"epoch": 1.88,
"grad_norm": 5.399072647094727,
"learning_rate": 0.00010616966580976864,
"loss": 0.5482,
"step": 730
},
{
"epoch": 1.9,
"grad_norm": 9.017058372497559,
"learning_rate": 0.00010488431876606684,
"loss": 0.5286,
"step": 740
},
{
"epoch": 1.93,
"grad_norm": 2.5559568405151367,
"learning_rate": 0.00010359897172236503,
"loss": 0.4894,
"step": 750
},
{
"epoch": 1.95,
"grad_norm": 1.3460350036621094,
"learning_rate": 0.00010231362467866323,
"loss": 0.369,
"step": 760
},
{
"epoch": 1.98,
"grad_norm": 0.5879113078117371,
"learning_rate": 0.00010102827763496146,
"loss": 0.3088,
"step": 770
},
{
"epoch": 2.01,
"grad_norm": 1.1561224460601807,
"learning_rate": 9.974293059125965e-05,
"loss": 0.573,
"step": 780
},
{
"epoch": 2.03,
"grad_norm": 2.361337900161743,
"learning_rate": 9.845758354755785e-05,
"loss": 0.1348,
"step": 790
},
{
"epoch": 2.06,
"grad_norm": 2.3323395252227783,
"learning_rate": 9.717223650385605e-05,
"loss": 0.1292,
"step": 800
},
{
"epoch": 2.08,
"grad_norm": 0.5499300956726074,
"learning_rate": 9.588688946015425e-05,
"loss": 0.1817,
"step": 810
},
{
"epoch": 2.11,
"grad_norm": 0.2054494023323059,
"learning_rate": 9.460154241645245e-05,
"loss": 0.2232,
"step": 820
},
{
"epoch": 2.13,
"grad_norm": 2.15979266166687,
"learning_rate": 9.331619537275065e-05,
"loss": 0.2153,
"step": 830
},
{
"epoch": 2.16,
"grad_norm": 3.1036410331726074,
"learning_rate": 9.203084832904885e-05,
"loss": 0.1692,
"step": 840
},
{
"epoch": 2.19,
"grad_norm": 2.084644317626953,
"learning_rate": 9.074550128534704e-05,
"loss": 0.2034,
"step": 850
},
{
"epoch": 2.21,
"grad_norm": 2.1689724922180176,
"learning_rate": 8.946015424164524e-05,
"loss": 0.2217,
"step": 860
},
{
"epoch": 2.24,
"grad_norm": 1.0331225395202637,
"learning_rate": 8.817480719794346e-05,
"loss": 0.1232,
"step": 870
},
{
"epoch": 2.26,
"grad_norm": 3.129354953765869,
"learning_rate": 8.688946015424166e-05,
"loss": 0.1363,
"step": 880
},
{
"epoch": 2.29,
"grad_norm": 0.653751015663147,
"learning_rate": 8.560411311053986e-05,
"loss": 0.1845,
"step": 890
},
{
"epoch": 2.31,
"grad_norm": 0.20718339085578918,
"learning_rate": 8.431876606683805e-05,
"loss": 0.1638,
"step": 900
},
{
"epoch": 2.34,
"grad_norm": 5.0227274894714355,
"learning_rate": 8.303341902313625e-05,
"loss": 0.1423,
"step": 910
},
{
"epoch": 2.37,
"grad_norm": 0.7187924385070801,
"learning_rate": 8.174807197943445e-05,
"loss": 0.1702,
"step": 920
},
{
"epoch": 2.39,
"grad_norm": 1.2977266311645508,
"learning_rate": 8.046272493573265e-05,
"loss": 0.1354,
"step": 930
},
{
"epoch": 2.42,
"grad_norm": 2.3543667793273926,
"learning_rate": 7.917737789203086e-05,
"loss": 0.2209,
"step": 940
},
{
"epoch": 2.44,
"grad_norm": 0.8430781364440918,
"learning_rate": 7.789203084832905e-05,
"loss": 0.1487,
"step": 950
},
{
"epoch": 2.47,
"grad_norm": 0.08762349933385849,
"learning_rate": 7.660668380462725e-05,
"loss": 0.1038,
"step": 960
},
{
"epoch": 2.49,
"grad_norm": 8.408522605895996,
"learning_rate": 7.532133676092545e-05,
"loss": 0.2402,
"step": 970
},
{
"epoch": 2.52,
"grad_norm": 1.173913836479187,
"learning_rate": 7.403598971722365e-05,
"loss": 0.0641,
"step": 980
},
{
"epoch": 2.54,
"grad_norm": 7.908231735229492,
"learning_rate": 7.275064267352186e-05,
"loss": 0.2347,
"step": 990
},
{
"epoch": 2.57,
"grad_norm": 3.18058180809021,
"learning_rate": 7.146529562982006e-05,
"loss": 0.1326,
"step": 1000
},
{
"epoch": 2.57,
"eval_accuracy": 0.9694238815577728,
"eval_loss": 0.11155818402767181,
"eval_runtime": 52.5851,
"eval_samples_per_second": 59.085,
"eval_steps_per_second": 7.398,
"step": 1000
},
{
"epoch": 2.6,
"grad_norm": 0.4821953773498535,
"learning_rate": 7.017994858611826e-05,
"loss": 0.258,
"step": 1010
},
{
"epoch": 2.62,
"grad_norm": 4.647073268890381,
"learning_rate": 6.889460154241646e-05,
"loss": 0.1106,
"step": 1020
},
{
"epoch": 2.65,
"grad_norm": 0.07687141746282578,
"learning_rate": 6.760925449871466e-05,
"loss": 0.1768,
"step": 1030
},
{
"epoch": 2.67,
"grad_norm": 0.8537989854812622,
"learning_rate": 6.632390745501286e-05,
"loss": 0.1321,
"step": 1040
},
{
"epoch": 2.7,
"grad_norm": 1.6428909301757812,
"learning_rate": 6.503856041131106e-05,
"loss": 0.2679,
"step": 1050
},
{
"epoch": 2.72,
"grad_norm": 0.4707659184932709,
"learning_rate": 6.375321336760925e-05,
"loss": 0.192,
"step": 1060
},
{
"epoch": 2.75,
"grad_norm": 0.09739229083061218,
"learning_rate": 6.246786632390745e-05,
"loss": 0.2501,
"step": 1070
},
{
"epoch": 2.78,
"grad_norm": 2.0249221324920654,
"learning_rate": 6.118251928020567e-05,
"loss": 0.1988,
"step": 1080
},
{
"epoch": 2.8,
"grad_norm": 0.08042796701192856,
"learning_rate": 5.989717223650386e-05,
"loss": 0.04,
"step": 1090
},
{
"epoch": 2.83,
"grad_norm": 0.40489840507507324,
"learning_rate": 5.861182519280206e-05,
"loss": 0.1326,
"step": 1100
},
{
"epoch": 2.85,
"grad_norm": 8.32421875,
"learning_rate": 5.732647814910026e-05,
"loss": 0.1881,
"step": 1110
},
{
"epoch": 2.88,
"grad_norm": 0.3356345295906067,
"learning_rate": 5.604113110539846e-05,
"loss": 0.1638,
"step": 1120
},
{
"epoch": 2.9,
"grad_norm": 2.0262017250061035,
"learning_rate": 5.475578406169666e-05,
"loss": 0.0901,
"step": 1130
},
{
"epoch": 2.93,
"grad_norm": 5.13381290435791,
"learning_rate": 5.347043701799486e-05,
"loss": 0.1947,
"step": 1140
},
{
"epoch": 2.96,
"grad_norm": 4.401228904724121,
"learning_rate": 5.218508997429307e-05,
"loss": 0.1105,
"step": 1150
},
{
"epoch": 2.98,
"grad_norm": 3.711754083633423,
"learning_rate": 5.089974293059127e-05,
"loss": 0.082,
"step": 1160
},
{
"epoch": 3.01,
"grad_norm": 0.4783603847026825,
"learning_rate": 4.961439588688946e-05,
"loss": 0.1223,
"step": 1170
},
{
"epoch": 3.03,
"grad_norm": 6.101786136627197,
"learning_rate": 4.8329048843187664e-05,
"loss": 0.0386,
"step": 1180
},
{
"epoch": 3.06,
"grad_norm": 0.09219735115766525,
"learning_rate": 4.7043701799485865e-05,
"loss": 0.046,
"step": 1190
},
{
"epoch": 3.08,
"grad_norm": 0.09228511899709702,
"learning_rate": 4.5758354755784066e-05,
"loss": 0.0179,
"step": 1200
},
{
"epoch": 3.11,
"grad_norm": 0.06705611199140549,
"learning_rate": 4.447300771208227e-05,
"loss": 0.0401,
"step": 1210
},
{
"epoch": 3.14,
"grad_norm": 0.05702489614486694,
"learning_rate": 4.318766066838046e-05,
"loss": 0.0222,
"step": 1220
},
{
"epoch": 3.16,
"grad_norm": 0.051934726536273956,
"learning_rate": 4.190231362467866e-05,
"loss": 0.0249,
"step": 1230
},
{
"epoch": 3.19,
"grad_norm": 0.05382351949810982,
"learning_rate": 4.0616966580976864e-05,
"loss": 0.017,
"step": 1240
},
{
"epoch": 3.21,
"grad_norm": 0.10244094580411911,
"learning_rate": 3.9331619537275065e-05,
"loss": 0.1425,
"step": 1250
},
{
"epoch": 3.24,
"grad_norm": 0.04559057578444481,
"learning_rate": 3.8046272493573266e-05,
"loss": 0.0188,
"step": 1260
},
{
"epoch": 3.26,
"grad_norm": 1.9016327857971191,
"learning_rate": 3.676092544987147e-05,
"loss": 0.0196,
"step": 1270
},
{
"epoch": 3.29,
"grad_norm": 0.06497751176357269,
"learning_rate": 3.547557840616967e-05,
"loss": 0.0161,
"step": 1280
},
{
"epoch": 3.32,
"grad_norm": 0.05229075625538826,
"learning_rate": 3.419023136246787e-05,
"loss": 0.0165,
"step": 1290
},
{
"epoch": 3.34,
"grad_norm": 0.04599655419588089,
"learning_rate": 3.2904884318766064e-05,
"loss": 0.0338,
"step": 1300
},
{
"epoch": 3.37,
"grad_norm": 0.054148729890584946,
"learning_rate": 3.161953727506427e-05,
"loss": 0.0401,
"step": 1310
},
{
"epoch": 3.39,
"grad_norm": 0.135112926363945,
"learning_rate": 3.033419023136247e-05,
"loss": 0.0386,
"step": 1320
},
{
"epoch": 3.42,
"grad_norm": 0.05881468951702118,
"learning_rate": 2.9048843187660668e-05,
"loss": 0.0526,
"step": 1330
},
{
"epoch": 3.44,
"grad_norm": 0.11401781439781189,
"learning_rate": 2.7763496143958872e-05,
"loss": 0.0652,
"step": 1340
},
{
"epoch": 3.47,
"grad_norm": 0.6476575136184692,
"learning_rate": 2.647814910025707e-05,
"loss": 0.0772,
"step": 1350
},
{
"epoch": 3.5,
"grad_norm": 0.0521862767636776,
"learning_rate": 2.519280205655527e-05,
"loss": 0.0166,
"step": 1360
},
{
"epoch": 3.52,
"grad_norm": 0.05607061833143234,
"learning_rate": 2.3907455012853472e-05,
"loss": 0.0138,
"step": 1370
},
{
"epoch": 3.55,
"grad_norm": 0.05825699120759964,
"learning_rate": 2.262210796915167e-05,
"loss": 0.015,
"step": 1380
},
{
"epoch": 3.57,
"grad_norm": 4.6812334060668945,
"learning_rate": 2.133676092544987e-05,
"loss": 0.1053,
"step": 1390
},
{
"epoch": 3.6,
"grad_norm": 0.6198139786720276,
"learning_rate": 2.0051413881748076e-05,
"loss": 0.0178,
"step": 1400
},
{
"epoch": 3.62,
"grad_norm": 0.05886732041835785,
"learning_rate": 1.8766066838046273e-05,
"loss": 0.0278,
"step": 1410
},
{
"epoch": 3.65,
"grad_norm": 0.673959493637085,
"learning_rate": 1.7480719794344475e-05,
"loss": 0.0769,
"step": 1420
},
{
"epoch": 3.68,
"grad_norm": 0.31164562702178955,
"learning_rate": 1.6195372750642672e-05,
"loss": 0.1499,
"step": 1430
},
{
"epoch": 3.7,
"grad_norm": 0.2713916599750519,
"learning_rate": 1.4910025706940875e-05,
"loss": 0.0444,
"step": 1440
},
{
"epoch": 3.73,
"grad_norm": 2.0257036685943604,
"learning_rate": 1.3624678663239075e-05,
"loss": 0.071,
"step": 1450
},
{
"epoch": 3.75,
"grad_norm": 0.24306029081344604,
"learning_rate": 1.2339331619537276e-05,
"loss": 0.0114,
"step": 1460
},
{
"epoch": 3.78,
"grad_norm": 0.2247108817100525,
"learning_rate": 1.1053984575835475e-05,
"loss": 0.0132,
"step": 1470
},
{
"epoch": 3.8,
"grad_norm": 0.056268274784088135,
"learning_rate": 9.768637532133676e-06,
"loss": 0.0827,
"step": 1480
},
{
"epoch": 3.83,
"grad_norm": 0.04434029012918472,
"learning_rate": 8.483290488431877e-06,
"loss": 0.0138,
"step": 1490
},
{
"epoch": 3.86,
"grad_norm": 0.06419169157743454,
"learning_rate": 7.197943444730078e-06,
"loss": 0.077,
"step": 1500
},
{
"epoch": 3.88,
"grad_norm": 0.11697979271411896,
"learning_rate": 5.912596401028278e-06,
"loss": 0.0206,
"step": 1510
},
{
"epoch": 3.91,
"grad_norm": 1.25772225856781,
"learning_rate": 4.627249357326478e-06,
"loss": 0.02,
"step": 1520
},
{
"epoch": 3.93,
"grad_norm": 0.12491010874509811,
"learning_rate": 3.3419023136246787e-06,
"loss": 0.0257,
"step": 1530
},
{
"epoch": 3.96,
"grad_norm": 3.4478936195373535,
"learning_rate": 2.056555269922879e-06,
"loss": 0.0366,
"step": 1540
},
{
"epoch": 3.98,
"grad_norm": 0.0410100519657135,
"learning_rate": 7.712082262210797e-07,
"loss": 0.0525,
"step": 1550
},
{
"epoch": 4.0,
"step": 1556,
"total_flos": 9.63148132192297e+17,
"train_loss": 0.48303211886426173,
"train_runtime": 1041.8456,
"train_samples_per_second": 11.929,
"train_steps_per_second": 1.494
}
],
"logging_steps": 10,
"max_steps": 1556,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 1000,
"total_flos": 9.63148132192297e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
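
A minimal sketch (not part of the original file) of how one might read this trainer_state.json and summarize the run with the Python standard library. The file path below is an assumption based on the checkpoint directory named in "best_model_checkpoint"; adjust it to wherever the file actually lives.

import json

# Assumed location of the file shown above; change as needed.
STATE_PATH = "./vit-finetune-scrap/trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# Training-loss entries carry a "loss" key; the evaluation entry logged at
# step 1000 carries "eval_loss"/"eval_accuracy" instead, and the final
# summary entry carries "train_loss", so each filter picks out one kind.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best checkpoint: {state['best_model_checkpoint']} "
      f"(eval loss {state['best_metric']:.4f})")
print(f"logged training steps: {len(train_logs)}, "
      f"last logged loss: {train_logs[-1]['loss']}")
for e in eval_logs:
    print(f"step {e['step']}: eval_accuracy={e['eval_accuracy']:.4f}, "
          f"eval_loss={e['eval_loss']:.4f}")

Run against this file, the loop would report the single evaluation at step 1000 (eval_accuracy ≈ 0.9694, eval_loss ≈ 0.1116), matching the "best_metric" recorded at the top of the JSON.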