vit-weldclassifyv2 / trainer_state.json
{
"best_metric": 0.46125656366348267,
"best_model_checkpoint": "vit-weldclassifyv2/checkpoint-700",
"epoch": 13.0,
"eval_steps": 100,
"global_step": 2028,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0641025641025641,
"grad_norm": 1.8029550313949585,
"learning_rate": 0.0001990138067061144,
"loss": 1.1982,
"step": 10
},
{
"epoch": 0.1282051282051282,
"grad_norm": 2.130356788635254,
"learning_rate": 0.0001980276134122288,
"loss": 1.2077,
"step": 20
},
{
"epoch": 0.19230769230769232,
"grad_norm": 0.5039390921592712,
"learning_rate": 0.0001970414201183432,
"loss": 1.1596,
"step": 30
},
{
"epoch": 0.2564102564102564,
"grad_norm": 1.2507092952728271,
"learning_rate": 0.0001960552268244576,
"loss": 1.1413,
"step": 40
},
{
"epoch": 0.32051282051282054,
"grad_norm": 1.2734973430633545,
"learning_rate": 0.000195069033530572,
"loss": 1.2107,
"step": 50
},
{
"epoch": 0.38461538461538464,
"grad_norm": 1.3238846063613892,
"learning_rate": 0.0001940828402366864,
"loss": 1.1462,
"step": 60
},
{
"epoch": 0.44871794871794873,
"grad_norm": 3.4750142097473145,
"learning_rate": 0.0001930966469428008,
"loss": 1.0551,
"step": 70
},
{
"epoch": 0.5128205128205128,
"grad_norm": 1.8171255588531494,
"learning_rate": 0.0001921104536489152,
"loss": 1.0318,
"step": 80
},
{
"epoch": 0.5769230769230769,
"grad_norm": 1.7789899110794067,
"learning_rate": 0.0001911242603550296,
"loss": 0.991,
"step": 90
},
{
"epoch": 0.6410256410256411,
"grad_norm": 2.315300464630127,
"learning_rate": 0.00019013806706114398,
"loss": 1.035,
"step": 100
},
{
"epoch": 0.6410256410256411,
"eval_accuracy": 0.4028776978417266,
"eval_loss": 1.1331541538238525,
"eval_runtime": 2.3815,
"eval_samples_per_second": 116.735,
"eval_steps_per_second": 14.697,
"step": 100
},
{
"epoch": 0.7051282051282052,
"grad_norm": 2.649456262588501,
"learning_rate": 0.00018915187376725837,
"loss": 1.077,
"step": 110
},
{
"epoch": 0.7692307692307693,
"grad_norm": 1.4918590784072876,
"learning_rate": 0.00018816568047337278,
"loss": 0.8772,
"step": 120
},
{
"epoch": 0.8333333333333334,
"grad_norm": 4.240333557128906,
"learning_rate": 0.0001871794871794872,
"loss": 0.892,
"step": 130
},
{
"epoch": 0.8974358974358975,
"grad_norm": 2.2014944553375244,
"learning_rate": 0.0001861932938856016,
"loss": 0.9662,
"step": 140
},
{
"epoch": 0.9615384615384616,
"grad_norm": 2.9727776050567627,
"learning_rate": 0.00018520710059171598,
"loss": 0.9294,
"step": 150
},
{
"epoch": 1.0256410256410255,
"grad_norm": 2.4391555786132812,
"learning_rate": 0.0001842209072978304,
"loss": 0.9048,
"step": 160
},
{
"epoch": 1.0897435897435896,
"grad_norm": 2.942401170730591,
"learning_rate": 0.00018323471400394478,
"loss": 0.868,
"step": 170
},
{
"epoch": 1.1538461538461537,
"grad_norm": 2.937878131866455,
"learning_rate": 0.00018224852071005917,
"loss": 0.8108,
"step": 180
},
{
"epoch": 1.217948717948718,
"grad_norm": 1.754459261894226,
"learning_rate": 0.00018126232741617356,
"loss": 0.8036,
"step": 190
},
{
"epoch": 1.282051282051282,
"grad_norm": 2.544161319732666,
"learning_rate": 0.00018027613412228798,
"loss": 0.6893,
"step": 200
},
{
"epoch": 1.282051282051282,
"eval_accuracy": 0.6654676258992805,
"eval_loss": 0.734099268913269,
"eval_runtime": 2.2653,
"eval_samples_per_second": 122.719,
"eval_steps_per_second": 15.45,
"step": 200
},
{
"epoch": 1.3461538461538463,
"grad_norm": 3.5553886890411377,
"learning_rate": 0.0001792899408284024,
"loss": 0.6454,
"step": 210
},
{
"epoch": 1.4102564102564101,
"grad_norm": 2.9865598678588867,
"learning_rate": 0.00017830374753451678,
"loss": 0.7549,
"step": 220
},
{
"epoch": 1.4743589743589745,
"grad_norm": 2.5597012042999268,
"learning_rate": 0.00017731755424063117,
"loss": 0.658,
"step": 230
},
{
"epoch": 1.5384615384615383,
"grad_norm": 1.5761655569076538,
"learning_rate": 0.00017633136094674556,
"loss": 0.6302,
"step": 240
},
{
"epoch": 1.6025641025641026,
"grad_norm": 2.0157439708709717,
"learning_rate": 0.00017534516765285997,
"loss": 0.6723,
"step": 250
},
{
"epoch": 1.6666666666666665,
"grad_norm": 2.5978243350982666,
"learning_rate": 0.00017435897435897436,
"loss": 0.6001,
"step": 260
},
{
"epoch": 1.7307692307692308,
"grad_norm": 1.687144160270691,
"learning_rate": 0.00017337278106508875,
"loss": 0.5901,
"step": 270
},
{
"epoch": 1.7948717948717947,
"grad_norm": 3.0179696083068848,
"learning_rate": 0.00017238658777120317,
"loss": 0.8432,
"step": 280
},
{
"epoch": 1.858974358974359,
"grad_norm": 2.943056344985962,
"learning_rate": 0.00017140039447731758,
"loss": 0.5568,
"step": 290
},
{
"epoch": 1.9230769230769231,
"grad_norm": 4.608463764190674,
"learning_rate": 0.00017041420118343197,
"loss": 0.5618,
"step": 300
},
{
"epoch": 1.9230769230769231,
"eval_accuracy": 0.7553956834532374,
"eval_loss": 0.559623658657074,
"eval_runtime": 2.7527,
"eval_samples_per_second": 100.99,
"eval_steps_per_second": 12.715,
"step": 300
},
{
"epoch": 1.9871794871794872,
"grad_norm": 2.1685523986816406,
"learning_rate": 0.00016942800788954636,
"loss": 0.6462,
"step": 310
},
{
"epoch": 2.051282051282051,
"grad_norm": 2.486382484436035,
"learning_rate": 0.00016844181459566075,
"loss": 0.6036,
"step": 320
},
{
"epoch": 2.1153846153846154,
"grad_norm": 3.2706494331359863,
"learning_rate": 0.00016745562130177514,
"loss": 0.5013,
"step": 330
},
{
"epoch": 2.1794871794871793,
"grad_norm": 3.133267641067505,
"learning_rate": 0.00016646942800788956,
"loss": 0.4468,
"step": 340
},
{
"epoch": 2.2435897435897436,
"grad_norm": 4.209103584289551,
"learning_rate": 0.00016548323471400394,
"loss": 0.2345,
"step": 350
},
{
"epoch": 2.3076923076923075,
"grad_norm": 4.08372163772583,
"learning_rate": 0.00016449704142011836,
"loss": 0.3699,
"step": 360
},
{
"epoch": 2.371794871794872,
"grad_norm": 4.698208808898926,
"learning_rate": 0.00016351084812623275,
"loss": 0.4143,
"step": 370
},
{
"epoch": 2.435897435897436,
"grad_norm": 1.3208674192428589,
"learning_rate": 0.00016252465483234716,
"loss": 0.4155,
"step": 380
},
{
"epoch": 2.5,
"grad_norm": 3.844871759414673,
"learning_rate": 0.00016153846153846155,
"loss": 0.4743,
"step": 390
},
{
"epoch": 2.564102564102564,
"grad_norm": 4.011849880218506,
"learning_rate": 0.00016055226824457594,
"loss": 0.4344,
"step": 400
},
{
"epoch": 2.564102564102564,
"eval_accuracy": 0.7769784172661871,
"eval_loss": 0.5951021909713745,
"eval_runtime": 2.2481,
"eval_samples_per_second": 123.658,
"eval_steps_per_second": 15.568,
"step": 400
},
{
"epoch": 2.628205128205128,
"grad_norm": 2.3567044734954834,
"learning_rate": 0.00015956607495069033,
"loss": 0.3425,
"step": 410
},
{
"epoch": 2.6923076923076925,
"grad_norm": 2.8400659561157227,
"learning_rate": 0.00015857988165680475,
"loss": 0.3185,
"step": 420
},
{
"epoch": 2.7564102564102564,
"grad_norm": 2.8755509853363037,
"learning_rate": 0.00015759368836291914,
"loss": 0.3558,
"step": 430
},
{
"epoch": 2.8205128205128203,
"grad_norm": 1.4690508842468262,
"learning_rate": 0.00015660749506903355,
"loss": 0.2615,
"step": 440
},
{
"epoch": 2.8846153846153846,
"grad_norm": 1.9505876302719116,
"learning_rate": 0.00015562130177514794,
"loss": 0.4032,
"step": 450
},
{
"epoch": 2.948717948717949,
"grad_norm": 3.414158821105957,
"learning_rate": 0.00015463510848126233,
"loss": 0.3802,
"step": 460
},
{
"epoch": 3.0128205128205128,
"grad_norm": 1.8766813278198242,
"learning_rate": 0.00015364891518737675,
"loss": 0.3009,
"step": 470
},
{
"epoch": 3.076923076923077,
"grad_norm": 4.2605977058410645,
"learning_rate": 0.00015266272189349113,
"loss": 0.1754,
"step": 480
},
{
"epoch": 3.141025641025641,
"grad_norm": 1.4261757135391235,
"learning_rate": 0.00015167652859960552,
"loss": 0.2581,
"step": 490
},
{
"epoch": 3.2051282051282053,
"grad_norm": 4.654241561889648,
"learning_rate": 0.0001506903353057199,
"loss": 0.1591,
"step": 500
},
{
"epoch": 3.2051282051282053,
"eval_accuracy": 0.8453237410071942,
"eval_loss": 0.4667339622974396,
"eval_runtime": 2.34,
"eval_samples_per_second": 118.802,
"eval_steps_per_second": 14.957,
"step": 500
},
{
"epoch": 3.269230769230769,
"grad_norm": 8.496054649353027,
"learning_rate": 0.00014970414201183433,
"loss": 0.2108,
"step": 510
},
{
"epoch": 3.3333333333333335,
"grad_norm": 6.387451648712158,
"learning_rate": 0.00014871794871794872,
"loss": 0.2114,
"step": 520
},
{
"epoch": 3.3974358974358974,
"grad_norm": 4.0780534744262695,
"learning_rate": 0.00014773175542406313,
"loss": 0.1414,
"step": 530
},
{
"epoch": 3.4615384615384617,
"grad_norm": 4.167177200317383,
"learning_rate": 0.00014674556213017752,
"loss": 0.1957,
"step": 540
},
{
"epoch": 3.5256410256410255,
"grad_norm": 2.674177646636963,
"learning_rate": 0.0001457593688362919,
"loss": 0.2123,
"step": 550
},
{
"epoch": 3.58974358974359,
"grad_norm": 0.5835099816322327,
"learning_rate": 0.00014477317554240633,
"loss": 0.2326,
"step": 560
},
{
"epoch": 3.6538461538461537,
"grad_norm": 0.5650436878204346,
"learning_rate": 0.00014378698224852072,
"loss": 0.0843,
"step": 570
},
{
"epoch": 3.717948717948718,
"grad_norm": 0.2847900092601776,
"learning_rate": 0.0001428007889546351,
"loss": 0.1814,
"step": 580
},
{
"epoch": 3.782051282051282,
"grad_norm": 0.19226200878620148,
"learning_rate": 0.0001418145956607495,
"loss": 0.0547,
"step": 590
},
{
"epoch": 3.8461538461538463,
"grad_norm": 4.472414970397949,
"learning_rate": 0.0001408284023668639,
"loss": 0.1821,
"step": 600
},
{
"epoch": 3.8461538461538463,
"eval_accuracy": 0.8345323741007195,
"eval_loss": 0.5082367658615112,
"eval_runtime": 3.1963,
"eval_samples_per_second": 86.976,
"eval_steps_per_second": 10.95,
"step": 600
},
{
"epoch": 3.91025641025641,
"grad_norm": 1.0175515413284302,
"learning_rate": 0.00013984220907297832,
"loss": 0.1435,
"step": 610
},
{
"epoch": 3.9743589743589745,
"grad_norm": 10.650223731994629,
"learning_rate": 0.0001388560157790927,
"loss": 0.208,
"step": 620
},
{
"epoch": 4.038461538461538,
"grad_norm": 1.7321878671646118,
"learning_rate": 0.0001378698224852071,
"loss": 0.1781,
"step": 630
},
{
"epoch": 4.102564102564102,
"grad_norm": 0.2047787755727768,
"learning_rate": 0.00013688362919132152,
"loss": 0.1453,
"step": 640
},
{
"epoch": 4.166666666666667,
"grad_norm": 0.09600222110748291,
"learning_rate": 0.0001358974358974359,
"loss": 0.1117,
"step": 650
},
{
"epoch": 4.230769230769231,
"grad_norm": 4.425860404968262,
"learning_rate": 0.0001349112426035503,
"loss": 0.1983,
"step": 660
},
{
"epoch": 4.294871794871795,
"grad_norm": 5.773613452911377,
"learning_rate": 0.00013392504930966468,
"loss": 0.1013,
"step": 670
},
{
"epoch": 4.358974358974359,
"grad_norm": 15.129050254821777,
"learning_rate": 0.0001329388560157791,
"loss": 0.1176,
"step": 680
},
{
"epoch": 4.423076923076923,
"grad_norm": 8.11106014251709,
"learning_rate": 0.00013195266272189352,
"loss": 0.09,
"step": 690
},
{
"epoch": 4.487179487179487,
"grad_norm": 8.092696189880371,
"learning_rate": 0.0001309664694280079,
"loss": 0.0811,
"step": 700
},
{
"epoch": 4.487179487179487,
"eval_accuracy": 0.8633093525179856,
"eval_loss": 0.46125656366348267,
"eval_runtime": 2.3167,
"eval_samples_per_second": 120.0,
"eval_steps_per_second": 15.108,
"step": 700
},
{
"epoch": 4.551282051282051,
"grad_norm": 0.07582438737154007,
"learning_rate": 0.0001299802761341223,
"loss": 0.0288,
"step": 710
},
{
"epoch": 4.615384615384615,
"grad_norm": 0.4562825858592987,
"learning_rate": 0.00012899408284023668,
"loss": 0.0675,
"step": 720
},
{
"epoch": 4.67948717948718,
"grad_norm": 8.025144577026367,
"learning_rate": 0.0001280078895463511,
"loss": 0.1006,
"step": 730
},
{
"epoch": 4.743589743589744,
"grad_norm": 0.06422187387943268,
"learning_rate": 0.0001270216962524655,
"loss": 0.1304,
"step": 740
},
{
"epoch": 4.8076923076923075,
"grad_norm": 9.110365867614746,
"learning_rate": 0.00012603550295857988,
"loss": 0.1603,
"step": 750
},
{
"epoch": 4.871794871794872,
"grad_norm": 0.26326459646224976,
"learning_rate": 0.0001250493096646943,
"loss": 0.0867,
"step": 760
},
{
"epoch": 4.935897435897436,
"grad_norm": 11.61680793762207,
"learning_rate": 0.00012406311637080868,
"loss": 0.2536,
"step": 770
},
{
"epoch": 5.0,
"grad_norm": 7.968641757965088,
"learning_rate": 0.0001230769230769231,
"loss": 0.3829,
"step": 780
},
{
"epoch": 5.064102564102564,
"grad_norm": 1.6801921129226685,
"learning_rate": 0.00012209072978303749,
"loss": 0.1797,
"step": 790
},
{
"epoch": 5.128205128205128,
"grad_norm": 6.869860649108887,
"learning_rate": 0.00012110453648915188,
"loss": 0.1729,
"step": 800
},
{
"epoch": 5.128205128205128,
"eval_accuracy": 0.7985611510791367,
"eval_loss": 0.6382251977920532,
"eval_runtime": 2.3271,
"eval_samples_per_second": 119.461,
"eval_steps_per_second": 15.04,
"step": 800
},
{
"epoch": 5.1923076923076925,
"grad_norm": 3.878455638885498,
"learning_rate": 0.00012011834319526626,
"loss": 0.0561,
"step": 810
},
{
"epoch": 5.256410256410256,
"grad_norm": 2.88934588432312,
"learning_rate": 0.00011913214990138068,
"loss": 0.0457,
"step": 820
},
{
"epoch": 5.32051282051282,
"grad_norm": 0.9121398329734802,
"learning_rate": 0.00011814595660749508,
"loss": 0.0338,
"step": 830
},
{
"epoch": 5.384615384615385,
"grad_norm": 0.2235652208328247,
"learning_rate": 0.00011715976331360947,
"loss": 0.0263,
"step": 840
},
{
"epoch": 5.448717948717949,
"grad_norm": 3.4060192108154297,
"learning_rate": 0.00011617357001972386,
"loss": 0.1453,
"step": 850
},
{
"epoch": 5.512820512820513,
"grad_norm": 4.755865573883057,
"learning_rate": 0.00011518737672583828,
"loss": 0.056,
"step": 860
},
{
"epoch": 5.576923076923077,
"grad_norm": 0.14291535317897797,
"learning_rate": 0.00011420118343195268,
"loss": 0.1059,
"step": 870
},
{
"epoch": 5.641025641025641,
"grad_norm": 0.09121762961149216,
"learning_rate": 0.00011321499013806707,
"loss": 0.1008,
"step": 880
},
{
"epoch": 5.705128205128205,
"grad_norm": 0.2447558492422104,
"learning_rate": 0.00011222879684418146,
"loss": 0.0654,
"step": 890
},
{
"epoch": 5.769230769230769,
"grad_norm": 6.003875255584717,
"learning_rate": 0.00011124260355029586,
"loss": 0.1174,
"step": 900
},
{
"epoch": 5.769230769230769,
"eval_accuracy": 0.8669064748201439,
"eval_loss": 0.4974174201488495,
"eval_runtime": 2.866,
"eval_samples_per_second": 96.999,
"eval_steps_per_second": 12.212,
"step": 900
},
{
"epoch": 5.833333333333333,
"grad_norm": 1.183342695236206,
"learning_rate": 0.00011025641025641027,
"loss": 0.0889,
"step": 910
},
{
"epoch": 5.897435897435898,
"grad_norm": 4.268181324005127,
"learning_rate": 0.00010927021696252466,
"loss": 0.0612,
"step": 920
},
{
"epoch": 5.961538461538462,
"grad_norm": 0.4750482439994812,
"learning_rate": 0.00010828402366863905,
"loss": 0.0288,
"step": 930
},
{
"epoch": 6.0256410256410255,
"grad_norm": 0.04578419402241707,
"learning_rate": 0.00010729783037475345,
"loss": 0.056,
"step": 940
},
{
"epoch": 6.089743589743589,
"grad_norm": 0.03350140154361725,
"learning_rate": 0.00010631163708086787,
"loss": 0.0399,
"step": 950
},
{
"epoch": 6.153846153846154,
"grad_norm": 0.07433091104030609,
"learning_rate": 0.00010532544378698226,
"loss": 0.0111,
"step": 960
},
{
"epoch": 6.217948717948718,
"grad_norm": 0.16331972181797028,
"learning_rate": 0.00010433925049309665,
"loss": 0.0594,
"step": 970
},
{
"epoch": 6.282051282051282,
"grad_norm": 0.04476318508386612,
"learning_rate": 0.00010335305719921105,
"loss": 0.1227,
"step": 980
},
{
"epoch": 6.346153846153846,
"grad_norm": 0.07675521820783615,
"learning_rate": 0.00010236686390532544,
"loss": 0.0712,
"step": 990
},
{
"epoch": 6.410256410256411,
"grad_norm": 0.6457383632659912,
"learning_rate": 0.00010138067061143986,
"loss": 0.0389,
"step": 1000
},
{
"epoch": 6.410256410256411,
"eval_accuracy": 0.8453237410071942,
"eval_loss": 0.6048529148101807,
"eval_runtime": 2.3412,
"eval_samples_per_second": 118.744,
"eval_steps_per_second": 14.95,
"step": 1000
},
{
"epoch": 6.4743589743589745,
"grad_norm": 0.028587404638528824,
"learning_rate": 0.00010039447731755424,
"loss": 0.0797,
"step": 1010
},
{
"epoch": 6.538461538461538,
"grad_norm": 0.4266931712627411,
"learning_rate": 9.940828402366865e-05,
"loss": 0.0073,
"step": 1020
},
{
"epoch": 6.602564102564102,
"grad_norm": 0.034420862793922424,
"learning_rate": 9.842209072978305e-05,
"loss": 0.0094,
"step": 1030
},
{
"epoch": 6.666666666666667,
"grad_norm": 0.03525734320282936,
"learning_rate": 9.743589743589744e-05,
"loss": 0.0773,
"step": 1040
},
{
"epoch": 6.730769230769231,
"grad_norm": 0.03397082909941673,
"learning_rate": 9.644970414201184e-05,
"loss": 0.0599,
"step": 1050
},
{
"epoch": 6.794871794871795,
"grad_norm": 0.3723975419998169,
"learning_rate": 9.546351084812624e-05,
"loss": 0.0285,
"step": 1060
},
{
"epoch": 6.858974358974359,
"grad_norm": 8.852377891540527,
"learning_rate": 9.447731755424064e-05,
"loss": 0.0417,
"step": 1070
},
{
"epoch": 6.923076923076923,
"grad_norm": 0.05648146569728851,
"learning_rate": 9.349112426035503e-05,
"loss": 0.0058,
"step": 1080
},
{
"epoch": 6.987179487179487,
"grad_norm": 0.5381138920783997,
"learning_rate": 9.250493096646942e-05,
"loss": 0.0398,
"step": 1090
},
{
"epoch": 7.051282051282051,
"grad_norm": 0.7571529746055603,
"learning_rate": 9.151873767258384e-05,
"loss": 0.0099,
"step": 1100
},
{
"epoch": 7.051282051282051,
"eval_accuracy": 0.8561151079136691,
"eval_loss": 0.6146668195724487,
"eval_runtime": 2.319,
"eval_samples_per_second": 119.88,
"eval_steps_per_second": 15.093,
"step": 1100
},
{
"epoch": 7.115384615384615,
"grad_norm": 0.022464392706751823,
"learning_rate": 9.053254437869823e-05,
"loss": 0.0617,
"step": 1110
},
{
"epoch": 7.17948717948718,
"grad_norm": 0.030899520963430405,
"learning_rate": 8.954635108481263e-05,
"loss": 0.117,
"step": 1120
},
{
"epoch": 7.243589743589744,
"grad_norm": 0.09432167559862137,
"learning_rate": 8.856015779092702e-05,
"loss": 0.0943,
"step": 1130
},
{
"epoch": 7.3076923076923075,
"grad_norm": 2.3787362575531006,
"learning_rate": 8.757396449704143e-05,
"loss": 0.0533,
"step": 1140
},
{
"epoch": 7.371794871794872,
"grad_norm": 0.020734421908855438,
"learning_rate": 8.658777120315582e-05,
"loss": 0.013,
"step": 1150
},
{
"epoch": 7.435897435897436,
"grad_norm": 0.029544230550527573,
"learning_rate": 8.560157790927023e-05,
"loss": 0.026,
"step": 1160
},
{
"epoch": 7.5,
"grad_norm": 0.02122228778898716,
"learning_rate": 8.461538461538461e-05,
"loss": 0.0236,
"step": 1170
},
{
"epoch": 7.564102564102564,
"grad_norm": 0.04070986807346344,
"learning_rate": 8.362919132149902e-05,
"loss": 0.0119,
"step": 1180
},
{
"epoch": 7.628205128205128,
"grad_norm": 0.026547027751803398,
"learning_rate": 8.264299802761342e-05,
"loss": 0.0084,
"step": 1190
},
{
"epoch": 7.6923076923076925,
"grad_norm": 2.327425956726074,
"learning_rate": 8.165680473372781e-05,
"loss": 0.0342,
"step": 1200
},
{
"epoch": 7.6923076923076925,
"eval_accuracy": 0.8741007194244604,
"eval_loss": 0.5602902173995972,
"eval_runtime": 3.1225,
"eval_samples_per_second": 89.031,
"eval_steps_per_second": 11.209,
"step": 1200
},
{
"epoch": 7.756410256410256,
"grad_norm": 0.035591427236795425,
"learning_rate": 8.067061143984221e-05,
"loss": 0.016,
"step": 1210
},
{
"epoch": 7.82051282051282,
"grad_norm": 0.032526835799217224,
"learning_rate": 7.968441814595661e-05,
"loss": 0.0187,
"step": 1220
},
{
"epoch": 7.884615384615385,
"grad_norm": 0.02054983749985695,
"learning_rate": 7.869822485207101e-05,
"loss": 0.0037,
"step": 1230
},
{
"epoch": 7.948717948717949,
"grad_norm": 0.02111811563372612,
"learning_rate": 7.77120315581854e-05,
"loss": 0.0042,
"step": 1240
},
{
"epoch": 8.012820512820513,
"grad_norm": 0.026984859257936478,
"learning_rate": 7.67258382642998e-05,
"loss": 0.0043,
"step": 1250
},
{
"epoch": 8.076923076923077,
"grad_norm": 0.026681529358029366,
"learning_rate": 7.573964497041421e-05,
"loss": 0.0045,
"step": 1260
},
{
"epoch": 8.14102564102564,
"grad_norm": 0.01863095909357071,
"learning_rate": 7.475345167652861e-05,
"loss": 0.0037,
"step": 1270
},
{
"epoch": 8.205128205128204,
"grad_norm": 0.39768093824386597,
"learning_rate": 7.3767258382643e-05,
"loss": 0.0037,
"step": 1280
},
{
"epoch": 8.26923076923077,
"grad_norm": 0.015911702066659927,
"learning_rate": 7.27810650887574e-05,
"loss": 0.0225,
"step": 1290
},
{
"epoch": 8.333333333333334,
"grad_norm": 0.014970704913139343,
"learning_rate": 7.17948717948718e-05,
"loss": 0.0175,
"step": 1300
},
{
"epoch": 8.333333333333334,
"eval_accuracy": 0.8848920863309353,
"eval_loss": 0.5678784847259521,
"eval_runtime": 2.3014,
"eval_samples_per_second": 120.796,
"eval_steps_per_second": 15.208,
"step": 1300
},
{
"epoch": 8.397435897435898,
"grad_norm": 0.043215423822402954,
"learning_rate": 7.08086785009862e-05,
"loss": 0.0035,
"step": 1310
},
{
"epoch": 8.461538461538462,
"grad_norm": 0.017348231747746468,
"learning_rate": 6.98224852071006e-05,
"loss": 0.0034,
"step": 1320
},
{
"epoch": 8.525641025641026,
"grad_norm": 0.015917859971523285,
"learning_rate": 6.883629191321498e-05,
"loss": 0.0033,
"step": 1330
},
{
"epoch": 8.58974358974359,
"grad_norm": 0.016305677592754364,
"learning_rate": 6.78500986193294e-05,
"loss": 0.0388,
"step": 1340
},
{
"epoch": 8.653846153846153,
"grad_norm": 0.22299791872501373,
"learning_rate": 6.686390532544379e-05,
"loss": 0.0036,
"step": 1350
},
{
"epoch": 8.717948717948717,
"grad_norm": 0.01393035240471363,
"learning_rate": 6.587771203155819e-05,
"loss": 0.0035,
"step": 1360
},
{
"epoch": 8.782051282051283,
"grad_norm": 0.017584780231118202,
"learning_rate": 6.489151873767258e-05,
"loss": 0.0048,
"step": 1370
},
{
"epoch": 8.846153846153847,
"grad_norm": 0.016196981072425842,
"learning_rate": 6.390532544378698e-05,
"loss": 0.0046,
"step": 1380
},
{
"epoch": 8.91025641025641,
"grad_norm": 0.0213446244597435,
"learning_rate": 6.291913214990139e-05,
"loss": 0.0031,
"step": 1390
},
{
"epoch": 8.974358974358974,
"grad_norm": 0.01509836595505476,
"learning_rate": 6.193293885601579e-05,
"loss": 0.0177,
"step": 1400
},
{
"epoch": 8.974358974358974,
"eval_accuracy": 0.8669064748201439,
"eval_loss": 0.6591752767562866,
"eval_runtime": 2.4447,
"eval_samples_per_second": 113.714,
"eval_steps_per_second": 14.316,
"step": 1400
},
{
"epoch": 9.038461538461538,
"grad_norm": 0.012759982608258724,
"learning_rate": 6.094674556213018e-05,
"loss": 0.0061,
"step": 1410
},
{
"epoch": 9.102564102564102,
"grad_norm": 0.013235015794634819,
"learning_rate": 5.996055226824457e-05,
"loss": 0.0026,
"step": 1420
},
{
"epoch": 9.166666666666666,
"grad_norm": 0.01351054199039936,
"learning_rate": 5.897435897435898e-05,
"loss": 0.0028,
"step": 1430
},
{
"epoch": 9.23076923076923,
"grad_norm": 0.014609870500862598,
"learning_rate": 5.798816568047337e-05,
"loss": 0.0028,
"step": 1440
},
{
"epoch": 9.294871794871796,
"grad_norm": 0.012192473746836185,
"learning_rate": 5.700197238658778e-05,
"loss": 0.0026,
"step": 1450
},
{
"epoch": 9.35897435897436,
"grad_norm": 0.012684819288551807,
"learning_rate": 5.601577909270217e-05,
"loss": 0.0028,
"step": 1460
},
{
"epoch": 9.423076923076923,
"grad_norm": 0.01298692636191845,
"learning_rate": 5.502958579881658e-05,
"loss": 0.0027,
"step": 1470
},
{
"epoch": 9.487179487179487,
"grad_norm": 0.014186136424541473,
"learning_rate": 5.4043392504930966e-05,
"loss": 0.0026,
"step": 1480
},
{
"epoch": 9.551282051282051,
"grad_norm": 0.013288854621350765,
"learning_rate": 5.3057199211045375e-05,
"loss": 0.0024,
"step": 1490
},
{
"epoch": 9.615384615384615,
"grad_norm": 0.014771179296076298,
"learning_rate": 5.2071005917159764e-05,
"loss": 0.0025,
"step": 1500
},
{
"epoch": 9.615384615384615,
"eval_accuracy": 0.8669064748201439,
"eval_loss": 0.5999904274940491,
"eval_runtime": 2.9224,
"eval_samples_per_second": 95.126,
"eval_steps_per_second": 11.976,
"step": 1500
},
{
"epoch": 9.679487179487179,
"grad_norm": 0.014581016264855862,
"learning_rate": 5.1084812623274167e-05,
"loss": 0.0024,
"step": 1510
},
{
"epoch": 9.743589743589745,
"grad_norm": 0.013936012051999569,
"learning_rate": 5.009861932938856e-05,
"loss": 0.0025,
"step": 1520
},
{
"epoch": 9.807692307692308,
"grad_norm": 0.011059069074690342,
"learning_rate": 4.9112426035502965e-05,
"loss": 0.0024,
"step": 1530
},
{
"epoch": 9.871794871794872,
"grad_norm": 0.01148083247244358,
"learning_rate": 4.812623274161736e-05,
"loss": 0.0024,
"step": 1540
},
{
"epoch": 9.935897435897436,
"grad_norm": 0.013756124302744865,
"learning_rate": 4.714003944773176e-05,
"loss": 0.0024,
"step": 1550
},
{
"epoch": 10.0,
"grad_norm": 0.011675598099827766,
"learning_rate": 4.615384615384616e-05,
"loss": 0.0024,
"step": 1560
},
{
"epoch": 10.064102564102564,
"grad_norm": 0.014996953308582306,
"learning_rate": 4.5167652859960554e-05,
"loss": 0.0023,
"step": 1570
},
{
"epoch": 10.128205128205128,
"grad_norm": 0.01139632984995842,
"learning_rate": 4.418145956607495e-05,
"loss": 0.0023,
"step": 1580
},
{
"epoch": 10.192307692307692,
"grad_norm": 0.013790671713650227,
"learning_rate": 4.319526627218935e-05,
"loss": 0.0023,
"step": 1590
},
{
"epoch": 10.256410256410255,
"grad_norm": 0.011263075284659863,
"learning_rate": 4.220907297830375e-05,
"loss": 0.0021,
"step": 1600
},
{
"epoch": 10.256410256410255,
"eval_accuracy": 0.8597122302158273,
"eval_loss": 0.6059572696685791,
"eval_runtime": 2.2851,
"eval_samples_per_second": 121.656,
"eval_steps_per_second": 15.316,
"step": 1600
},
{
"epoch": 10.320512820512821,
"grad_norm": 0.011118494905531406,
"learning_rate": 4.122287968441815e-05,
"loss": 0.0022,
"step": 1610
},
{
"epoch": 10.384615384615385,
"grad_norm": 0.01060671266168356,
"learning_rate": 4.0236686390532545e-05,
"loss": 0.0024,
"step": 1620
},
{
"epoch": 10.448717948717949,
"grad_norm": 0.01056158822029829,
"learning_rate": 3.925049309664695e-05,
"loss": 0.0021,
"step": 1630
},
{
"epoch": 10.512820512820513,
"grad_norm": 0.01061705406755209,
"learning_rate": 3.826429980276134e-05,
"loss": 0.0021,
"step": 1640
},
{
"epoch": 10.576923076923077,
"grad_norm": 0.010555809363722801,
"learning_rate": 3.7278106508875746e-05,
"loss": 0.0021,
"step": 1650
},
{
"epoch": 10.64102564102564,
"grad_norm": 0.011333225294947624,
"learning_rate": 3.629191321499014e-05,
"loss": 0.0021,
"step": 1660
},
{
"epoch": 10.705128205128204,
"grad_norm": 0.010467233136296272,
"learning_rate": 3.5305719921104544e-05,
"loss": 0.0022,
"step": 1670
},
{
"epoch": 10.76923076923077,
"grad_norm": 0.010869566351175308,
"learning_rate": 3.431952662721893e-05,
"loss": 0.0022,
"step": 1680
},
{
"epoch": 10.833333333333334,
"grad_norm": 0.009824937209486961,
"learning_rate": 3.3333333333333335e-05,
"loss": 0.0021,
"step": 1690
},
{
"epoch": 10.897435897435898,
"grad_norm": 0.010621228255331516,
"learning_rate": 3.234714003944773e-05,
"loss": 0.002,
"step": 1700
},
{
"epoch": 10.897435897435898,
"eval_accuracy": 0.8597122302158273,
"eval_loss": 0.6113312840461731,
"eval_runtime": 2.2287,
"eval_samples_per_second": 124.735,
"eval_steps_per_second": 15.704,
"step": 1700
},
{
"epoch": 10.961538461538462,
"grad_norm": 0.010598575696349144,
"learning_rate": 3.136094674556213e-05,
"loss": 0.002,
"step": 1710
},
{
"epoch": 11.025641025641026,
"grad_norm": 0.010133952833712101,
"learning_rate": 3.037475345167653e-05,
"loss": 0.002,
"step": 1720
},
{
"epoch": 11.08974358974359,
"grad_norm": 0.00947494339197874,
"learning_rate": 2.9388560157790928e-05,
"loss": 0.002,
"step": 1730
},
{
"epoch": 11.153846153846153,
"grad_norm": 0.010458361357450485,
"learning_rate": 2.8402366863905327e-05,
"loss": 0.002,
"step": 1740
},
{
"epoch": 11.217948717948717,
"grad_norm": 0.009749571792781353,
"learning_rate": 2.7416173570019726e-05,
"loss": 0.0019,
"step": 1750
},
{
"epoch": 11.282051282051283,
"grad_norm": 0.010477598756551743,
"learning_rate": 2.6429980276134125e-05,
"loss": 0.0021,
"step": 1760
},
{
"epoch": 11.346153846153847,
"grad_norm": 0.009760042652487755,
"learning_rate": 2.5443786982248524e-05,
"loss": 0.002,
"step": 1770
},
{
"epoch": 11.41025641025641,
"grad_norm": 0.009649330750107765,
"learning_rate": 2.445759368836292e-05,
"loss": 0.0019,
"step": 1780
},
{
"epoch": 11.474358974358974,
"grad_norm": 0.010434734635055065,
"learning_rate": 2.3471400394477318e-05,
"loss": 0.0019,
"step": 1790
},
{
"epoch": 11.538461538461538,
"grad_norm": 0.010121128521859646,
"learning_rate": 2.2485207100591717e-05,
"loss": 0.0019,
"step": 1800
},
{
"epoch": 11.538461538461538,
"eval_accuracy": 0.8561151079136691,
"eval_loss": 0.6177904605865479,
"eval_runtime": 3.2293,
"eval_samples_per_second": 86.086,
"eval_steps_per_second": 10.838,
"step": 1800
},
{
"epoch": 11.602564102564102,
"grad_norm": 0.010182279162108898,
"learning_rate": 2.1499013806706113e-05,
"loss": 0.002,
"step": 1810
},
{
"epoch": 11.666666666666666,
"grad_norm": 0.009429926984012127,
"learning_rate": 2.0512820512820512e-05,
"loss": 0.0019,
"step": 1820
},
{
"epoch": 11.73076923076923,
"grad_norm": 0.009011182934045792,
"learning_rate": 1.952662721893491e-05,
"loss": 0.0019,
"step": 1830
},
{
"epoch": 11.794871794871796,
"grad_norm": 0.009614336304366589,
"learning_rate": 1.854043392504931e-05,
"loss": 0.0019,
"step": 1840
},
{
"epoch": 11.85897435897436,
"grad_norm": 0.00916591938585043,
"learning_rate": 1.755424063116371e-05,
"loss": 0.0019,
"step": 1850
},
{
"epoch": 11.923076923076923,
"grad_norm": 0.009193377569317818,
"learning_rate": 1.6568047337278108e-05,
"loss": 0.002,
"step": 1860
},
{
"epoch": 11.987179487179487,
"grad_norm": 0.011150417849421501,
"learning_rate": 1.5581854043392503e-05,
"loss": 0.0019,
"step": 1870
},
{
"epoch": 12.051282051282051,
"grad_norm": 0.01022055372595787,
"learning_rate": 1.4595660749506904e-05,
"loss": 0.0019,
"step": 1880
},
{
"epoch": 12.115384615384615,
"grad_norm": 0.010277939960360527,
"learning_rate": 1.3609467455621303e-05,
"loss": 0.0019,
"step": 1890
},
{
"epoch": 12.179487179487179,
"grad_norm": 0.011330553330481052,
"learning_rate": 1.2623274161735702e-05,
"loss": 0.0019,
"step": 1900
},
{
"epoch": 12.179487179487179,
"eval_accuracy": 0.8561151079136691,
"eval_loss": 0.6213667988777161,
"eval_runtime": 2.2604,
"eval_samples_per_second": 122.988,
"eval_steps_per_second": 15.484,
"step": 1900
},
{
"epoch": 12.243589743589743,
"grad_norm": 0.01133573055267334,
"learning_rate": 1.16370808678501e-05,
"loss": 0.0018,
"step": 1910
},
{
"epoch": 12.307692307692308,
"grad_norm": 0.009599977172911167,
"learning_rate": 1.0650887573964498e-05,
"loss": 0.0018,
"step": 1920
},
{
"epoch": 12.371794871794872,
"grad_norm": 0.008852086029946804,
"learning_rate": 9.664694280078896e-06,
"loss": 0.0018,
"step": 1930
},
{
"epoch": 12.435897435897436,
"grad_norm": 0.009392149746418,
"learning_rate": 8.678500986193295e-06,
"loss": 0.0018,
"step": 1940
},
{
"epoch": 12.5,
"grad_norm": 0.00876608770340681,
"learning_rate": 7.692307692307694e-06,
"loss": 0.0019,
"step": 1950
},
{
"epoch": 12.564102564102564,
"grad_norm": 0.008951264433562756,
"learning_rate": 6.706114398422091e-06,
"loss": 0.0019,
"step": 1960
},
{
"epoch": 12.628205128205128,
"grad_norm": 0.008855776861310005,
"learning_rate": 5.71992110453649e-06,
"loss": 0.0019,
"step": 1970
},
{
"epoch": 12.692307692307692,
"grad_norm": 0.00874117761850357,
"learning_rate": 4.733727810650888e-06,
"loss": 0.0018,
"step": 1980
},
{
"epoch": 12.756410256410255,
"grad_norm": 0.009209462441504002,
"learning_rate": 3.7475345167652858e-06,
"loss": 0.0018,
"step": 1990
},
{
"epoch": 12.820512820512821,
"grad_norm": 0.009217181243002415,
"learning_rate": 2.7613412228796843e-06,
"loss": 0.002,
"step": 2000
},
{
"epoch": 12.820512820512821,
"eval_accuracy": 0.8561151079136691,
"eval_loss": 0.6227695941925049,
"eval_runtime": 4.1,
"eval_samples_per_second": 67.805,
"eval_steps_per_second": 8.537,
"step": 2000
},
{
"epoch": 12.884615384615385,
"grad_norm": 0.010279512964189053,
"learning_rate": 1.775147928994083e-06,
"loss": 0.0018,
"step": 2010
},
{
"epoch": 12.948717948717949,
"grad_norm": 0.009499160572886467,
"learning_rate": 7.889546351084814e-07,
"loss": 0.0018,
"step": 2020
},
{
"epoch": 13.0,
"step": 2028,
"total_flos": 2.5134977377120666e+18,
"train_loss": 0.20268641209277588,
"train_runtime": 776.7577,
"train_samples_per_second": 41.757,
"train_steps_per_second": 2.611
}
],
"logging_steps": 10,
"max_steps": 2028,
"num_input_tokens_seen": 0,
"num_train_epochs": 13,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.5134977377120666e+18,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}