{
"best_metric": 2.29616379737854,
"best_model_checkpoint": "miner_id_24/checkpoint-400",
"epoch": 0.44469149527515284,
"eval_steps": 50,
"global_step": 400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0011117287381878821,
"grad_norm": 2.472998857498169,
"learning_rate": 1e-05,
"loss": 3.6514,
"step": 1
},
{
"epoch": 0.0011117287381878821,
"eval_loss": 4.773703098297119,
"eval_runtime": 70.9519,
"eval_samples_per_second": 21.352,
"eval_steps_per_second": 5.342,
"step": 1
},
{
"epoch": 0.0022234574763757642,
"grad_norm": 2.228417158126831,
"learning_rate": 2e-05,
"loss": 3.5613,
"step": 2
},
{
"epoch": 0.0033351862145636463,
"grad_norm": 2.4302899837493896,
"learning_rate": 3e-05,
"loss": 3.7739,
"step": 3
},
{
"epoch": 0.0044469149527515284,
"grad_norm": 2.527637481689453,
"learning_rate": 4e-05,
"loss": 3.7436,
"step": 4
},
{
"epoch": 0.005558643690939411,
"grad_norm": 2.517746686935425,
"learning_rate": 5e-05,
"loss": 3.7961,
"step": 5
},
{
"epoch": 0.006670372429127293,
"grad_norm": 2.257744073867798,
"learning_rate": 6e-05,
"loss": 3.6019,
"step": 6
},
{
"epoch": 0.007782101167315175,
"grad_norm": 2.2959647178649902,
"learning_rate": 7e-05,
"loss": 3.6773,
"step": 7
},
{
"epoch": 0.008893829905503057,
"grad_norm": 2.340047597885132,
"learning_rate": 8e-05,
"loss": 3.5736,
"step": 8
},
{
"epoch": 0.010005558643690939,
"grad_norm": 2.3508639335632324,
"learning_rate": 9e-05,
"loss": 3.5245,
"step": 9
},
{
"epoch": 0.011117287381878822,
"grad_norm": 2.224466562271118,
"learning_rate": 0.0001,
"loss": 3.399,
"step": 10
},
{
"epoch": 0.012229016120066704,
"grad_norm": 2.578826904296875,
"learning_rate": 9.99983777858264e-05,
"loss": 3.1011,
"step": 11
},
{
"epoch": 0.013340744858254585,
"grad_norm": 3.860137701034546,
"learning_rate": 9.999351124856874e-05,
"loss": 3.1844,
"step": 12
},
{
"epoch": 0.014452473596442469,
"grad_norm": 5.3637518882751465,
"learning_rate": 9.998540070400966e-05,
"loss": 2.8854,
"step": 13
},
{
"epoch": 0.01556420233463035,
"grad_norm": 5.5368523597717285,
"learning_rate": 9.997404667843075e-05,
"loss": 2.7676,
"step": 14
},
{
"epoch": 0.016675931072818232,
"grad_norm": 3.3872969150543213,
"learning_rate": 9.995944990857849e-05,
"loss": 2.4751,
"step": 15
},
{
"epoch": 0.017787659811006114,
"grad_norm": 2.1491751670837402,
"learning_rate": 9.994161134161634e-05,
"loss": 2.7102,
"step": 16
},
{
"epoch": 0.018899388549193995,
"grad_norm": 2.828312873840332,
"learning_rate": 9.992053213506334e-05,
"loss": 2.4852,
"step": 17
},
{
"epoch": 0.020011117287381877,
"grad_norm": 2.784764051437378,
"learning_rate": 9.989621365671902e-05,
"loss": 2.2696,
"step": 18
},
{
"epoch": 0.021122846025569762,
"grad_norm": 3.483478307723999,
"learning_rate": 9.986865748457457e-05,
"loss": 2.4701,
"step": 19
},
{
"epoch": 0.022234574763757644,
"grad_norm": 2.1025726795196533,
"learning_rate": 9.983786540671051e-05,
"loss": 2.5995,
"step": 20
},
{
"epoch": 0.023346303501945526,
"grad_norm": 2.0208017826080322,
"learning_rate": 9.980383942118066e-05,
"loss": 2.439,
"step": 21
},
{
"epoch": 0.024458032240133407,
"grad_norm": 4.046178340911865,
"learning_rate": 9.976658173588244e-05,
"loss": 2.3586,
"step": 22
},
{
"epoch": 0.02556976097832129,
"grad_norm": 2.4368722438812256,
"learning_rate": 9.972609476841367e-05,
"loss": 2.3624,
"step": 23
},
{
"epoch": 0.02668148971650917,
"grad_norm": 2.14642071723938,
"learning_rate": 9.968238114591566e-05,
"loss": 2.2844,
"step": 24
},
{
"epoch": 0.027793218454697052,
"grad_norm": 3.8467893600463867,
"learning_rate": 9.96354437049027e-05,
"loss": 2.5449,
"step": 25
},
{
"epoch": 0.028904947192884937,
"grad_norm": 3.0803942680358887,
"learning_rate": 9.95852854910781e-05,
"loss": 2.4109,
"step": 26
},
{
"epoch": 0.03001667593107282,
"grad_norm": 1.9121437072753906,
"learning_rate": 9.953190975913647e-05,
"loss": 2.2835,
"step": 27
},
{
"epoch": 0.0311284046692607,
"grad_norm": 1.6956825256347656,
"learning_rate": 9.947531997255256e-05,
"loss": 2.3913,
"step": 28
},
{
"epoch": 0.032240133407448586,
"grad_norm": 1.8031660318374634,
"learning_rate": 9.941551980335652e-05,
"loss": 2.3608,
"step": 29
},
{
"epoch": 0.033351862145636464,
"grad_norm": 1.9508814811706543,
"learning_rate": 9.935251313189564e-05,
"loss": 2.4243,
"step": 30
},
{
"epoch": 0.03446359088382435,
"grad_norm": 1.6861339807510376,
"learning_rate": 9.928630404658255e-05,
"loss": 2.342,
"step": 31
},
{
"epoch": 0.03557531962201223,
"grad_norm": 1.4689520597457886,
"learning_rate": 9.921689684362989e-05,
"loss": 2.3789,
"step": 32
},
{
"epoch": 0.03668704836020011,
"grad_norm": 1.4510256052017212,
"learning_rate": 9.914429602677162e-05,
"loss": 2.2404,
"step": 33
},
{
"epoch": 0.03779877709838799,
"grad_norm": 2.0053555965423584,
"learning_rate": 9.906850630697068e-05,
"loss": 2.4606,
"step": 34
},
{
"epoch": 0.038910505836575876,
"grad_norm": 1.4402495622634888,
"learning_rate": 9.898953260211338e-05,
"loss": 2.353,
"step": 35
},
{
"epoch": 0.040022234574763754,
"grad_norm": 1.6246519088745117,
"learning_rate": 9.890738003669029e-05,
"loss": 2.5241,
"step": 36
},
{
"epoch": 0.04113396331295164,
"grad_norm": 1.7262248992919922,
"learning_rate": 9.882205394146361e-05,
"loss": 2.0709,
"step": 37
},
{
"epoch": 0.042245692051139525,
"grad_norm": 1.5005686283111572,
"learning_rate": 9.87335598531214e-05,
"loss": 2.2354,
"step": 38
},
{
"epoch": 0.0433574207893274,
"grad_norm": 1.618937373161316,
"learning_rate": 9.864190351391822e-05,
"loss": 2.2752,
"step": 39
},
{
"epoch": 0.04446914952751529,
"grad_norm": 3.1042425632476807,
"learning_rate": 9.85470908713026e-05,
"loss": 2.4815,
"step": 40
},
{
"epoch": 0.045580878265703166,
"grad_norm": 2.9018545150756836,
"learning_rate": 9.844912807753104e-05,
"loss": 2.5847,
"step": 41
},
{
"epoch": 0.04669260700389105,
"grad_norm": 1.687359094619751,
"learning_rate": 9.834802148926882e-05,
"loss": 2.1867,
"step": 42
},
{
"epoch": 0.04780433574207893,
"grad_norm": 2.870513439178467,
"learning_rate": 9.824377766717759e-05,
"loss": 2.2868,
"step": 43
},
{
"epoch": 0.048916064480266815,
"grad_norm": 2.6799087524414062,
"learning_rate": 9.813640337548954e-05,
"loss": 2.3823,
"step": 44
},
{
"epoch": 0.0500277932184547,
"grad_norm": 2.1397416591644287,
"learning_rate": 9.802590558156862e-05,
"loss": 2.1061,
"step": 45
},
{
"epoch": 0.05113952195664258,
"grad_norm": 1.9821467399597168,
"learning_rate": 9.791229145545831e-05,
"loss": 2.3679,
"step": 46
},
{
"epoch": 0.05225125069483046,
"grad_norm": 3.3248050212860107,
"learning_rate": 9.779556836941645e-05,
"loss": 2.2956,
"step": 47
},
{
"epoch": 0.05336297943301834,
"grad_norm": 2.8718130588531494,
"learning_rate": 9.767574389743682e-05,
"loss": 2.2576,
"step": 48
},
{
"epoch": 0.054474708171206226,
"grad_norm": 2.6526224613189697,
"learning_rate": 9.755282581475769e-05,
"loss": 2.2742,
"step": 49
},
{
"epoch": 0.055586436909394105,
"grad_norm": 1.908512830734253,
"learning_rate": 9.742682209735727e-05,
"loss": 2.3169,
"step": 50
},
{
"epoch": 0.055586436909394105,
"eval_loss": 2.8695197105407715,
"eval_runtime": 71.5125,
"eval_samples_per_second": 21.185,
"eval_steps_per_second": 5.3,
"step": 50
},
{
"epoch": 0.05669816564758199,
"grad_norm": 8.565925598144531,
"learning_rate": 9.729774092143627e-05,
"loss": 3.3931,
"step": 51
},
{
"epoch": 0.057809894385769875,
"grad_norm": 8.781779289245605,
"learning_rate": 9.716559066288715e-05,
"loss": 3.2973,
"step": 52
},
{
"epoch": 0.05892162312395775,
"grad_norm": 6.783483505249023,
"learning_rate": 9.703037989675087e-05,
"loss": 3.0626,
"step": 53
},
{
"epoch": 0.06003335186214564,
"grad_norm": 3.6386938095092773,
"learning_rate": 9.689211739666023e-05,
"loss": 2.7669,
"step": 54
},
{
"epoch": 0.061145080600333516,
"grad_norm": 1.7182066440582275,
"learning_rate": 9.675081213427076e-05,
"loss": 2.6579,
"step": 55
},
{
"epoch": 0.0622568093385214,
"grad_norm": 1.6245131492614746,
"learning_rate": 9.66064732786784e-05,
"loss": 2.4646,
"step": 56
},
{
"epoch": 0.06336853807670928,
"grad_norm": 1.7475733757019043,
"learning_rate": 9.645911019582467e-05,
"loss": 2.4532,
"step": 57
},
{
"epoch": 0.06448026681489717,
"grad_norm": 1.7325527667999268,
"learning_rate": 9.630873244788883e-05,
"loss": 2.5395,
"step": 58
},
{
"epoch": 0.06559199555308505,
"grad_norm": 1.656267523765564,
"learning_rate": 9.615534979266745e-05,
"loss": 2.3968,
"step": 59
},
{
"epoch": 0.06670372429127293,
"grad_norm": 1.4385294914245605,
"learning_rate": 9.599897218294122e-05,
"loss": 2.4547,
"step": 60
},
{
"epoch": 0.0678154530294608,
"grad_norm": 1.149308681488037,
"learning_rate": 9.583960976582913e-05,
"loss": 2.4185,
"step": 61
},
{
"epoch": 0.0689271817676487,
"grad_norm": 1.0372174978256226,
"learning_rate": 9.567727288213005e-05,
"loss": 2.3459,
"step": 62
},
{
"epoch": 0.07003891050583658,
"grad_norm": 1.227518081665039,
"learning_rate": 9.551197206565173e-05,
"loss": 2.4516,
"step": 63
},
{
"epoch": 0.07115063924402446,
"grad_norm": 0.9263134002685547,
"learning_rate": 9.534371804252728e-05,
"loss": 2.3193,
"step": 64
},
{
"epoch": 0.07226236798221233,
"grad_norm": 1.0493077039718628,
"learning_rate": 9.517252173051911e-05,
"loss": 2.475,
"step": 65
},
{
"epoch": 0.07337409672040023,
"grad_norm": 1.0255634784698486,
"learning_rate": 9.49983942383106e-05,
"loss": 2.3088,
"step": 66
},
{
"epoch": 0.0744858254585881,
"grad_norm": 1.041403889656067,
"learning_rate": 9.482134686478519e-05,
"loss": 2.2648,
"step": 67
},
{
"epoch": 0.07559755419677598,
"grad_norm": 1.0037230253219604,
"learning_rate": 9.464139109829321e-05,
"loss": 2.2107,
"step": 68
},
{
"epoch": 0.07670928293496387,
"grad_norm": 1.3062645196914673,
"learning_rate": 9.445853861590647e-05,
"loss": 2.2864,
"step": 69
},
{
"epoch": 0.07782101167315175,
"grad_norm": 1.1115295886993408,
"learning_rate": 9.42728012826605e-05,
"loss": 2.185,
"step": 70
},
{
"epoch": 0.07893274041133963,
"grad_norm": 1.0353200435638428,
"learning_rate": 9.408419115078471e-05,
"loss": 2.4227,
"step": 71
},
{
"epoch": 0.08004446914952751,
"grad_norm": 1.0612479448318481,
"learning_rate": 9.389272045892024e-05,
"loss": 2.3398,
"step": 72
},
{
"epoch": 0.0811561978877154,
"grad_norm": 1.0379235744476318,
"learning_rate": 9.36984016313259e-05,
"loss": 2.2034,
"step": 73
},
{
"epoch": 0.08226792662590328,
"grad_norm": 0.9927018880844116,
"learning_rate": 9.350124727707197e-05,
"loss": 2.3159,
"step": 74
},
{
"epoch": 0.08337965536409116,
"grad_norm": 1.063293695449829,
"learning_rate": 9.330127018922194e-05,
"loss": 2.3267,
"step": 75
},
{
"epoch": 0.08449138410227905,
"grad_norm": 1.0034624338150024,
"learning_rate": 9.309848334400246e-05,
"loss": 2.0903,
"step": 76
},
{
"epoch": 0.08560311284046693,
"grad_norm": 1.027740240097046,
"learning_rate": 9.289289989996133e-05,
"loss": 2.3924,
"step": 77
},
{
"epoch": 0.0867148415786548,
"grad_norm": 1.3626140356063843,
"learning_rate": 9.268453319711363e-05,
"loss": 2.0867,
"step": 78
},
{
"epoch": 0.08782657031684268,
"grad_norm": 1.0475863218307495,
"learning_rate": 9.247339675607605e-05,
"loss": 2.159,
"step": 79
},
{
"epoch": 0.08893829905503058,
"grad_norm": 1.1371270418167114,
"learning_rate": 9.225950427718975e-05,
"loss": 2.4041,
"step": 80
},
{
"epoch": 0.09005002779321845,
"grad_norm": 1.2929434776306152,
"learning_rate": 9.204286963963111e-05,
"loss": 2.2017,
"step": 81
},
{
"epoch": 0.09116175653140633,
"grad_norm": 1.0671011209487915,
"learning_rate": 9.182350690051133e-05,
"loss": 2.4558,
"step": 82
},
{
"epoch": 0.09227348526959422,
"grad_norm": 1.121847152709961,
"learning_rate": 9.160143029396422e-05,
"loss": 2.1243,
"step": 83
},
{
"epoch": 0.0933852140077821,
"grad_norm": 1.2664432525634766,
"learning_rate": 9.13766542302225e-05,
"loss": 2.1224,
"step": 84
},
{
"epoch": 0.09449694274596998,
"grad_norm": 1.281224250793457,
"learning_rate": 9.114919329468282e-05,
"loss": 2.2792,
"step": 85
},
{
"epoch": 0.09560867148415786,
"grad_norm": 1.2077322006225586,
"learning_rate": 9.091906224695935e-05,
"loss": 2.202,
"step": 86
},
{
"epoch": 0.09672040022234575,
"grad_norm": 1.253800392150879,
"learning_rate": 9.068627601992598e-05,
"loss": 2.2172,
"step": 87
},
{
"epoch": 0.09783212896053363,
"grad_norm": 1.4512909650802612,
"learning_rate": 9.045084971874738e-05,
"loss": 2.0814,
"step": 88
},
{
"epoch": 0.09894385769872151,
"grad_norm": 1.256651759147644,
"learning_rate": 9.021279861989885e-05,
"loss": 2.2968,
"step": 89
},
{
"epoch": 0.1000555864369094,
"grad_norm": 1.8698711395263672,
"learning_rate": 8.997213817017507e-05,
"loss": 2.5366,
"step": 90
},
{
"epoch": 0.10116731517509728,
"grad_norm": 1.381558895111084,
"learning_rate": 8.972888398568772e-05,
"loss": 2.2141,
"step": 91
},
{
"epoch": 0.10227904391328516,
"grad_norm": 1.271780252456665,
"learning_rate": 8.948305185085225e-05,
"loss": 2.3866,
"step": 92
},
{
"epoch": 0.10339077265147303,
"grad_norm": 1.3147594928741455,
"learning_rate": 8.92346577173636e-05,
"loss": 2.0375,
"step": 93
},
{
"epoch": 0.10450250138966093,
"grad_norm": 1.3043659925460815,
"learning_rate": 8.898371770316111e-05,
"loss": 2.3249,
"step": 94
},
{
"epoch": 0.1056142301278488,
"grad_norm": 1.241226315498352,
"learning_rate": 8.873024809138272e-05,
"loss": 2.4081,
"step": 95
},
{
"epoch": 0.10672595886603668,
"grad_norm": 1.7342743873596191,
"learning_rate": 8.847426532930831e-05,
"loss": 2.1388,
"step": 96
},
{
"epoch": 0.10783768760422457,
"grad_norm": 1.5813889503479004,
"learning_rate": 8.821578602729242e-05,
"loss": 2.3315,
"step": 97
},
{
"epoch": 0.10894941634241245,
"grad_norm": 1.7183290719985962,
"learning_rate": 8.795482695768658e-05,
"loss": 2.1037,
"step": 98
},
{
"epoch": 0.11006114508060033,
"grad_norm": 16.183635711669922,
"learning_rate": 8.769140505375085e-05,
"loss": 2.3144,
"step": 99
},
{
"epoch": 0.11117287381878821,
"grad_norm": 1.6419098377227783,
"learning_rate": 8.742553740855506e-05,
"loss": 2.1397,
"step": 100
},
{
"epoch": 0.11117287381878821,
"eval_loss": 2.7260963916778564,
"eval_runtime": 71.6154,
"eval_samples_per_second": 21.155,
"eval_steps_per_second": 5.292,
"step": 100
},
{
"epoch": 0.1122846025569761,
"grad_norm": 5.945138454437256,
"learning_rate": 8.715724127386972e-05,
"loss": 3.3042,
"step": 101
},
{
"epoch": 0.11339633129516398,
"grad_norm": 5.689327239990234,
"learning_rate": 8.688653405904652e-05,
"loss": 3.2821,
"step": 102
},
{
"epoch": 0.11450806003335186,
"grad_norm": 5.383906364440918,
"learning_rate": 8.661343332988869e-05,
"loss": 3.1464,
"step": 103
},
{
"epoch": 0.11561978877153975,
"grad_norm": 3.6286027431488037,
"learning_rate": 8.633795680751116e-05,
"loss": 3.0208,
"step": 104
},
{
"epoch": 0.11673151750972763,
"grad_norm": 2.5231778621673584,
"learning_rate": 8.606012236719073e-05,
"loss": 2.9116,
"step": 105
},
{
"epoch": 0.1178432462479155,
"grad_norm": 1.8035188913345337,
"learning_rate": 8.577994803720606e-05,
"loss": 2.7812,
"step": 106
},
{
"epoch": 0.11895497498610338,
"grad_norm": 1.2647838592529297,
"learning_rate": 8.549745199766792e-05,
"loss": 2.5962,
"step": 107
},
{
"epoch": 0.12006670372429128,
"grad_norm": 1.0100034475326538,
"learning_rate": 8.521265257933948e-05,
"loss": 2.5903,
"step": 108
},
{
"epoch": 0.12117843246247915,
"grad_norm": 1.5505986213684082,
"learning_rate": 8.492556826244687e-05,
"loss": 2.4289,
"step": 109
},
{
"epoch": 0.12229016120066703,
"grad_norm": 2.1767804622650146,
"learning_rate": 8.463621767547998e-05,
"loss": 2.5854,
"step": 110
},
{
"epoch": 0.12340188993885493,
"grad_norm": 2.0668084621429443,
"learning_rate": 8.434461959398376e-05,
"loss": 2.4399,
"step": 111
},
{
"epoch": 0.1245136186770428,
"grad_norm": 1.511650562286377,
"learning_rate": 8.405079293933986e-05,
"loss": 2.6072,
"step": 112
},
{
"epoch": 0.12562534741523068,
"grad_norm": 1.1593494415283203,
"learning_rate": 8.375475677753881e-05,
"loss": 2.4045,
"step": 113
},
{
"epoch": 0.12673707615341856,
"grad_norm": 1.053107738494873,
"learning_rate": 8.345653031794292e-05,
"loss": 2.4522,
"step": 114
},
{
"epoch": 0.12784880489160644,
"grad_norm": 0.9937801361083984,
"learning_rate": 8.315613291203976e-05,
"loss": 2.3276,
"step": 115
},
{
"epoch": 0.12896053362979434,
"grad_norm": 0.8721001744270325,
"learning_rate": 8.285358405218655e-05,
"loss": 2.3567,
"step": 116
},
{
"epoch": 0.13007226236798222,
"grad_norm": 0.9330883026123047,
"learning_rate": 8.25489033703452e-05,
"loss": 2.2585,
"step": 117
},
{
"epoch": 0.1311839911061701,
"grad_norm": 0.9001569747924805,
"learning_rate": 8.224211063680853e-05,
"loss": 2.297,
"step": 118
},
{
"epoch": 0.13229571984435798,
"grad_norm": 0.8666402697563171,
"learning_rate": 8.19332257589174e-05,
"loss": 2.2182,
"step": 119
},
{
"epoch": 0.13340744858254586,
"grad_norm": 1.176416277885437,
"learning_rate": 8.162226877976887e-05,
"loss": 2.3346,
"step": 120
},
{
"epoch": 0.13451917732073373,
"grad_norm": 0.9645460844039917,
"learning_rate": 8.130925987691569e-05,
"loss": 2.0891,
"step": 121
},
{
"epoch": 0.1356309060589216,
"grad_norm": 1.3001148700714111,
"learning_rate": 8.099421936105702e-05,
"loss": 2.3923,
"step": 122
},
{
"epoch": 0.13674263479710952,
"grad_norm": 1.1771594285964966,
"learning_rate": 8.067716767472045e-05,
"loss": 2.3808,
"step": 123
},
{
"epoch": 0.1378543635352974,
"grad_norm": 0.997420608997345,
"learning_rate": 8.035812539093557e-05,
"loss": 2.4158,
"step": 124
},
{
"epoch": 0.13896609227348528,
"grad_norm": 0.8972210884094238,
"learning_rate": 8.003711321189895e-05,
"loss": 2.1985,
"step": 125
},
{
"epoch": 0.14007782101167315,
"grad_norm": 0.8970302939414978,
"learning_rate": 7.971415196763088e-05,
"loss": 1.9446,
"step": 126
},
{
"epoch": 0.14118954974986103,
"grad_norm": 1.0234975814819336,
"learning_rate": 7.938926261462366e-05,
"loss": 2.2494,
"step": 127
},
{
"epoch": 0.1423012784880489,
"grad_norm": 1.1858220100402832,
"learning_rate": 7.906246623448183e-05,
"loss": 2.2926,
"step": 128
},
{
"epoch": 0.1434130072262368,
"grad_norm": 1.04050612449646,
"learning_rate": 7.873378403255419e-05,
"loss": 2.3006,
"step": 129
},
{
"epoch": 0.14452473596442467,
"grad_norm": 1.20691978931427,
"learning_rate": 7.840323733655778e-05,
"loss": 2.3302,
"step": 130
},
{
"epoch": 0.14563646470261257,
"grad_norm": 1.0528558492660522,
"learning_rate": 7.807084759519405e-05,
"loss": 2.1477,
"step": 131
},
{
"epoch": 0.14674819344080045,
"grad_norm": 1.085060954093933,
"learning_rate": 7.773663637675694e-05,
"loss": 2.3391,
"step": 132
},
{
"epoch": 0.14785992217898833,
"grad_norm": 0.9347543120384216,
"learning_rate": 7.740062536773352e-05,
"loss": 2.2107,
"step": 133
},
{
"epoch": 0.1489716509171762,
"grad_norm": 1.0611324310302734,
"learning_rate": 7.706283637139658e-05,
"loss": 2.2921,
"step": 134
},
{
"epoch": 0.15008337965536409,
"grad_norm": 1.1190764904022217,
"learning_rate": 7.672329130639005e-05,
"loss": 2.1771,
"step": 135
},
{
"epoch": 0.15119510839355196,
"grad_norm": 1.021002173423767,
"learning_rate": 7.638201220530665e-05,
"loss": 2.1964,
"step": 136
},
{
"epoch": 0.15230683713173984,
"grad_norm": 1.2308954000473022,
"learning_rate": 7.603902121325813e-05,
"loss": 2.2279,
"step": 137
},
{
"epoch": 0.15341856586992775,
"grad_norm": 1.0030035972595215,
"learning_rate": 7.569434058643844e-05,
"loss": 2.1981,
"step": 138
},
{
"epoch": 0.15453029460811563,
"grad_norm": 1.0752218961715698,
"learning_rate": 7.534799269067953e-05,
"loss": 2.2809,
"step": 139
},
{
"epoch": 0.1556420233463035,
"grad_norm": 1.1892122030258179,
"learning_rate": 7.500000000000001e-05,
"loss": 2.2486,
"step": 140
},
{
"epoch": 0.15675375208449138,
"grad_norm": 1.1939011812210083,
"learning_rate": 7.465038509514688e-05,
"loss": 2.1895,
"step": 141
},
{
"epoch": 0.15786548082267926,
"grad_norm": 1.430174469947815,
"learning_rate": 7.42991706621303e-05,
"loss": 2.3485,
"step": 142
},
{
"epoch": 0.15897720956086714,
"grad_norm": 1.1184104681015015,
"learning_rate": 7.394637949075154e-05,
"loss": 2.2954,
"step": 143
},
{
"epoch": 0.16008893829905502,
"grad_norm": 1.0584604740142822,
"learning_rate": 7.35920344731241e-05,
"loss": 2.0915,
"step": 144
},
{
"epoch": 0.16120066703724292,
"grad_norm": 1.1394598484039307,
"learning_rate": 7.323615860218843e-05,
"loss": 1.9866,
"step": 145
},
{
"epoch": 0.1623123957754308,
"grad_norm": 1.3819961547851562,
"learning_rate": 7.287877497021978e-05,
"loss": 2.229,
"step": 146
},
{
"epoch": 0.16342412451361868,
"grad_norm": 1.717722773551941,
"learning_rate": 7.251990676732984e-05,
"loss": 2.0977,
"step": 147
},
{
"epoch": 0.16453585325180656,
"grad_norm": 1.206673502922058,
"learning_rate": 7.215957727996207e-05,
"loss": 2.2358,
"step": 148
},
{
"epoch": 0.16564758198999444,
"grad_norm": 1.5048532485961914,
"learning_rate": 7.179780988938051e-05,
"loss": 2.2084,
"step": 149
},
{
"epoch": 0.16675931072818231,
"grad_norm": 1.4663426876068115,
"learning_rate": 7.143462807015271e-05,
"loss": 2.1774,
"step": 150
},
{
"epoch": 0.16675931072818231,
"eval_loss": 2.5343213081359863,
"eval_runtime": 71.4182,
"eval_samples_per_second": 21.213,
"eval_steps_per_second": 5.307,
"step": 150
},
{
"epoch": 0.1678710394663702,
"grad_norm": 3.798626184463501,
"learning_rate": 7.107005538862646e-05,
"loss": 2.8672,
"step": 151
},
{
"epoch": 0.1689827682045581,
"grad_norm": 3.3820409774780273,
"learning_rate": 7.07041155014006e-05,
"loss": 2.9326,
"step": 152
},
{
"epoch": 0.17009449694274598,
"grad_norm": 2.7820358276367188,
"learning_rate": 7.033683215379002e-05,
"loss": 2.8474,
"step": 153
},
{
"epoch": 0.17120622568093385,
"grad_norm": 2.195570707321167,
"learning_rate": 6.996822917828477e-05,
"loss": 2.7765,
"step": 154
},
{
"epoch": 0.17231795441912173,
"grad_norm": 1.862131953239441,
"learning_rate": 6.959833049300377e-05,
"loss": 2.6993,
"step": 155
},
{
"epoch": 0.1734296831573096,
"grad_norm": 1.5429936647415161,
"learning_rate": 6.922716010014255e-05,
"loss": 2.697,
"step": 156
},
{
"epoch": 0.1745414118954975,
"grad_norm": 1.3168162107467651,
"learning_rate": 6.885474208441603e-05,
"loss": 2.5454,
"step": 157
},
{
"epoch": 0.17565314063368537,
"grad_norm": 1.1143990755081177,
"learning_rate": 6.848110061149556e-05,
"loss": 2.4594,
"step": 158
},
{
"epoch": 0.17676486937187327,
"grad_norm": 0.9172198176383972,
"learning_rate": 6.810625992644085e-05,
"loss": 2.3292,
"step": 159
},
{
"epoch": 0.17787659811006115,
"grad_norm": 0.9379021525382996,
"learning_rate": 6.773024435212678e-05,
"loss": 2.4432,
"step": 160
},
{
"epoch": 0.17898832684824903,
"grad_norm": 0.7792444229125977,
"learning_rate": 6.735307828766515e-05,
"loss": 2.4288,
"step": 161
},
{
"epoch": 0.1801000555864369,
"grad_norm": 0.8528432846069336,
"learning_rate": 6.697478620682137e-05,
"loss": 2.2895,
"step": 162
},
{
"epoch": 0.1812117843246248,
"grad_norm": 0.7585402727127075,
"learning_rate": 6.659539265642643e-05,
"loss": 2.4101,
"step": 163
},
{
"epoch": 0.18232351306281266,
"grad_norm": 0.8885021805763245,
"learning_rate": 6.621492225478414e-05,
"loss": 2.4375,
"step": 164
},
{
"epoch": 0.18343524180100054,
"grad_norm": 0.784393846988678,
"learning_rate": 6.583339969007363e-05,
"loss": 2.3757,
"step": 165
},
{
"epoch": 0.18454697053918845,
"grad_norm": 1.024843692779541,
"learning_rate": 6.545084971874738e-05,
"loss": 2.4254,
"step": 166
},
{
"epoch": 0.18565869927737633,
"grad_norm": 1.151550531387329,
"learning_rate": 6.506729716392481e-05,
"loss": 2.1751,
"step": 167
},
{
"epoch": 0.1867704280155642,
"grad_norm": 1.0025200843811035,
"learning_rate": 6.468276691378155e-05,
"loss": 2.4144,
"step": 168
},
{
"epoch": 0.18788215675375208,
"grad_norm": 0.8717769980430603,
"learning_rate": 6.429728391993446e-05,
"loss": 2.3761,
"step": 169
},
{
"epoch": 0.18899388549193996,
"grad_norm": 0.8901825547218323,
"learning_rate": 6.391087319582264e-05,
"loss": 2.1471,
"step": 170
},
{
"epoch": 0.19010561423012784,
"grad_norm": 0.988797664642334,
"learning_rate": 6.35235598150842e-05,
"loss": 2.3141,
"step": 171
},
{
"epoch": 0.19121734296831572,
"grad_norm": 0.9147389531135559,
"learning_rate": 6.313536890992935e-05,
"loss": 2.2724,
"step": 172
},
{
"epoch": 0.19232907170650362,
"grad_norm": 0.8791763782501221,
"learning_rate": 6.274632566950967e-05,
"loss": 2.2125,
"step": 173
},
{
"epoch": 0.1934408004446915,
"grad_norm": 0.9586495161056519,
"learning_rate": 6.235645533828349e-05,
"loss": 2.1094,
"step": 174
},
{
"epoch": 0.19455252918287938,
"grad_norm": 1.0605213642120361,
"learning_rate": 6.19657832143779e-05,
"loss": 2.2509,
"step": 175
},
{
"epoch": 0.19566425792106726,
"grad_norm": 1.155334234237671,
"learning_rate": 6.157433464794716e-05,
"loss": 2.2831,
"step": 176
},
{
"epoch": 0.19677598665925514,
"grad_norm": 0.8409937620162964,
"learning_rate": 6.118213503952779e-05,
"loss": 2.1029,
"step": 177
},
{
"epoch": 0.19788771539744301,
"grad_norm": 0.9668261408805847,
"learning_rate": 6.078920983839031e-05,
"loss": 2.3095,
"step": 178
},
{
"epoch": 0.1989994441356309,
"grad_norm": 1.3652583360671997,
"learning_rate": 6.0395584540887963e-05,
"loss": 2.3097,
"step": 179
},
{
"epoch": 0.2001111728738188,
"grad_norm": 1.2160142660140991,
"learning_rate": 6.0001284688802226e-05,
"loss": 2.367,
"step": 180
},
{
"epoch": 0.20122290161200668,
"grad_norm": 1.1758296489715576,
"learning_rate": 5.960633586768543e-05,
"loss": 2.2655,
"step": 181
},
{
"epoch": 0.20233463035019456,
"grad_norm": 1.1766093969345093,
"learning_rate": 5.921076370520058e-05,
"loss": 2.2498,
"step": 182
},
{
"epoch": 0.20344635908838243,
"grad_norm": 0.9961435794830322,
"learning_rate": 5.8814593869458455e-05,
"loss": 2.1775,
"step": 183
},
{
"epoch": 0.2045580878265703,
"grad_norm": 1.0376254320144653,
"learning_rate": 5.841785206735192e-05,
"loss": 2.2686,
"step": 184
},
{
"epoch": 0.2056698165647582,
"grad_norm": 0.9666380882263184,
"learning_rate": 5.8020564042888015e-05,
"loss": 2.0915,
"step": 185
},
{
"epoch": 0.20678154530294607,
"grad_norm": 1.1380987167358398,
"learning_rate": 5.762275557551727e-05,
"loss": 2.2868,
"step": 186
},
{
"epoch": 0.20789327404113397,
"grad_norm": 1.2181998491287231,
"learning_rate": 5.7224452478461064e-05,
"loss": 2.4482,
"step": 187
},
{
"epoch": 0.20900500277932185,
"grad_norm": 1.0468593835830688,
"learning_rate": 5.682568059703659e-05,
"loss": 2.1723,
"step": 188
},
{
"epoch": 0.21011673151750973,
"grad_norm": 0.961937665939331,
"learning_rate": 5.642646580697973e-05,
"loss": 2.2278,
"step": 189
},
{
"epoch": 0.2112284602556976,
"grad_norm": 1.102487564086914,
"learning_rate": 5.602683401276615e-05,
"loss": 2.339,
"step": 190
},
{
"epoch": 0.2123401889938855,
"grad_norm": 0.9763503670692444,
"learning_rate": 5.562681114593028e-05,
"loss": 2.1248,
"step": 191
},
{
"epoch": 0.21345191773207337,
"grad_norm": 1.0857285261154175,
"learning_rate": 5.522642316338268e-05,
"loss": 2.3992,
"step": 192
},
{
"epoch": 0.21456364647026124,
"grad_norm": 1.103461503982544,
"learning_rate": 5.482569604572576e-05,
"loss": 2.1701,
"step": 193
},
{
"epoch": 0.21567537520844915,
"grad_norm": 1.18154776096344,
"learning_rate": 5.442465579556793e-05,
"loss": 2.2224,
"step": 194
},
{
"epoch": 0.21678710394663703,
"grad_norm": 1.6554676294326782,
"learning_rate": 5.402332843583631e-05,
"loss": 2.2127,
"step": 195
},
{
"epoch": 0.2178988326848249,
"grad_norm": 1.4385900497436523,
"learning_rate": 5.3621740008088126e-05,
"loss": 2.3457,
"step": 196
},
{
"epoch": 0.21901056142301278,
"grad_norm": 1.1937305927276611,
"learning_rate": 5.321991657082097e-05,
"loss": 2.1697,
"step": 197
},
{
"epoch": 0.22012229016120066,
"grad_norm": 1.5197139978408813,
"learning_rate": 5.281788419778187e-05,
"loss": 2.3358,
"step": 198
},
{
"epoch": 0.22123401889938854,
"grad_norm": 1.266471028327942,
"learning_rate": 5.2415668976275355e-05,
"loss": 2.2003,
"step": 199
},
{
"epoch": 0.22234574763757642,
"grad_norm": 1.8889713287353516,
"learning_rate": 5.201329700547076e-05,
"loss": 2.3701,
"step": 200
},
{
"epoch": 0.22234574763757642,
"eval_loss": 2.5627048015594482,
"eval_runtime": 71.6301,
"eval_samples_per_second": 21.15,
"eval_steps_per_second": 5.291,
"step": 200
},
{
"epoch": 0.22345747637576432,
"grad_norm": 4.871993064880371,
"learning_rate": 5.161079439470866e-05,
"loss": 2.9347,
"step": 201
},
{
"epoch": 0.2245692051139522,
"grad_norm": 4.960667610168457,
"learning_rate": 5.1208187261806615e-05,
"loss": 2.9743,
"step": 202
},
{
"epoch": 0.22568093385214008,
"grad_norm": 4.213318824768066,
"learning_rate": 5.080550173136457e-05,
"loss": 2.9916,
"step": 203
},
{
"epoch": 0.22679266259032796,
"grad_norm": 3.499946355819702,
"learning_rate": 5.0402763933069496e-05,
"loss": 2.7203,
"step": 204
},
{
"epoch": 0.22790439132851584,
"grad_norm": 2.6555299758911133,
"learning_rate": 5e-05,
"loss": 2.6411,
"step": 205
},
{
"epoch": 0.22901612006670372,
"grad_norm": 2.1636953353881836,
"learning_rate": 4.9597236066930516e-05,
"loss": 2.7387,
"step": 206
},
{
"epoch": 0.2301278488048916,
"grad_norm": 1.7059534788131714,
"learning_rate": 4.919449826863544e-05,
"loss": 2.6006,
"step": 207
},
{
"epoch": 0.2312395775430795,
"grad_norm": 1.8288934230804443,
"learning_rate": 4.87918127381934e-05,
"loss": 2.559,
"step": 208
},
{
"epoch": 0.23235130628126738,
"grad_norm": 1.66895592212677,
"learning_rate": 4.8389205605291365e-05,
"loss": 2.6273,
"step": 209
},
{
"epoch": 0.23346303501945526,
"grad_norm": 1.2062842845916748,
"learning_rate": 4.798670299452926e-05,
"loss": 2.6314,
"step": 210
},
{
"epoch": 0.23457476375764313,
"grad_norm": 1.236952304840088,
"learning_rate": 4.758433102372466e-05,
"loss": 2.2492,
"step": 211
},
{
"epoch": 0.235686492495831,
"grad_norm": 1.0907583236694336,
"learning_rate": 4.7182115802218126e-05,
"loss": 2.3356,
"step": 212
},
{
"epoch": 0.2367982212340189,
"grad_norm": 1.1642088890075684,
"learning_rate": 4.678008342917903e-05,
"loss": 2.3069,
"step": 213
},
{
"epoch": 0.23790994997220677,
"grad_norm": 1.0130016803741455,
"learning_rate": 4.6378259991911886e-05,
"loss": 2.2744,
"step": 214
},
{
"epoch": 0.23902167871039467,
"grad_norm": 1.0717731714248657,
"learning_rate": 4.597667156416371e-05,
"loss": 2.2989,
"step": 215
},
{
"epoch": 0.24013340744858255,
"grad_norm": 0.8758122324943542,
"learning_rate": 4.5575344204432084e-05,
"loss": 2.2346,
"step": 216
},
{
"epoch": 0.24124513618677043,
"grad_norm": 0.7776920199394226,
"learning_rate": 4.5174303954274244e-05,
"loss": 2.2342,
"step": 217
},
{
"epoch": 0.2423568649249583,
"grad_norm": 0.858988344669342,
"learning_rate": 4.477357683661734e-05,
"loss": 2.3042,
"step": 218
},
{
"epoch": 0.2434685936631462,
"grad_norm": 0.902452290058136,
"learning_rate": 4.437318885406973e-05,
"loss": 2.2463,
"step": 219
},
{
"epoch": 0.24458032240133407,
"grad_norm": 0.8730572462081909,
"learning_rate": 4.397316598723385e-05,
"loss": 2.2071,
"step": 220
},
{
"epoch": 0.24569205113952194,
"grad_norm": 0.8203500509262085,
"learning_rate": 4.3573534193020274e-05,
"loss": 2.2388,
"step": 221
},
{
"epoch": 0.24680377987770985,
"grad_norm": 1.4247759580612183,
"learning_rate": 4.317431940296343e-05,
"loss": 2.1673,
"step": 222
},
{
"epoch": 0.24791550861589773,
"grad_norm": 0.8514419794082642,
"learning_rate": 4.277554752153895e-05,
"loss": 2.3399,
"step": 223
},
{
"epoch": 0.2490272373540856,
"grad_norm": 0.9166375398635864,
"learning_rate": 4.237724442448273e-05,
"loss": 2.1514,
"step": 224
},
{
"epoch": 0.2501389660922735,
"grad_norm": 0.8139379024505615,
"learning_rate": 4.197943595711198e-05,
"loss": 1.9978,
"step": 225
},
{
"epoch": 0.25125069483046136,
"grad_norm": 0.8882634043693542,
"learning_rate": 4.1582147932648074e-05,
"loss": 2.2634,
"step": 226
},
{
"epoch": 0.25236242356864924,
"grad_norm": 0.9146400690078735,
"learning_rate": 4.118540613054156e-05,
"loss": 2.5159,
"step": 227
},
{
"epoch": 0.2534741523068371,
"grad_norm": 0.9058739542961121,
"learning_rate": 4.078923629479943e-05,
"loss": 2.1295,
"step": 228
},
{
"epoch": 0.254585881045025,
"grad_norm": 0.8965374231338501,
"learning_rate": 4.039366413231458e-05,
"loss": 2.3373,
"step": 229
},
{
"epoch": 0.2556976097832129,
"grad_norm": 0.8202263712882996,
"learning_rate": 3.9998715311197785e-05,
"loss": 2.2355,
"step": 230
},
{
"epoch": 0.25680933852140075,
"grad_norm": 1.061989188194275,
"learning_rate": 3.960441545911204e-05,
"loss": 2.1224,
"step": 231
},
{
"epoch": 0.2579210672595887,
"grad_norm": 0.9839004874229431,
"learning_rate": 3.92107901616097e-05,
"loss": 2.2109,
"step": 232
},
{
"epoch": 0.25903279599777657,
"grad_norm": 0.9290953874588013,
"learning_rate": 3.8817864960472236e-05,
"loss": 2.1705,
"step": 233
},
{
"epoch": 0.26014452473596444,
"grad_norm": 0.8922295570373535,
"learning_rate": 3.842566535205286e-05,
"loss": 2.1385,
"step": 234
},
{
"epoch": 0.2612562534741523,
"grad_norm": 1.0526587963104248,
"learning_rate": 3.803421678562213e-05,
"loss": 2.3858,
"step": 235
},
{
"epoch": 0.2623679822123402,
"grad_norm": 1.066570520401001,
"learning_rate": 3.764354466171652e-05,
"loss": 2.2403,
"step": 236
},
{
"epoch": 0.2634797109505281,
"grad_norm": 1.4028855562210083,
"learning_rate": 3.725367433049033e-05,
"loss": 2.2443,
"step": 237
},
{
"epoch": 0.26459143968871596,
"grad_norm": 1.0442891120910645,
"learning_rate": 3.6864631090070655e-05,
"loss": 2.2474,
"step": 238
},
{
"epoch": 0.26570316842690384,
"grad_norm": 0.9756484031677246,
"learning_rate": 3.6476440184915815e-05,
"loss": 2.433,
"step": 239
},
{
"epoch": 0.2668148971650917,
"grad_norm": 1.0391199588775635,
"learning_rate": 3.608912680417737e-05,
"loss": 2.2412,
"step": 240
},
{
"epoch": 0.2679266259032796,
"grad_norm": 1.3023152351379395,
"learning_rate": 3.570271608006555e-05,
"loss": 2.1084,
"step": 241
},
{
"epoch": 0.26903835464146747,
"grad_norm": 1.0111302137374878,
"learning_rate": 3.531723308621847e-05,
"loss": 2.0772,
"step": 242
},
{
"epoch": 0.27015008337965535,
"grad_norm": 1.1569526195526123,
"learning_rate": 3.493270283607522e-05,
"loss": 2.1449,
"step": 243
},
{
"epoch": 0.2712618121178432,
"grad_norm": 1.4179155826568604,
"learning_rate": 3.4549150281252636e-05,
"loss": 2.2884,
"step": 244
},
{
"epoch": 0.2723735408560311,
"grad_norm": 1.1831965446472168,
"learning_rate": 3.4166600309926387e-05,
"loss": 2.2144,
"step": 245
},
{
"epoch": 0.27348526959421904,
"grad_norm": 1.2018048763275146,
"learning_rate": 3.3785077745215873e-05,
"loss": 2.1746,
"step": 246
},
{
"epoch": 0.2745969983324069,
"grad_norm": 1.330165982246399,
"learning_rate": 3.340460734357359e-05,
"loss": 2.0056,
"step": 247
},
{
"epoch": 0.2757087270705948,
"grad_norm": 1.8214839696884155,
"learning_rate": 3.3025213793178646e-05,
"loss": 2.2728,
"step": 248
},
{
"epoch": 0.2768204558087827,
"grad_norm": 1.4953075647354126,
"learning_rate": 3.264692171233485e-05,
"loss": 2.2458,
"step": 249
},
{
"epoch": 0.27793218454697055,
"grad_norm": 2.2444264888763428,
"learning_rate": 3.226975564787322e-05,
"loss": 2.1073,
"step": 250
},
{
"epoch": 0.27793218454697055,
"eval_loss": 2.419438123703003,
"eval_runtime": 71.5319,
"eval_samples_per_second": 21.179,
"eval_steps_per_second": 5.298,
"step": 250
},
{
"epoch": 0.27904391328515843,
"grad_norm": 3.008030891418457,
"learning_rate": 3.189374007355917e-05,
"loss": 2.903,
"step": 251
},
{
"epoch": 0.2801556420233463,
"grad_norm": 2.9889330863952637,
"learning_rate": 3.151889938850445e-05,
"loss": 2.7301,
"step": 252
},
{
"epoch": 0.2812673707615342,
"grad_norm": 3.3704285621643066,
"learning_rate": 3.114525791558398e-05,
"loss": 2.8056,
"step": 253
},
{
"epoch": 0.28237909949972206,
"grad_norm": 3.12371563911438,
"learning_rate": 3.0772839899857464e-05,
"loss": 2.6181,
"step": 254
},
{
"epoch": 0.28349082823790994,
"grad_norm": 2.795156478881836,
"learning_rate": 3.0401669506996256e-05,
"loss": 2.5891,
"step": 255
},
{
"epoch": 0.2846025569760978,
"grad_norm": 2.5517489910125732,
"learning_rate": 3.003177082171523e-05,
"loss": 2.5578,
"step": 256
},
{
"epoch": 0.2857142857142857,
"grad_norm": 2.131334066390991,
"learning_rate": 2.9663167846209998e-05,
"loss": 2.9474,
"step": 257
},
{
"epoch": 0.2868260144524736,
"grad_norm": 2.138378381729126,
"learning_rate": 2.9295884498599414e-05,
"loss": 2.5115,
"step": 258
},
{
"epoch": 0.28793774319066145,
"grad_norm": 1.636126160621643,
"learning_rate": 2.8929944611373554e-05,
"loss": 2.5024,
"step": 259
},
{
"epoch": 0.28904947192884933,
"grad_norm": 1.2389148473739624,
"learning_rate": 2.8565371929847284e-05,
"loss": 2.2814,
"step": 260
},
{
"epoch": 0.29016120066703727,
"grad_norm": 1.189886450767517,
"learning_rate": 2.8202190110619493e-05,
"loss": 2.4194,
"step": 261
},
{
"epoch": 0.29127292940522514,
"grad_norm": 1.0696269273757935,
"learning_rate": 2.784042272003794e-05,
"loss": 2.6107,
"step": 262
},
{
"epoch": 0.292384658143413,
"grad_norm": 0.9985766410827637,
"learning_rate": 2.7480093232670158e-05,
"loss": 2.5073,
"step": 263
},
{
"epoch": 0.2934963868816009,
"grad_norm": 0.9005837440490723,
"learning_rate": 2.712122502978024e-05,
"loss": 2.3042,
"step": 264
},
{
"epoch": 0.2946081156197888,
"grad_norm": 0.9823698997497559,
"learning_rate": 2.6763841397811573e-05,
"loss": 2.2399,
"step": 265
},
{
"epoch": 0.29571984435797666,
"grad_norm": 0.9442362189292908,
"learning_rate": 2.64079655268759e-05,
"loss": 2.4393,
"step": 266
},
{
"epoch": 0.29683157309616454,
"grad_norm": 0.831301748752594,
"learning_rate": 2.605362050924848e-05,
"loss": 2.2221,
"step": 267
},
{
"epoch": 0.2979433018343524,
"grad_norm": 0.9295997619628906,
"learning_rate": 2.57008293378697e-05,
"loss": 2.3975,
"step": 268
},
{
"epoch": 0.2990550305725403,
"grad_norm": 1.0188276767730713,
"learning_rate": 2.534961490485313e-05,
"loss": 2.4616,
"step": 269
},
{
"epoch": 0.30016675931072817,
"grad_norm": 0.9668642282485962,
"learning_rate": 2.500000000000001e-05,
"loss": 2.2432,
"step": 270
},
{
"epoch": 0.30127848804891605,
"grad_norm": 1.0286155939102173,
"learning_rate": 2.4652007309320498e-05,
"loss": 2.3834,
"step": 271
},
{
"epoch": 0.3023902167871039,
"grad_norm": 1.0591888427734375,
"learning_rate": 2.430565941356157e-05,
"loss": 2.3591,
"step": 272
},
{
"epoch": 0.3035019455252918,
"grad_norm": 1.0622197389602661,
"learning_rate": 2.3960978786741877e-05,
"loss": 2.4136,
"step": 273
},
{
"epoch": 0.3046136742634797,
"grad_norm": 0.9972384572029114,
"learning_rate": 2.361798779469336e-05,
"loss": 2.4249,
"step": 274
},
{
"epoch": 0.3057254030016676,
"grad_norm": 0.8855586647987366,
"learning_rate": 2.3276708693609943e-05,
"loss": 2.1604,
"step": 275
},
{
"epoch": 0.3068371317398555,
"grad_norm": 0.9017280340194702,
"learning_rate": 2.2937163628603435e-05,
"loss": 2.0953,
"step": 276
},
{
"epoch": 0.3079488604780434,
"grad_norm": 0.9258705377578735,
"learning_rate": 2.259937463226651e-05,
"loss": 2.1436,
"step": 277
},
{
"epoch": 0.30906058921623125,
"grad_norm": 0.8871088027954102,
"learning_rate": 2.2263363623243054e-05,
"loss": 2.3352,
"step": 278
},
{
"epoch": 0.31017231795441913,
"grad_norm": 0.9475242495536804,
"learning_rate": 2.192915240480596e-05,
"loss": 2.2745,
"step": 279
},
{
"epoch": 0.311284046692607,
"grad_norm": 0.9121392965316772,
"learning_rate": 2.1596762663442218e-05,
"loss": 2.0523,
"step": 280
},
{
"epoch": 0.3123957754307949,
"grad_norm": 0.9183974266052246,
"learning_rate": 2.1266215967445824e-05,
"loss": 2.1313,
"step": 281
},
{
"epoch": 0.31350750416898276,
"grad_norm": 1.0411345958709717,
"learning_rate": 2.0937533765518187e-05,
"loss": 2.5137,
"step": 282
},
{
"epoch": 0.31461923290717064,
"grad_norm": 0.9422702193260193,
"learning_rate": 2.061073738537635e-05,
"loss": 2.3598,
"step": 283
},
{
"epoch": 0.3157309616453585,
"grad_norm": 0.9007568359375,
"learning_rate": 2.0285848032369137e-05,
"loss": 2.2845,
"step": 284
},
{
"epoch": 0.3168426903835464,
"grad_norm": 0.895376980304718,
"learning_rate": 1.996288678810105e-05,
"loss": 2.1736,
"step": 285
},
{
"epoch": 0.3179544191217343,
"grad_norm": 0.9709522128105164,
"learning_rate": 1.9641874609064443e-05,
"loss": 2.2274,
"step": 286
},
{
"epoch": 0.31906614785992216,
"grad_norm": 1.032238245010376,
"learning_rate": 1.932283232527956e-05,
"loss": 2.2555,
"step": 287
},
{
"epoch": 0.32017787659811003,
"grad_norm": 1.008985161781311,
"learning_rate": 1.9005780638942982e-05,
"loss": 2.3412,
"step": 288
},
{
"epoch": 0.32128960533629797,
"grad_norm": 0.9746498465538025,
"learning_rate": 1.8690740123084316e-05,
"loss": 2.1347,
"step": 289
},
{
"epoch": 0.32240133407448585,
"grad_norm": 1.0789058208465576,
"learning_rate": 1.837773122023114e-05,
"loss": 2.2996,
"step": 290
},
{
"epoch": 0.3235130628126737,
"grad_norm": 1.0640016794204712,
"learning_rate": 1.8066774241082612e-05,
"loss": 2.1882,
"step": 291
},
{
"epoch": 0.3246247915508616,
"grad_norm": 1.2953685522079468,
"learning_rate": 1.7757889363191483e-05,
"loss": 2.3575,
"step": 292
},
{
"epoch": 0.3257365202890495,
"grad_norm": 1.0919634103775024,
"learning_rate": 1.745109662965481e-05,
"loss": 2.3441,
"step": 293
},
{
"epoch": 0.32684824902723736,
"grad_norm": 1.0896896123886108,
"learning_rate": 1.714641594781347e-05,
"loss": 2.4013,
"step": 294
},
{
"epoch": 0.32795997776542524,
"grad_norm": 1.1087121963500977,
"learning_rate": 1.684386708796025e-05,
"loss": 2.1645,
"step": 295
},
{
"epoch": 0.3290717065036131,
"grad_norm": 1.2468881607055664,
"learning_rate": 1.6543469682057106e-05,
"loss": 2.4229,
"step": 296
},
{
"epoch": 0.330183435241801,
"grad_norm": 1.247047781944275,
"learning_rate": 1.62452432224612e-05,
"loss": 2.4761,
"step": 297
},
{
"epoch": 0.33129516397998887,
"grad_norm": 1.446397066116333,
"learning_rate": 1.5949207060660138e-05,
"loss": 2.5412,
"step": 298
},
{
"epoch": 0.33240689271817675,
"grad_norm": 1.6580612659454346,
"learning_rate": 1.5655380406016235e-05,
"loss": 2.2901,
"step": 299
},
{
"epoch": 0.33351862145636463,
"grad_norm": 1.7994294166564941,
"learning_rate": 1.536378232452003e-05,
"loss": 2.4478,
"step": 300
},
{
"epoch": 0.33351862145636463,
"eval_loss": 2.3351998329162598,
"eval_runtime": 71.5628,
"eval_samples_per_second": 21.17,
"eval_steps_per_second": 5.296,
"step": 300
},
{
"epoch": 0.3346303501945525,
"grad_norm": 1.6746742725372314,
"learning_rate": 1.5074431737553157e-05,
"loss": 2.5023,
"step": 301
},
{
"epoch": 0.3357420789327404,
"grad_norm": 1.6056064367294312,
"learning_rate": 1.4787347420660541e-05,
"loss": 2.5727,
"step": 302
},
{
"epoch": 0.3368538076709283,
"grad_norm": 1.4742602109909058,
"learning_rate": 1.4502548002332088e-05,
"loss": 2.43,
"step": 303
},
{
"epoch": 0.3379655364091162,
"grad_norm": 1.530042052268982,
"learning_rate": 1.422005196279395e-05,
"loss": 2.5862,
"step": 304
},
{
"epoch": 0.3390772651473041,
"grad_norm": 1.5977087020874023,
"learning_rate": 1.3939877632809278e-05,
"loss": 2.5271,
"step": 305
},
{
"epoch": 0.34018899388549195,
"grad_norm": 1.583370327949524,
"learning_rate": 1.3662043192488849e-05,
"loss": 2.4107,
"step": 306
},
{
"epoch": 0.34130072262367983,
"grad_norm": 1.6869573593139648,
"learning_rate": 1.338656667011134e-05,
"loss": 2.6216,
"step": 307
},
{
"epoch": 0.3424124513618677,
"grad_norm": 1.4852731227874756,
"learning_rate": 1.3113465940953495e-05,
"loss": 2.5307,
"step": 308
},
{
"epoch": 0.3435241801000556,
"grad_norm": 1.2093337774276733,
"learning_rate": 1.2842758726130283e-05,
"loss": 2.2246,
"step": 309
},
{
"epoch": 0.34463590883824347,
"grad_norm": 1.3381179571151733,
"learning_rate": 1.257446259144494e-05,
"loss": 2.4152,
"step": 310
},
{
"epoch": 0.34574763757643134,
"grad_norm": 1.136741280555725,
"learning_rate": 1.2308594946249163e-05,
"loss": 2.5018,
"step": 311
},
{
"epoch": 0.3468593663146192,
"grad_norm": 0.9899182319641113,
"learning_rate": 1.204517304231343e-05,
"loss": 2.3935,
"step": 312
},
{
"epoch": 0.3479710950528071,
"grad_norm": 1.0916366577148438,
"learning_rate": 1.178421397270758e-05,
"loss": 2.5129,
"step": 313
},
{
"epoch": 0.349082823790995,
"grad_norm": 0.9697388410568237,
"learning_rate": 1.1525734670691701e-05,
"loss": 2.098,
"step": 314
},
{
"epoch": 0.35019455252918286,
"grad_norm": 0.952893078327179,
"learning_rate": 1.1269751908617277e-05,
"loss": 2.3895,
"step": 315
},
{
"epoch": 0.35130628126737073,
"grad_norm": 0.8646156191825867,
"learning_rate": 1.1016282296838887e-05,
"loss": 2.2197,
"step": 316
},
{
"epoch": 0.35241801000555867,
"grad_norm": 0.8228775858879089,
"learning_rate": 1.0765342282636416e-05,
"loss": 2.3013,
"step": 317
},
{
"epoch": 0.35352973874374655,
"grad_norm": 0.856402575969696,
"learning_rate": 1.0516948149147754e-05,
"loss": 2.4294,
"step": 318
},
{
"epoch": 0.3546414674819344,
"grad_norm": 1.03739333152771,
"learning_rate": 1.0271116014312293e-05,
"loss": 2.3878,
"step": 319
},
{
"epoch": 0.3557531962201223,
"grad_norm": 0.8302180767059326,
"learning_rate": 1.0027861829824952e-05,
"loss": 2.2219,
"step": 320
},
{
"epoch": 0.3568649249583102,
"grad_norm": 0.7932164072990417,
"learning_rate": 9.787201380101157e-06,
"loss": 2.2596,
"step": 321
},
{
"epoch": 0.35797665369649806,
"grad_norm": 0.8476454019546509,
"learning_rate": 9.549150281252633e-06,
"loss": 2.1147,
"step": 322
},
{
"epoch": 0.35908838243468594,
"grad_norm": 0.9964444637298584,
"learning_rate": 9.313723980074018e-06,
"loss": 2.3652,
"step": 323
},
{
"epoch": 0.3602001111728738,
"grad_norm": 1.0413466691970825,
"learning_rate": 9.080937753040646e-06,
"loss": 2.2266,
"step": 324
},
{
"epoch": 0.3613118399110617,
"grad_norm": 0.8853463530540466,
"learning_rate": 8.850806705317183e-06,
"loss": 2.2614,
"step": 325
},
{
"epoch": 0.3624235686492496,
"grad_norm": 0.7970969080924988,
"learning_rate": 8.623345769777514e-06,
"loss": 2.1004,
"step": 326
},
{
"epoch": 0.36353529738743745,
"grad_norm": 0.9213111996650696,
"learning_rate": 8.398569706035792e-06,
"loss": 2.0849,
"step": 327
},
{
"epoch": 0.36464702612562533,
"grad_norm": 0.9494043588638306,
"learning_rate": 8.176493099488663e-06,
"loss": 2.4137,
"step": 328
},
{
"epoch": 0.3657587548638132,
"grad_norm": 0.9899368286132812,
"learning_rate": 7.957130360368898e-06,
"loss": 2.301,
"step": 329
},
{
"epoch": 0.3668704836020011,
"grad_norm": 0.9502694606781006,
"learning_rate": 7.740495722810271e-06,
"loss": 2.2525,
"step": 330
},
{
"epoch": 0.367982212340189,
"grad_norm": 1.0422662496566772,
"learning_rate": 7.526603243923957e-06,
"loss": 2.2026,
"step": 331
},
{
"epoch": 0.3690939410783769,
"grad_norm": 1.1531425714492798,
"learning_rate": 7.315466802886401e-06,
"loss": 2.5027,
"step": 332
},
{
"epoch": 0.3702056698165648,
"grad_norm": 0.8836955428123474,
"learning_rate": 7.107100100038671e-06,
"loss": 2.1697,
"step": 333
},
{
"epoch": 0.37131739855475265,
"grad_norm": 0.9281957149505615,
"learning_rate": 6.901516655997536e-06,
"loss": 2.0896,
"step": 334
},
{
"epoch": 0.37242912729294053,
"grad_norm": 1.1078437566757202,
"learning_rate": 6.698729810778065e-06,
"loss": 2.1164,
"step": 335
},
{
"epoch": 0.3735408560311284,
"grad_norm": 1.1739754676818848,
"learning_rate": 6.498752722928042e-06,
"loss": 2.2655,
"step": 336
},
{
"epoch": 0.3746525847693163,
"grad_norm": 1.0183756351470947,
"learning_rate": 6.301598368674105e-06,
"loss": 2.1442,
"step": 337
},
{
"epoch": 0.37576431350750417,
"grad_norm": 1.3615624904632568,
"learning_rate": 6.107279541079769e-06,
"loss": 2.1463,
"step": 338
},
{
"epoch": 0.37687604224569204,
"grad_norm": 1.0861144065856934,
"learning_rate": 5.915808849215304e-06,
"loss": 2.2384,
"step": 339
},
{
"epoch": 0.3779877709838799,
"grad_norm": 1.1558014154434204,
"learning_rate": 5.727198717339511e-06,
"loss": 2.1577,
"step": 340
},
{
"epoch": 0.3790994997220678,
"grad_norm": 1.144195318222046,
"learning_rate": 5.54146138409355e-06,
"loss": 2.2819,
"step": 341
},
{
"epoch": 0.3802112284602557,
"grad_norm": 1.2792952060699463,
"learning_rate": 5.358608901706802e-06,
"loss": 2.2607,
"step": 342
},
{
"epoch": 0.38132295719844356,
"grad_norm": 1.2132809162139893,
"learning_rate": 5.178653135214812e-06,
"loss": 2.1884,
"step": 343
},
{
"epoch": 0.38243468593663144,
"grad_norm": 1.3561714887619019,
"learning_rate": 5.001605761689398e-06,
"loss": 2.2476,
"step": 344
},
{
"epoch": 0.38354641467481937,
"grad_norm": 1.3285537958145142,
"learning_rate": 4.827478269480895e-06,
"loss": 2.016,
"step": 345
},
{
"epoch": 0.38465814341300725,
"grad_norm": 1.4897688627243042,
"learning_rate": 4.65628195747273e-06,
"loss": 2.4268,
"step": 346
},
{
"epoch": 0.3857698721511951,
"grad_norm": 1.846487045288086,
"learning_rate": 4.488027934348271e-06,
"loss": 2.3771,
"step": 347
},
{
"epoch": 0.386881600889383,
"grad_norm": 1.8320105075836182,
"learning_rate": 4.322727117869951e-06,
"loss": 2.2541,
"step": 348
},
{
"epoch": 0.3879933296275709,
"grad_norm": 2.544713258743286,
"learning_rate": 4.16039023417088e-06,
"loss": 2.1886,
"step": 349
},
{
"epoch": 0.38910505836575876,
"grad_norm": 2.9872875213623047,
"learning_rate": 4.001027817058789e-06,
"loss": 2.288,
"step": 350
},
{
"epoch": 0.38910505836575876,
"eval_loss": 2.29748797416687,
"eval_runtime": 71.5498,
"eval_samples_per_second": 21.174,
"eval_steps_per_second": 5.297,
"step": 350
},
{
"epoch": 0.39021678710394664,
"grad_norm": 0.9002532362937927,
"learning_rate": 3.844650207332562e-06,
"loss": 2.4953,
"step": 351
},
{
"epoch": 0.3913285158421345,
"grad_norm": 0.9280316829681396,
"learning_rate": 3.691267552111183e-06,
"loss": 2.6538,
"step": 352
},
{
"epoch": 0.3924402445803224,
"grad_norm": 0.9478785395622253,
"learning_rate": 3.54088980417534e-06,
"loss": 2.5276,
"step": 353
},
{
"epoch": 0.3935519733185103,
"grad_norm": 0.9327703714370728,
"learning_rate": 3.393526721321616e-06,
"loss": 2.487,
"step": 354
},
{
"epoch": 0.39466370205669815,
"grad_norm": 0.8927820920944214,
"learning_rate": 3.249187865729264e-06,
"loss": 2.3213,
"step": 355
},
{
"epoch": 0.39577543079488603,
"grad_norm": 0.9866592288017273,
"learning_rate": 3.1078826033397843e-06,
"loss": 2.4739,
"step": 356
},
{
"epoch": 0.3968871595330739,
"grad_norm": 0.9074956774711609,
"learning_rate": 2.9696201032491434e-06,
"loss": 2.4302,
"step": 357
},
{
"epoch": 0.3979988882712618,
"grad_norm": 0.8887799382209778,
"learning_rate": 2.8344093371128424e-06,
"loss": 2.6652,
"step": 358
},
{
"epoch": 0.3991106170094497,
"grad_norm": 0.9303501844406128,
"learning_rate": 2.70225907856374e-06,
"loss": 2.3618,
"step": 359
},
{
"epoch": 0.4002223457476376,
"grad_norm": 0.8939053416252136,
"learning_rate": 2.573177902642726e-06,
"loss": 2.3751,
"step": 360
},
{
"epoch": 0.4013340744858255,
"grad_norm": 0.8913921117782593,
"learning_rate": 2.4471741852423237e-06,
"loss": 2.2483,
"step": 361
},
{
"epoch": 0.40244580322401335,
"grad_norm": 0.8503320813179016,
"learning_rate": 2.324256102563188e-06,
"loss": 2.2931,
"step": 362
},
{
"epoch": 0.40355753196220123,
"grad_norm": 0.9348843693733215,
"learning_rate": 2.204431630583548e-06,
"loss": 2.4732,
"step": 363
},
{
"epoch": 0.4046692607003891,
"grad_norm": 0.8464366793632507,
"learning_rate": 2.087708544541689e-06,
"loss": 2.1994,
"step": 364
},
{
"epoch": 0.405780989438577,
"grad_norm": 0.8891509175300598,
"learning_rate": 1.974094418431388e-06,
"loss": 2.3163,
"step": 365
},
{
"epoch": 0.40689271817676487,
"grad_norm": 0.8961646556854248,
"learning_rate": 1.8635966245104664e-06,
"loss": 2.2609,
"step": 366
},
{
"epoch": 0.40800444691495275,
"grad_norm": 0.8280013799667358,
"learning_rate": 1.7562223328224325e-06,
"loss": 2.1203,
"step": 367
},
{
"epoch": 0.4091161756531406,
"grad_norm": 0.8764577507972717,
"learning_rate": 1.6519785107311891e-06,
"loss": 2.1943,
"step": 368
},
{
"epoch": 0.4102279043913285,
"grad_norm": 0.8844929337501526,
"learning_rate": 1.5508719224689717e-06,
"loss": 2.3813,
"step": 369
},
{
"epoch": 0.4113396331295164,
"grad_norm": 0.9021992087364197,
"learning_rate": 1.4529091286973995e-06,
"loss": 2.2267,
"step": 370
},
{
"epoch": 0.41245136186770426,
"grad_norm": 0.8152535557746887,
"learning_rate": 1.358096486081778e-06,
"loss": 2.1442,
"step": 371
},
{
"epoch": 0.41356309060589214,
"grad_norm": 0.9210073947906494,
"learning_rate": 1.2664401468786114e-06,
"loss": 2.2703,
"step": 372
},
{
"epoch": 0.41467481934408007,
"grad_norm": 0.7856800556182861,
"learning_rate": 1.1779460585363944e-06,
"loss": 2.187,
"step": 373
},
{
"epoch": 0.41578654808226795,
"grad_norm": 0.9219398498535156,
"learning_rate": 1.0926199633097157e-06,
"loss": 2.538,
"step": 374
},
{
"epoch": 0.4168982768204558,
"grad_norm": 0.8674458265304565,
"learning_rate": 1.0104673978866164e-06,
"loss": 2.3901,
"step": 375
},
{
"epoch": 0.4180100055586437,
"grad_norm": 0.8765695095062256,
"learning_rate": 9.314936930293283e-07,
"loss": 2.1564,
"step": 376
},
{
"epoch": 0.4191217342968316,
"grad_norm": 0.7773668766021729,
"learning_rate": 8.557039732283944e-07,
"loss": 2.1513,
"step": 377
},
{
"epoch": 0.42023346303501946,
"grad_norm": 0.799339771270752,
"learning_rate": 7.83103156370113e-07,
"loss": 2.0931,
"step": 378
},
{
"epoch": 0.42134519177320734,
"grad_norm": 0.8551070094108582,
"learning_rate": 7.136959534174592e-07,
"loss": 2.0482,
"step": 379
},
{
"epoch": 0.4224569205113952,
"grad_norm": 0.8003242015838623,
"learning_rate": 6.474868681043578e-07,
"loss": 2.0851,
"step": 380
},
{
"epoch": 0.4235686492495831,
"grad_norm": 0.7685055732727051,
"learning_rate": 5.844801966434832e-07,
"loss": 1.9849,
"step": 381
},
{
"epoch": 0.424680377987771,
"grad_norm": 0.8753073215484619,
"learning_rate": 5.246800274474439e-07,
"loss": 2.1241,
"step": 382
},
{
"epoch": 0.42579210672595885,
"grad_norm": 0.8959077000617981,
"learning_rate": 4.680902408635335e-07,
"loss": 2.2865,
"step": 383
},
{
"epoch": 0.42690383546414673,
"grad_norm": 0.9518845677375793,
"learning_rate": 4.1471450892189846e-07,
"loss": 2.1737,
"step": 384
},
{
"epoch": 0.4280155642023346,
"grad_norm": 0.885158896446228,
"learning_rate": 3.6455629509730136e-07,
"loss": 2.0735,
"step": 385
},
{
"epoch": 0.4291272929405225,
"grad_norm": 0.9893671274185181,
"learning_rate": 3.1761885408435054e-07,
"loss": 2.2165,
"step": 386
},
{
"epoch": 0.4302390216787104,
"grad_norm": 0.8594589829444885,
"learning_rate": 2.7390523158633554e-07,
"loss": 2.1061,
"step": 387
},
{
"epoch": 0.4313507504168983,
"grad_norm": 0.9702785015106201,
"learning_rate": 2.334182641175686e-07,
"loss": 2.0657,
"step": 388
},
{
"epoch": 0.4324624791550862,
"grad_norm": 1.0460872650146484,
"learning_rate": 1.9616057881935436e-07,
"loss": 2.0916,
"step": 389
},
{
"epoch": 0.43357420789327406,
"grad_norm": 1.0313807725906372,
"learning_rate": 1.6213459328950352e-07,
"loss": 2.2682,
"step": 390
},
{
"epoch": 0.43468593663146193,
"grad_norm": 1.0293152332305908,
"learning_rate": 1.3134251542544774e-07,
"loss": 2.076,
"step": 391
},
{
"epoch": 0.4357976653696498,
"grad_norm": 1.0680193901062012,
"learning_rate": 1.0378634328099269e-07,
"loss": 2.0364,
"step": 392
},
{
"epoch": 0.4369093941078377,
"grad_norm": 1.0686061382293701,
"learning_rate": 7.946786493666647e-08,
"loss": 2.1607,
"step": 393
},
{
"epoch": 0.43802112284602557,
"grad_norm": 1.0303860902786255,
"learning_rate": 5.838865838366792e-08,
"loss": 2.1599,
"step": 394
},
{
"epoch": 0.43913285158421345,
"grad_norm": 1.2263742685317993,
"learning_rate": 4.055009142152067e-08,
"loss": 2.1796,
"step": 395
},
{
"epoch": 0.4402445803224013,
"grad_norm": 1.4119532108306885,
"learning_rate": 2.595332156925534e-08,
"loss": 2.2719,
"step": 396
},
{
"epoch": 0.4413563090605892,
"grad_norm": 1.4214668273925781,
"learning_rate": 1.4599295990352924e-08,
"loss": 2.3379,
"step": 397
},
{
"epoch": 0.4424680377987771,
"grad_norm": 1.72279691696167,
"learning_rate": 6.488751431266149e-09,
"loss": 2.2098,
"step": 398
},
{
"epoch": 0.44357976653696496,
"grad_norm": 1.8887008428573608,
"learning_rate": 1.622214173602199e-09,
"loss": 2.1835,
"step": 399
},
{
"epoch": 0.44469149527515284,
"grad_norm": 3.1898610591888428,
"learning_rate": 0.0,
"loss": 2.4357,
"step": 400
},
{
"epoch": 0.44469149527515284,
"eval_loss": 2.29616379737854,
"eval_runtime": 71.5597,
"eval_samples_per_second": 21.171,
"eval_steps_per_second": 5.296,
"step": 400
}
],
"logging_steps": 1,
"max_steps": 400,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.276333740621824e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}