{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 110,
"global_step": 438,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00228310502283105,
"grad_norm": 49763.3828125,
"learning_rate": 1e-05,
"loss": 1.458,
"step": 1
},
{
"epoch": 0.00228310502283105,
"eval_loss": 1.3469293117523193,
"eval_runtime": 5.899,
"eval_samples_per_second": 16.952,
"eval_steps_per_second": 8.476,
"step": 1
},
{
"epoch": 0.0045662100456621,
"grad_norm": 47645.4765625,
"learning_rate": 2e-05,
"loss": 1.663,
"step": 2
},
{
"epoch": 0.00684931506849315,
"grad_norm": 53930.67578125,
"learning_rate": 3e-05,
"loss": 1.2375,
"step": 3
},
{
"epoch": 0.0091324200913242,
"grad_norm": 79495.359375,
"learning_rate": 4e-05,
"loss": 1.4099,
"step": 4
},
{
"epoch": 0.01141552511415525,
"grad_norm": 51649.53125,
"learning_rate": 5e-05,
"loss": 1.3848,
"step": 5
},
{
"epoch": 0.0136986301369863,
"grad_norm": 62501.76171875,
"learning_rate": 6e-05,
"loss": 1.3113,
"step": 6
},
{
"epoch": 0.01598173515981735,
"grad_norm": 56830.796875,
"learning_rate": 7e-05,
"loss": 1.394,
"step": 7
},
{
"epoch": 0.0182648401826484,
"grad_norm": 69029.3671875,
"learning_rate": 8e-05,
"loss": 1.1999,
"step": 8
},
{
"epoch": 0.02054794520547945,
"grad_norm": 77872.7421875,
"learning_rate": 9e-05,
"loss": 1.4121,
"step": 9
},
{
"epoch": 0.0228310502283105,
"grad_norm": 67018.5234375,
"learning_rate": 0.0001,
"loss": 1.5292,
"step": 10
},
{
"epoch": 0.02511415525114155,
"grad_norm": 52883.80078125,
"learning_rate": 0.00011000000000000002,
"loss": 1.1904,
"step": 11
},
{
"epoch": 0.0273972602739726,
"grad_norm": 89700.34375,
"learning_rate": 0.00012,
"loss": 1.6428,
"step": 12
},
{
"epoch": 0.02968036529680365,
"grad_norm": 70959.6875,
"learning_rate": 0.00013000000000000002,
"loss": 1.8412,
"step": 13
},
{
"epoch": 0.0319634703196347,
"grad_norm": 123958.625,
"learning_rate": 0.00014,
"loss": 1.3549,
"step": 14
},
{
"epoch": 0.03424657534246575,
"grad_norm": 55201.86328125,
"learning_rate": 0.00015000000000000001,
"loss": 0.717,
"step": 15
},
{
"epoch": 0.0365296803652968,
"grad_norm": 79495.921875,
"learning_rate": 0.00016,
"loss": 2.5554,
"step": 16
},
{
"epoch": 0.03881278538812785,
"grad_norm": 94823.359375,
"learning_rate": 0.00017,
"loss": 1.3731,
"step": 17
},
{
"epoch": 0.0410958904109589,
"grad_norm": 78739.7890625,
"learning_rate": 0.00018,
"loss": 0.9813,
"step": 18
},
{
"epoch": 0.04337899543378995,
"grad_norm": 70018.5234375,
"learning_rate": 0.00019,
"loss": 1.3944,
"step": 19
},
{
"epoch": 0.045662100456621,
"grad_norm": 93789.5625,
"learning_rate": 0.0002,
"loss": 1.3604,
"step": 20
},
{
"epoch": 0.04794520547945205,
"grad_norm": 69996.328125,
"learning_rate": 0.0001999971756719333,
"loss": 0.7975,
"step": 21
},
{
"epoch": 0.0502283105022831,
"grad_norm": 62440.68359375,
"learning_rate": 0.00019998870284726968,
"loss": 1.3632,
"step": 22
},
{
"epoch": 0.05251141552511415,
"grad_norm": 57081.87890625,
"learning_rate": 0.00019997458200460993,
"loss": 1.181,
"step": 23
},
{
"epoch": 0.0547945205479452,
"grad_norm": 68026.21875,
"learning_rate": 0.00019995481394159188,
"loss": 1.2883,
"step": 24
},
{
"epoch": 0.05707762557077625,
"grad_norm": 68773.109375,
"learning_rate": 0.0001999293997748454,
"loss": 1.209,
"step": 25
},
{
"epoch": 0.0593607305936073,
"grad_norm": 54953.078125,
"learning_rate": 0.00019989834093992945,
"loss": 1.2207,
"step": 26
},
{
"epoch": 0.06164383561643835,
"grad_norm": 68915.734375,
"learning_rate": 0.00019986163919125075,
"loss": 1.1395,
"step": 27
},
{
"epoch": 0.0639269406392694,
"grad_norm": 77723.328125,
"learning_rate": 0.00019981929660196492,
"loss": 1.4178,
"step": 28
},
{
"epoch": 0.06621004566210045,
"grad_norm": 57558.64453125,
"learning_rate": 0.0001997713155638592,
"loss": 1.3991,
"step": 29
},
{
"epoch": 0.0684931506849315,
"grad_norm": 57487.20703125,
"learning_rate": 0.00019971769878721743,
"loss": 1.2361,
"step": 30
},
{
"epoch": 0.07077625570776255,
"grad_norm": 56958.12109375,
"learning_rate": 0.000199658449300667,
"loss": 1.0886,
"step": 31
},
{
"epoch": 0.0730593607305936,
"grad_norm": 69826.890625,
"learning_rate": 0.00019959357045100764,
"loss": 1.2462,
"step": 32
},
{
"epoch": 0.07534246575342465,
"grad_norm": 83606.59375,
"learning_rate": 0.00019952306590302247,
"loss": 1.5512,
"step": 33
},
{
"epoch": 0.0776255707762557,
"grad_norm": 96047.1171875,
"learning_rate": 0.00019944693963927092,
"loss": 1.2642,
"step": 34
},
{
"epoch": 0.07990867579908675,
"grad_norm": 75340.171875,
"learning_rate": 0.00019936519595986394,
"loss": 1.3945,
"step": 35
},
{
"epoch": 0.0821917808219178,
"grad_norm": 53603.67578125,
"learning_rate": 0.00019927783948222084,
"loss": 0.8141,
"step": 36
},
{
"epoch": 0.08447488584474885,
"grad_norm": 60950.3359375,
"learning_rate": 0.00019918487514080865,
"loss": 1.1856,
"step": 37
},
{
"epoch": 0.0867579908675799,
"grad_norm": 57694.49609375,
"learning_rate": 0.00019908630818686338,
"loss": 0.5208,
"step": 38
},
{
"epoch": 0.08904109589041095,
"grad_norm": 83846.890625,
"learning_rate": 0.0001989821441880933,
"loss": 1.2401,
"step": 39
},
{
"epoch": 0.091324200913242,
"grad_norm": 60294.91796875,
"learning_rate": 0.00019887238902836448,
"loss": 1.3362,
"step": 40
},
{
"epoch": 0.09360730593607305,
"grad_norm": 55908.76953125,
"learning_rate": 0.00019875704890736853,
"loss": 1.0295,
"step": 41
},
{
"epoch": 0.0958904109589041,
"grad_norm": 79842.359375,
"learning_rate": 0.00019863613034027224,
"loss": 1.3764,
"step": 42
},
{
"epoch": 0.09817351598173515,
"grad_norm": 53915.5703125,
"learning_rate": 0.0001985096401573497,
"loss": 1.1399,
"step": 43
},
{
"epoch": 0.1004566210045662,
"grad_norm": 56798.44921875,
"learning_rate": 0.00019837758550359636,
"loss": 0.9945,
"step": 44
},
{
"epoch": 0.10273972602739725,
"grad_norm": 72487.9921875,
"learning_rate": 0.0001982399738383255,
"loss": 1.4328,
"step": 45
},
{
"epoch": 0.1050228310502283,
"grad_norm": 56317.5546875,
"learning_rate": 0.00019809681293474693,
"loss": 1.2217,
"step": 46
},
{
"epoch": 0.10730593607305935,
"grad_norm": 108634.3203125,
"learning_rate": 0.0001979481108795278,
"loss": 1.5159,
"step": 47
},
{
"epoch": 0.1095890410958904,
"grad_norm": 77436.7578125,
"learning_rate": 0.00019779387607233586,
"loss": 1.051,
"step": 48
},
{
"epoch": 0.11187214611872145,
"grad_norm": 61779.69921875,
"learning_rate": 0.00019763411722536502,
"loss": 1.2487,
"step": 49
},
{
"epoch": 0.1141552511415525,
"grad_norm": 74456.3359375,
"learning_rate": 0.00019746884336284317,
"loss": 1.5362,
"step": 50
},
{
"epoch": 0.11643835616438356,
"grad_norm": 65273.90625,
"learning_rate": 0.00019729806382052248,
"loss": 1.125,
"step": 51
},
{
"epoch": 0.1187214611872146,
"grad_norm": 74326.5390625,
"learning_rate": 0.00019712178824515212,
"loss": 1.4872,
"step": 52
},
{
"epoch": 0.12100456621004566,
"grad_norm": 66311.375,
"learning_rate": 0.00019694002659393305,
"loss": 1.3129,
"step": 53
},
{
"epoch": 0.1232876712328767,
"grad_norm": 93956.140625,
"learning_rate": 0.00019675278913395606,
"loss": 1.1963,
"step": 54
},
{
"epoch": 0.12557077625570776,
"grad_norm": 54108.7421875,
"learning_rate": 0.0001965600864416213,
"loss": 1.3549,
"step": 55
},
{
"epoch": 0.1278538812785388,
"grad_norm": 82672.5234375,
"learning_rate": 0.00019636192940204134,
"loss": 1.3415,
"step": 56
},
{
"epoch": 0.13013698630136986,
"grad_norm": 79705.0234375,
"learning_rate": 0.00019615832920842586,
"loss": 1.0711,
"step": 57
},
{
"epoch": 0.1324200913242009,
"grad_norm": 66569.171875,
"learning_rate": 0.00019594929736144976,
"loss": 1.5752,
"step": 58
},
{
"epoch": 0.13470319634703196,
"grad_norm": 54818.6953125,
"learning_rate": 0.0001957348456686032,
"loss": 1.2086,
"step": 59
},
{
"epoch": 0.136986301369863,
"grad_norm": 84023.5625,
"learning_rate": 0.00019551498624352496,
"loss": 1.2713,
"step": 60
},
{
"epoch": 0.13926940639269406,
"grad_norm": 58488.77734375,
"learning_rate": 0.00019528973150531787,
"loss": 1.1957,
"step": 61
},
{
"epoch": 0.1415525114155251,
"grad_norm": 59256.1328125,
"learning_rate": 0.00019505909417784754,
"loss": 1.1863,
"step": 62
},
{
"epoch": 0.14383561643835616,
"grad_norm": 58009.8359375,
"learning_rate": 0.00019482308728902356,
"loss": 1.0046,
"step": 63
},
{
"epoch": 0.1461187214611872,
"grad_norm": 58970.30859375,
"learning_rate": 0.00019458172417006347,
"loss": 1.3599,
"step": 64
},
{
"epoch": 0.14840182648401826,
"grad_norm": 75271.78125,
"learning_rate": 0.00019433501845473995,
"loss": 1.175,
"step": 65
},
{
"epoch": 0.1506849315068493,
"grad_norm": 60125.44140625,
"learning_rate": 0.00019408298407861042,
"loss": 1.2096,
"step": 66
},
{
"epoch": 0.15296803652968036,
"grad_norm": 62565.88671875,
"learning_rate": 0.00019382563527823026,
"loss": 1.0284,
"step": 67
},
{
"epoch": 0.1552511415525114,
"grad_norm": 64562.3359375,
"learning_rate": 0.00019356298659034817,
"loss": 1.1955,
"step": 68
},
{
"epoch": 0.15753424657534246,
"grad_norm": 61627.109375,
"learning_rate": 0.00019329505285108542,
"loss": 1.1498,
"step": 69
},
{
"epoch": 0.1598173515981735,
"grad_norm": 65598.3515625,
"learning_rate": 0.00019302184919509755,
"loss": 1.046,
"step": 70
},
{
"epoch": 0.16210045662100456,
"grad_norm": 56694.41015625,
"learning_rate": 0.00019274339105471971,
"loss": 0.6779,
"step": 71
},
{
"epoch": 0.1643835616438356,
"grad_norm": 82800.1015625,
"learning_rate": 0.00019245969415909465,
"loss": 1.2381,
"step": 72
},
{
"epoch": 0.16666666666666666,
"grad_norm": 44540.94140625,
"learning_rate": 0.00019217077453328449,
"loss": 1.0871,
"step": 73
},
{
"epoch": 0.1689497716894977,
"grad_norm": 62427.3125,
"learning_rate": 0.0001918766484973654,
"loss": 1.4182,
"step": 74
},
{
"epoch": 0.17123287671232876,
"grad_norm": 62395.83203125,
"learning_rate": 0.00019157733266550575,
"loss": 1.1391,
"step": 75
},
{
"epoch": 0.1735159817351598,
"grad_norm": 86479.0546875,
"learning_rate": 0.0001912728439450276,
"loss": 1.4769,
"step": 76
},
{
"epoch": 0.17579908675799086,
"grad_norm": 52077.796875,
"learning_rate": 0.00019096319953545185,
"loss": 0.9951,
"step": 77
},
{
"epoch": 0.1780821917808219,
"grad_norm": 65181.8671875,
"learning_rate": 0.0001906484169275263,
"loss": 1.1435,
"step": 78
},
{
"epoch": 0.18036529680365296,
"grad_norm": 61927.88671875,
"learning_rate": 0.00019032851390223812,
"loss": 1.3638,
"step": 79
},
{
"epoch": 0.182648401826484,
"grad_norm": 65557.1796875,
"learning_rate": 0.00019000350852980909,
"loss": 1.0751,
"step": 80
},
{
"epoch": 0.18493150684931506,
"grad_norm": 67026.625,
"learning_rate": 0.00018967341916867518,
"loss": 1.4392,
"step": 81
},
{
"epoch": 0.1872146118721461,
"grad_norm": 65608.7578125,
"learning_rate": 0.00018933826446444933,
"loss": 1.4204,
"step": 82
},
{
"epoch": 0.18949771689497716,
"grad_norm": 62439.32421875,
"learning_rate": 0.0001889980633488683,
"loss": 1.6256,
"step": 83
},
{
"epoch": 0.1917808219178082,
"grad_norm": 58315.05078125,
"learning_rate": 0.00018865283503872324,
"loss": 1.3327,
"step": 84
},
{
"epoch": 0.19406392694063926,
"grad_norm": 63276.6953125,
"learning_rate": 0.00018830259903477426,
"loss": 1.3005,
"step": 85
},
{
"epoch": 0.1963470319634703,
"grad_norm": 60068.3125,
"learning_rate": 0.0001879473751206489,
"loss": 1.3073,
"step": 86
},
{
"epoch": 0.19863013698630136,
"grad_norm": 60654.34375,
"learning_rate": 0.0001875871833617246,
"loss": 1.1668,
"step": 87
},
{
"epoch": 0.2009132420091324,
"grad_norm": 49492.6875,
"learning_rate": 0.0001872220441039952,
"loss": 0.8938,
"step": 88
},
{
"epoch": 0.20319634703196346,
"grad_norm": 82807.6953125,
"learning_rate": 0.0001868519779729218,
"loss": 1.014,
"step": 89
},
{
"epoch": 0.2054794520547945,
"grad_norm": 50824.89453125,
"learning_rate": 0.0001864770058722676,
"loss": 1.0941,
"step": 90
},
{
"epoch": 0.20776255707762556,
"grad_norm": 53105.984375,
"learning_rate": 0.00018609714898291718,
"loss": 0.7681,
"step": 91
},
{
"epoch": 0.2100456621004566,
"grad_norm": 57766.73046875,
"learning_rate": 0.00018571242876167996,
"loss": 1.0531,
"step": 92
},
{
"epoch": 0.21232876712328766,
"grad_norm": 66334.5625,
"learning_rate": 0.0001853228669400784,
"loss": 1.2699,
"step": 93
},
{
"epoch": 0.2146118721461187,
"grad_norm": 54520.6015625,
"learning_rate": 0.00018492848552312014,
"loss": 1.4723,
"step": 94
},
{
"epoch": 0.21689497716894976,
"grad_norm": 75962.671875,
"learning_rate": 0.00018452930678805536,
"loss": 1.379,
"step": 95
},
{
"epoch": 0.2191780821917808,
"grad_norm": 57191.44921875,
"learning_rate": 0.00018412535328311814,
"loss": 1.3189,
"step": 96
},
{
"epoch": 0.22146118721461186,
"grad_norm": 63262.0625,
"learning_rate": 0.00018371664782625287,
"loss": 0.9332,
"step": 97
},
{
"epoch": 0.2237442922374429,
"grad_norm": 55938.12890625,
"learning_rate": 0.00018330321350382544,
"loss": 1.3675,
"step": 98
},
{
"epoch": 0.22602739726027396,
"grad_norm": 48929.921875,
"learning_rate": 0.00018288507366931905,
"loss": 1.0751,
"step": 99
},
{
"epoch": 0.228310502283105,
"grad_norm": 60707.87890625,
"learning_rate": 0.00018246225194201517,
"loss": 1.2708,
"step": 100
},
{
"epoch": 0.23059360730593606,
"grad_norm": 59498.01171875,
"learning_rate": 0.00018203477220565912,
"loss": 1.3661,
"step": 101
},
{
"epoch": 0.2328767123287671,
"grad_norm": 52212.64453125,
"learning_rate": 0.00018160265860711134,
"loss": 0.9827,
"step": 102
},
{
"epoch": 0.23515981735159816,
"grad_norm": 47361.78515625,
"learning_rate": 0.00018116593555498307,
"loss": 1.0684,
"step": 103
},
{
"epoch": 0.2374429223744292,
"grad_norm": 63993.5078125,
"learning_rate": 0.0001807246277182578,
"loss": 1.2669,
"step": 104
},
{
"epoch": 0.23972602739726026,
"grad_norm": 55152.2578125,
"learning_rate": 0.0001802787600248977,
"loss": 0.9088,
"step": 105
},
{
"epoch": 0.2420091324200913,
"grad_norm": 49352.40625,
"learning_rate": 0.0001798283576604356,
"loss": 1.0416,
"step": 106
},
{
"epoch": 0.24429223744292236,
"grad_norm": 60283.234375,
"learning_rate": 0.0001793734460665523,
"loss": 1.3194,
"step": 107
},
{
"epoch": 0.2465753424657534,
"grad_norm": 46655.90234375,
"learning_rate": 0.00017891405093963938,
"loss": 0.8581,
"step": 108
},
{
"epoch": 0.24885844748858446,
"grad_norm": 65966.4609375,
"learning_rate": 0.0001784501982293479,
"loss": 0.9858,
"step": 109
},
{
"epoch": 0.2511415525114155,
"grad_norm": 72021.5703125,
"learning_rate": 0.00017798191413712243,
"loss": 1.0411,
"step": 110
},
{
"epoch": 0.2511415525114155,
"eval_loss": 1.1379607915878296,
"eval_runtime": 6.0407,
"eval_samples_per_second": 16.554,
"eval_steps_per_second": 8.277,
"step": 110
},
{
"epoch": 0.2534246575342466,
"grad_norm": 60144.625,
"learning_rate": 0.0001775092251147211,
"loss": 1.1175,
"step": 111
},
{
"epoch": 0.2557077625570776,
"grad_norm": 55304.96484375,
"learning_rate": 0.0001770321578627213,
"loss": 0.7432,
"step": 112
},
{
"epoch": 0.2579908675799087,
"grad_norm": 50044.91015625,
"learning_rate": 0.00017655073932901168,
"loss": 0.8335,
"step": 113
},
{
"epoch": 0.2602739726027397,
"grad_norm": 63828.10546875,
"learning_rate": 0.0001760649967072697,
"loss": 1.1305,
"step": 114
},
{
"epoch": 0.2625570776255708,
"grad_norm": 56665.87890625,
"learning_rate": 0.00017557495743542585,
"loss": 1.1719,
"step": 115
},
{
"epoch": 0.2648401826484018,
"grad_norm": 63755.87109375,
"learning_rate": 0.00017508064919411344,
"loss": 1.0547,
"step": 116
},
{
"epoch": 0.2671232876712329,
"grad_norm": 55144.74609375,
"learning_rate": 0.00017458209990510527,
"loss": 0.8684,
"step": 117
},
{
"epoch": 0.2694063926940639,
"grad_norm": 68380.3125,
"learning_rate": 0.00017407933772973637,
"loss": 1.1672,
"step": 118
},
{
"epoch": 0.271689497716895,
"grad_norm": 67942.5,
"learning_rate": 0.00017357239106731317,
"loss": 1.3715,
"step": 119
},
{
"epoch": 0.273972602739726,
"grad_norm": 50505.95703125,
"learning_rate": 0.00017306128855350942,
"loss": 0.9512,
"step": 120
},
{
"epoch": 0.2762557077625571,
"grad_norm": 56973.859375,
"learning_rate": 0.0001725460590587486,
"loss": 0.9059,
"step": 121
},
{
"epoch": 0.2785388127853881,
"grad_norm": 47352.71484375,
"learning_rate": 0.00017202673168657318,
"loss": 0.9492,
"step": 122
},
{
"epoch": 0.2808219178082192,
"grad_norm": 60938.4921875,
"learning_rate": 0.0001715033357720006,
"loss": 1.1549,
"step": 123
},
{
"epoch": 0.2831050228310502,
"grad_norm": 50557.2265625,
"learning_rate": 0.00017097590087986633,
"loss": 1.1143,
"step": 124
},
{
"epoch": 0.2853881278538813,
"grad_norm": 61211.25390625,
"learning_rate": 0.00017044445680315372,
"loss": 1.1829,
"step": 125
},
{
"epoch": 0.2876712328767123,
"grad_norm": 58093.75,
"learning_rate": 0.00016990903356131124,
"loss": 1.1329,
"step": 126
},
{
"epoch": 0.2899543378995434,
"grad_norm": 62758.921875,
"learning_rate": 0.00016936966139855663,
"loss": 0.8082,
"step": 127
},
{
"epoch": 0.2922374429223744,
"grad_norm": 55444.03515625,
"learning_rate": 0.00016882637078216868,
"loss": 1.0763,
"step": 128
},
{
"epoch": 0.2945205479452055,
"grad_norm": 45902.125,
"learning_rate": 0.0001682791924007661,
"loss": 0.5558,
"step": 129
},
{
"epoch": 0.2968036529680365,
"grad_norm": 67659.0546875,
"learning_rate": 0.00016772815716257412,
"loss": 0.8364,
"step": 130
},
{
"epoch": 0.2990867579908676,
"grad_norm": 58708.359375,
"learning_rate": 0.0001671732961936785,
"loss": 1.1031,
"step": 131
},
{
"epoch": 0.3013698630136986,
"grad_norm": 44826.8671875,
"learning_rate": 0.00016661464083626734,
"loss": 0.6981,
"step": 132
},
{
"epoch": 0.3036529680365297,
"grad_norm": 49845.3125,
"learning_rate": 0.00016605222264686086,
"loss": 0.932,
"step": 133
},
{
"epoch": 0.3059360730593607,
"grad_norm": 68260.5703125,
"learning_rate": 0.00016548607339452853,
"loss": 1.1092,
"step": 134
},
{
"epoch": 0.3082191780821918,
"grad_norm": 51835.33203125,
"learning_rate": 0.00016491622505909482,
"loss": 1.175,
"step": 135
},
{
"epoch": 0.3105022831050228,
"grad_norm": 47168.08203125,
"learning_rate": 0.00016434270982933273,
"loss": 1.1088,
"step": 136
},
{
"epoch": 0.3127853881278539,
"grad_norm": 65501.48828125,
"learning_rate": 0.0001637655601011454,
"loss": 1.0026,
"step": 137
},
{
"epoch": 0.3150684931506849,
"grad_norm": 58316.41015625,
"learning_rate": 0.00016318480847573642,
"loss": 1.073,
"step": 138
},
{
"epoch": 0.317351598173516,
"grad_norm": 47404.8125,
"learning_rate": 0.00016260048775776804,
"loss": 0.9182,
"step": 139
},
{
"epoch": 0.319634703196347,
"grad_norm": 67868.96875,
"learning_rate": 0.00016201263095350833,
"loss": 1.2406,
"step": 140
},
{
"epoch": 0.3219178082191781,
"grad_norm": 62392.62109375,
"learning_rate": 0.0001614212712689668,
"loss": 0.6007,
"step": 141
},
{
"epoch": 0.3242009132420091,
"grad_norm": 44980.81640625,
"learning_rate": 0.00016082644210801844,
"loss": 0.9601,
"step": 142
},
{
"epoch": 0.3264840182648402,
"grad_norm": 53141.99609375,
"learning_rate": 0.00016022817707051724,
"loss": 0.7017,
"step": 143
},
{
"epoch": 0.3287671232876712,
"grad_norm": 54526.64453125,
"learning_rate": 0.00015962650995039783,
"loss": 1.1726,
"step": 144
},
{
"epoch": 0.3310502283105023,
"grad_norm": 60199.36328125,
"learning_rate": 0.00015902147473376694,
"loss": 1.0703,
"step": 145
},
{
"epoch": 0.3333333333333333,
"grad_norm": 62003.74609375,
"learning_rate": 0.00015841310559698343,
"loss": 1.384,
"step": 146
},
{
"epoch": 0.3356164383561644,
"grad_norm": 74924.28125,
"learning_rate": 0.0001578014369047279,
"loss": 1.4119,
"step": 147
},
{
"epoch": 0.3378995433789954,
"grad_norm": 53506.546875,
"learning_rate": 0.00015718650320806142,
"loss": 1.1047,
"step": 148
},
{
"epoch": 0.3401826484018265,
"grad_norm": 95636.2578125,
"learning_rate": 0.00015656833924247398,
"loss": 1.2457,
"step": 149
},
{
"epoch": 0.3424657534246575,
"grad_norm": 84311.3671875,
"learning_rate": 0.00015594697992592232,
"loss": 1.8571,
"step": 150
},
{
"epoch": 0.3447488584474886,
"grad_norm": 49623.90625,
"learning_rate": 0.00015532246035685756,
"loss": 0.8779,
"step": 151
},
{
"epoch": 0.3470319634703196,
"grad_norm": 75495.09375,
"learning_rate": 0.00015469481581224272,
"loss": 1.1597,
"step": 152
},
{
"epoch": 0.3493150684931507,
"grad_norm": 61961.66796875,
"learning_rate": 0.00015406408174555976,
"loss": 1.1428,
"step": 153
},
{
"epoch": 0.3515981735159817,
"grad_norm": 63387.83203125,
"learning_rate": 0.0001534302937848073,
"loss": 1.2391,
"step": 154
},
{
"epoch": 0.3538812785388128,
"grad_norm": 59753.67578125,
"learning_rate": 0.00015279348773048786,
"loss": 1.6021,
"step": 155
},
{
"epoch": 0.3561643835616438,
"grad_norm": 61863.0078125,
"learning_rate": 0.00015215369955358566,
"loss": 1.241,
"step": 156
},
{
"epoch": 0.3584474885844749,
"grad_norm": 57081.796875,
"learning_rate": 0.0001515109653935348,
"loss": 1.1971,
"step": 157
},
{
"epoch": 0.3607305936073059,
"grad_norm": 71862.75,
"learning_rate": 0.00015086532155617784,
"loss": 1.1196,
"step": 158
},
{
"epoch": 0.363013698630137,
"grad_norm": 53156.65234375,
"learning_rate": 0.00015021680451171498,
"loss": 1.1128,
"step": 159
},
{
"epoch": 0.365296803652968,
"grad_norm": 65439.45703125,
"learning_rate": 0.00014956545089264407,
"loss": 1.1221,
"step": 160
},
{
"epoch": 0.3675799086757991,
"grad_norm": 61590.7734375,
"learning_rate": 0.0001489112974916912,
"loss": 1.1785,
"step": 161
},
{
"epoch": 0.3698630136986301,
"grad_norm": 44939.07421875,
"learning_rate": 0.00014825438125973264,
"loss": 1.1794,
"step": 162
},
{
"epoch": 0.3721461187214612,
"grad_norm": 52118.67578125,
"learning_rate": 0.00014759473930370736,
"loss": 0.9673,
"step": 163
},
{
"epoch": 0.3744292237442922,
"grad_norm": 56795.2734375,
"learning_rate": 0.0001469324088845212,
"loss": 1.0183,
"step": 164
},
{
"epoch": 0.3767123287671233,
"grad_norm": 54740.015625,
"learning_rate": 0.00014626742741494206,
"loss": 1.2797,
"step": 165
},
{
"epoch": 0.3789954337899543,
"grad_norm": 54072.75,
"learning_rate": 0.00014559983245748638,
"loss": 1.1121,
"step": 166
},
{
"epoch": 0.3812785388127854,
"grad_norm": 63319.68359375,
"learning_rate": 0.00014492966172229777,
"loss": 1.1224,
"step": 167
},
{
"epoch": 0.3835616438356164,
"grad_norm": 70814.8828125,
"learning_rate": 0.00014425695306501658,
"loss": 1.5157,
"step": 168
},
{
"epoch": 0.3858447488584475,
"grad_norm": 57420.37890625,
"learning_rate": 0.00014358174448464154,
"loss": 1.213,
"step": 169
},
{
"epoch": 0.3881278538812785,
"grad_norm": 53243.4375,
"learning_rate": 0.00014290407412138366,
"loss": 1.3539,
"step": 170
},
{
"epoch": 0.3904109589041096,
"grad_norm": 55964.484375,
"learning_rate": 0.00014222398025451135,
"loss": 1.0541,
"step": 171
},
{
"epoch": 0.3926940639269406,
"grad_norm": 60372.61328125,
"learning_rate": 0.00014154150130018866,
"loss": 0.9876,
"step": 172
},
{
"epoch": 0.3949771689497717,
"grad_norm": 78811.0546875,
"learning_rate": 0.0001408566758093048,
"loss": 1.1479,
"step": 173
},
{
"epoch": 0.3972602739726027,
"grad_norm": 68956.203125,
"learning_rate": 0.00014016954246529696,
"loss": 1.3174,
"step": 174
},
{
"epoch": 0.3995433789954338,
"grad_norm": 54806.1328125,
"learning_rate": 0.00013948014008196487,
"loss": 1.1019,
"step": 175
},
{
"epoch": 0.4018264840182648,
"grad_norm": 62154.96875,
"learning_rate": 0.0001387885076012785,
"loss": 1.2393,
"step": 176
},
{
"epoch": 0.4041095890410959,
"grad_norm": 53926.55078125,
"learning_rate": 0.00013809468409117846,
"loss": 1.1682,
"step": 177
},
{
"epoch": 0.4063926940639269,
"grad_norm": 55390.63671875,
"learning_rate": 0.00013739870874336898,
"loss": 1.0232,
"step": 178
},
{
"epoch": 0.408675799086758,
"grad_norm": 60079.51953125,
"learning_rate": 0.00013670062087110422,
"loss": 1.2492,
"step": 179
},
{
"epoch": 0.410958904109589,
"grad_norm": 68658.15625,
"learning_rate": 0.00013600045990696762,
"loss": 1.432,
"step": 180
},
{
"epoch": 0.4132420091324201,
"grad_norm": 79412.75,
"learning_rate": 0.0001352982654006444,
"loss": 1.3679,
"step": 181
},
{
"epoch": 0.4155251141552511,
"grad_norm": 76066.828125,
"learning_rate": 0.00013459407701668763,
"loss": 1.3032,
"step": 182
},
{
"epoch": 0.4178082191780822,
"grad_norm": 74717.1328125,
"learning_rate": 0.00013388793453227767,
"loss": 1.1265,
"step": 183
},
{
"epoch": 0.4200913242009132,
"grad_norm": 83329.0859375,
"learning_rate": 0.0001331798778349752,
"loss": 1.5738,
"step": 184
},
{
"epoch": 0.4223744292237443,
"grad_norm": 56585.3203125,
"learning_rate": 0.00013246994692046836,
"loss": 1.1222,
"step": 185
},
{
"epoch": 0.4246575342465753,
"grad_norm": 66546.9140625,
"learning_rate": 0.00013175818189031327,
"loss": 1.1622,
"step": 186
},
{
"epoch": 0.4269406392694064,
"grad_norm": 89381.2734375,
"learning_rate": 0.00013104462294966896,
"loss": 1.0249,
"step": 187
},
{
"epoch": 0.4292237442922374,
"grad_norm": 61278.7734375,
"learning_rate": 0.00013032931040502627,
"loss": 0.9255,
"step": 188
},
{
"epoch": 0.4315068493150685,
"grad_norm": 73469.1484375,
"learning_rate": 0.00012961228466193116,
"loss": 1.2164,
"step": 189
},
{
"epoch": 0.4337899543378995,
"grad_norm": 56180.0546875,
"learning_rate": 0.00012889358622270223,
"loss": 0.8783,
"step": 190
},
{
"epoch": 0.4360730593607306,
"grad_norm": 55383.29296875,
"learning_rate": 0.00012817325568414297,
"loss": 1.0513,
"step": 191
},
{
"epoch": 0.4383561643835616,
"grad_norm": 67318.65625,
"learning_rate": 0.00012745133373524853,
"loss": 1.2075,
"step": 192
},
{
"epoch": 0.4406392694063927,
"grad_norm": 41490.49609375,
"learning_rate": 0.0001267278611549073,
"loss": 0.4758,
"step": 193
},
{
"epoch": 0.4429223744292237,
"grad_norm": 56853.50390625,
"learning_rate": 0.00012600287880959763,
"loss": 1.2679,
"step": 194
},
{
"epoch": 0.4452054794520548,
"grad_norm": 34868.66796875,
"learning_rate": 0.0001252764276510792,
"loss": 0.5454,
"step": 195
},
{
"epoch": 0.4474885844748858,
"grad_norm": 53731.953125,
"learning_rate": 0.00012454854871407994,
"loss": 1.2682,
"step": 196
},
{
"epoch": 0.4497716894977169,
"grad_norm": 57530.828125,
"learning_rate": 0.00012381928311397806,
"loss": 1.1872,
"step": 197
},
{
"epoch": 0.4520547945205479,
"grad_norm": 59639.96875,
"learning_rate": 0.0001230886720444796,
"loss": 1.2913,
"step": 198
},
{
"epoch": 0.454337899543379,
"grad_norm": 76171.09375,
"learning_rate": 0.00012235675677529158,
"loss": 1.314,
"step": 199
},
{
"epoch": 0.45662100456621,
"grad_norm": 73176.8984375,
"learning_rate": 0.00012162357864979072,
"loss": 0.9921,
"step": 200
},
{
"epoch": 0.4589041095890411,
"grad_norm": 44535.90625,
"learning_rate": 0.00012088917908268821,
"loss": 0.9701,
"step": 201
},
{
"epoch": 0.4611872146118721,
"grad_norm": 60352.7734375,
"learning_rate": 0.00012015359955769021,
"loss": 1.0526,
"step": 202
},
{
"epoch": 0.4634703196347032,
"grad_norm": 60158.63671875,
"learning_rate": 0.00011941688162515467,
"loss": 0.9069,
"step": 203
},
{
"epoch": 0.4657534246575342,
"grad_norm": 56008.59375,
"learning_rate": 0.00011867906689974428,
"loss": 1.1581,
"step": 204
},
{
"epoch": 0.4680365296803653,
"grad_norm": 60848.1328125,
"learning_rate": 0.00011794019705807584,
"loss": 1.4732,
"step": 205
},
{
"epoch": 0.4703196347031963,
"grad_norm": 73643.15625,
"learning_rate": 0.00011720031383636585,
"loss": 1.4934,
"step": 206
},
{
"epoch": 0.4726027397260274,
"grad_norm": 60684.5625,
"learning_rate": 0.00011645945902807341,
"loss": 0.9241,
"step": 207
},
{
"epoch": 0.4748858447488584,
"grad_norm": 56336.984375,
"learning_rate": 0.00011571767448153901,
"loss": 0.7535,
"step": 208
},
{
"epoch": 0.4771689497716895,
"grad_norm": 57490.16015625,
"learning_rate": 0.00011497500209762102,
"loss": 1.0042,
"step": 209
},
{
"epoch": 0.4794520547945205,
"grad_norm": 58042.80859375,
"learning_rate": 0.00011423148382732853,
"loss": 1.0596,
"step": 210
},
{
"epoch": 0.4817351598173516,
"grad_norm": 72169.4375,
"learning_rate": 0.00011348716166945195,
"loss": 1.259,
"step": 211
},
{
"epoch": 0.4840182648401826,
"grad_norm": 60079.5859375,
"learning_rate": 0.0001127420776681905,
"loss": 1.189,
"step": 212
},
{
"epoch": 0.4863013698630137,
"grad_norm": 67140.265625,
"learning_rate": 0.00011199627391077732,
"loss": 1.4022,
"step": 213
},
{
"epoch": 0.4885844748858447,
"grad_norm": 52046.58984375,
"learning_rate": 0.00011124979252510208,
"loss": 1.3088,
"step": 214
},
{
"epoch": 0.4908675799086758,
"grad_norm": 61540.34375,
"learning_rate": 0.0001105026756773314,
"loss": 1.1,
"step": 215
},
{
"epoch": 0.4931506849315068,
"grad_norm": 54976.57421875,
"learning_rate": 0.00010975496556952682,
"loss": 0.9573,
"step": 216
},
{
"epoch": 0.4954337899543379,
"grad_norm": 47444.56640625,
"learning_rate": 0.00010900670443726135,
"loss": 0.7793,
"step": 217
},
{
"epoch": 0.4977168949771689,
"grad_norm": 114338.7734375,
"learning_rate": 0.00010825793454723325,
"loss": 1.4479,
"step": 218
},
{
"epoch": 0.5,
"grad_norm": 55353.40625,
"learning_rate": 0.00010750869819487883,
"loss": 1.1219,
"step": 219
},
{
"epoch": 0.502283105022831,
"grad_norm": 56450.48046875,
"learning_rate": 0.00010675903770198333,
"loss": 1.143,
"step": 220
},
{
"epoch": 0.502283105022831,
"eval_loss": 1.120017647743225,
"eval_runtime": 6.072,
"eval_samples_per_second": 16.469,
"eval_steps_per_second": 8.235,
"step": 220
},
{
"epoch": 0.5045662100456622,
"grad_norm": 60988.2421875,
"learning_rate": 0.00010600899541429004,
"loss": 1.1982,
"step": 221
},
{
"epoch": 0.5068493150684932,
"grad_norm": 66918.2578125,
"learning_rate": 0.00010525861369910877,
"loss": 1.2181,
"step": 222
},
{
"epoch": 0.5091324200913242,
"grad_norm": 79952.6640625,
"learning_rate": 0.00010450793494292224,
"loss": 1.1986,
"step": 223
},
{
"epoch": 0.5114155251141552,
"grad_norm": 66536.671875,
"learning_rate": 0.00010375700154899208,
"loss": 1.5032,
"step": 224
},
{
"epoch": 0.5136986301369864,
"grad_norm": 71625.8984375,
"learning_rate": 0.00010300585593496348,
"loss": 1.3998,
"step": 225
},
{
"epoch": 0.5159817351598174,
"grad_norm": 66669.5703125,
"learning_rate": 0.00010225454053046921,
"loss": 0.8312,
"step": 226
},
{
"epoch": 0.5182648401826484,
"grad_norm": 39306.3359375,
"learning_rate": 0.00010150309777473306,
"loss": 0.6047,
"step": 227
},
{
"epoch": 0.5205479452054794,
"grad_norm": 53523.4609375,
"learning_rate": 0.0001007515701141722,
"loss": 1.1629,
"step": 228
},
{
"epoch": 0.5228310502283106,
"grad_norm": 74079.578125,
"learning_rate": 0.0001,
"loss": 1.0456,
"step": 229
},
{
"epoch": 0.5251141552511416,
"grad_norm": 62511.48046875,
"learning_rate": 9.924842988582782e-05,
"loss": 1.0753,
"step": 230
},
{
"epoch": 0.5273972602739726,
"grad_norm": 53424.6328125,
"learning_rate": 9.849690222526698e-05,
"loss": 1.0496,
"step": 231
},
{
"epoch": 0.5296803652968036,
"grad_norm": 76170.8671875,
"learning_rate": 9.77454594695308e-05,
"loss": 1.291,
"step": 232
},
{
"epoch": 0.5319634703196348,
"grad_norm": 54918.74609375,
"learning_rate": 9.699414406503654e-05,
"loss": 1.0644,
"step": 233
},
{
"epoch": 0.5342465753424658,
"grad_norm": 90057.484375,
"learning_rate": 9.624299845100795e-05,
"loss": 1.6448,
"step": 234
},
{
"epoch": 0.5365296803652968,
"grad_norm": 50608.3125,
"learning_rate": 9.549206505707777e-05,
"loss": 1.0954,
"step": 235
},
{
"epoch": 0.5388127853881278,
"grad_norm": 75716.2109375,
"learning_rate": 9.474138630089124e-05,
"loss": 1.2342,
"step": 236
},
{
"epoch": 0.541095890410959,
"grad_norm": 43616.71875,
"learning_rate": 9.399100458570997e-05,
"loss": 0.8373,
"step": 237
},
{
"epoch": 0.54337899543379,
"grad_norm": 51082.984375,
"learning_rate": 9.324096229801674e-05,
"loss": 1.0579,
"step": 238
},
{
"epoch": 0.545662100456621,
"grad_norm": 64326.8359375,
"learning_rate": 9.249130180512118e-05,
"loss": 1.1863,
"step": 239
},
{
"epoch": 0.547945205479452,
"grad_norm": 63323.40234375,
"learning_rate": 9.174206545276677e-05,
"loss": 1.1812,
"step": 240
},
{
"epoch": 0.5502283105022832,
"grad_norm": 57428.61328125,
"learning_rate": 9.099329556273866e-05,
"loss": 1.0751,
"step": 241
},
{
"epoch": 0.5525114155251142,
"grad_norm": 78367.109375,
"learning_rate": 9.024503443047319e-05,
"loss": 1.0871,
"step": 242
},
{
"epoch": 0.5547945205479452,
"grad_norm": 56003.375,
"learning_rate": 8.949732432266866e-05,
"loss": 1.2125,
"step": 243
},
{
"epoch": 0.5570776255707762,
"grad_norm": 50056.81640625,
"learning_rate": 8.875020747489794e-05,
"loss": 1.1738,
"step": 244
},
{
"epoch": 0.5593607305936074,
"grad_norm": 54685.7265625,
"learning_rate": 8.800372608922271e-05,
"loss": 1.2343,
"step": 245
},
{
"epoch": 0.5616438356164384,
"grad_norm": 75873.0,
"learning_rate": 8.72579223318095e-05,
"loss": 1.4155,
"step": 246
},
{
"epoch": 0.5639269406392694,
"grad_norm": 73813.1171875,
"learning_rate": 8.651283833054809e-05,
"loss": 1.3641,
"step": 247
},
{
"epoch": 0.5662100456621004,
"grad_norm": 52799.9140625,
"learning_rate": 8.57685161726715e-05,
"loss": 1.0217,
"step": 248
},
{
"epoch": 0.5684931506849316,
"grad_norm": 53224.50390625,
"learning_rate": 8.5024997902379e-05,
"loss": 1.1589,
"step": 249
},
{
"epoch": 0.5707762557077626,
"grad_norm": 101657.953125,
"learning_rate": 8.428232551846101e-05,
"loss": 1.5682,
"step": 250
},
{
"epoch": 0.5730593607305936,
"grad_norm": 61659.9453125,
"learning_rate": 8.35405409719266e-05,
"loss": 1.0378,
"step": 251
},
{
"epoch": 0.5753424657534246,
"grad_norm": 56497.0859375,
"learning_rate": 8.279968616363418e-05,
"loss": 1.2885,
"step": 252
},
{
"epoch": 0.5776255707762558,
"grad_norm": 64090.8359375,
"learning_rate": 8.205980294192421e-05,
"loss": 1.3739,
"step": 253
},
{
"epoch": 0.5799086757990868,
"grad_norm": 56467.49609375,
"learning_rate": 8.132093310025571e-05,
"loss": 1.0083,
"step": 254
},
{
"epoch": 0.5821917808219178,
"grad_norm": 66332.6328125,
"learning_rate": 8.058311837484535e-05,
"loss": 1.2237,
"step": 255
},
{
"epoch": 0.5844748858447488,
"grad_norm": 68149.140625,
"learning_rate": 7.984640044230983e-05,
"loss": 0.8762,
"step": 256
},
{
"epoch": 0.58675799086758,
"grad_norm": 56869.44140625,
"learning_rate": 7.911082091731181e-05,
"loss": 1.4265,
"step": 257
},
{
"epoch": 0.589041095890411,
"grad_norm": 66520.6640625,
"learning_rate": 7.837642135020929e-05,
"loss": 1.3615,
"step": 258
},
{
"epoch": 0.591324200913242,
"grad_norm": 50649.890625,
"learning_rate": 7.764324322470841e-05,
"loss": 1.1493,
"step": 259
},
{
"epoch": 0.593607305936073,
"grad_norm": 55484.89453125,
"learning_rate": 7.691132795552043e-05,
"loss": 1.125,
"step": 260
},
{
"epoch": 0.5958904109589042,
"grad_norm": 69880.4765625,
"learning_rate": 7.618071688602199e-05,
"loss": 1.0974,
"step": 261
},
{
"epoch": 0.5981735159817352,
"grad_norm": 69534.0234375,
"learning_rate": 7.54514512859201e-05,
"loss": 1.4658,
"step": 262
},
{
"epoch": 0.6004566210045662,
"grad_norm": 63003.56640625,
"learning_rate": 7.472357234892082e-05,
"loss": 1.3326,
"step": 263
},
{
"epoch": 0.6027397260273972,
"grad_norm": 48729.6796875,
"learning_rate": 7.399712119040238e-05,
"loss": 0.9631,
"step": 264
},
{
"epoch": 0.6050228310502284,
"grad_norm": 53327.13671875,
"learning_rate": 7.327213884509272e-05,
"loss": 0.8053,
"step": 265
},
{
"epoch": 0.6073059360730594,
"grad_norm": 56525.19921875,
"learning_rate": 7.254866626475152e-05,
"loss": 0.9795,
"step": 266
},
{
"epoch": 0.6095890410958904,
"grad_norm": 54734.92578125,
"learning_rate": 7.182674431585704e-05,
"loss": 1.0737,
"step": 267
},
{
"epoch": 0.6118721461187214,
"grad_norm": 62297.49609375,
"learning_rate": 7.110641377729778e-05,
"loss": 1.3582,
"step": 268
},
{
"epoch": 0.6141552511415526,
"grad_norm": 69470.6484375,
"learning_rate": 7.038771533806884e-05,
"loss": 1.261,
"step": 269
},
{
"epoch": 0.6164383561643836,
"grad_norm": 75963.8046875,
"learning_rate": 6.967068959497376e-05,
"loss": 1.2154,
"step": 270
},
{
"epoch": 0.6187214611872146,
"grad_norm": 66193.421875,
"learning_rate": 6.895537705033108e-05,
"loss": 1.2711,
"step": 271
},
{
"epoch": 0.6210045662100456,
"grad_norm": 53985.34765625,
"learning_rate": 6.824181810968675e-05,
"loss": 0.9666,
"step": 272
},
{
"epoch": 0.6232876712328768,
"grad_norm": 54472.45703125,
"learning_rate": 6.753005307953167e-05,
"loss": 0.7533,
"step": 273
},
{
"epoch": 0.6255707762557078,
"grad_norm": 56472.04296875,
"learning_rate": 6.682012216502484e-05,
"loss": 1.1015,
"step": 274
},
{
"epoch": 0.6278538812785388,
"grad_norm": 59576.98046875,
"learning_rate": 6.611206546772237e-05,
"loss": 1.243,
"step": 275
},
{
"epoch": 0.6301369863013698,
"grad_norm": 46372.140625,
"learning_rate": 6.54059229833124e-05,
"loss": 0.919,
"step": 276
},
{
"epoch": 0.632420091324201,
"grad_norm": 67258.171875,
"learning_rate": 6.47017345993556e-05,
"loss": 1.2923,
"step": 277
},
{
"epoch": 0.634703196347032,
"grad_norm": 97251.375,
"learning_rate": 6.39995400930324e-05,
"loss": 1.3966,
"step": 278
},
{
"epoch": 0.636986301369863,
"grad_norm": 55997.97265625,
"learning_rate": 6.329937912889582e-05,
"loss": 0.7084,
"step": 279
},
{
"epoch": 0.639269406392694,
"grad_norm": 52094.359375,
"learning_rate": 6.260129125663106e-05,
"loss": 0.9639,
"step": 280
},
{
"epoch": 0.6415525114155252,
"grad_norm": 59095.65625,
"learning_rate": 6.190531590882159e-05,
"loss": 1.2343,
"step": 281
},
{
"epoch": 0.6438356164383562,
"grad_norm": 50110.4375,
"learning_rate": 6.121149239872151e-05,
"loss": 1.1458,
"step": 282
},
{
"epoch": 0.6461187214611872,
"grad_norm": 61831.3359375,
"learning_rate": 6.051985991803517e-05,
"loss": 1.1047,
"step": 283
},
{
"epoch": 0.6484018264840182,
"grad_norm": 68382.28125,
"learning_rate": 5.983045753470308e-05,
"loss": 1.0395,
"step": 284
},
{
"epoch": 0.6506849315068494,
"grad_norm": 52874.1796875,
"learning_rate": 5.9143324190695196e-05,
"loss": 1.1445,
"step": 285
},
{
"epoch": 0.6529680365296804,
"grad_norm": 53148.92578125,
"learning_rate": 5.845849869981137e-05,
"loss": 1.1164,
"step": 286
},
{
"epoch": 0.6552511415525114,
"grad_norm": 49849.4765625,
"learning_rate": 5.777601974548866e-05,
"loss": 0.8629,
"step": 287
},
{
"epoch": 0.6575342465753424,
"grad_norm": 55761.171875,
"learning_rate": 5.709592587861637e-05,
"loss": 1.0324,
"step": 288
},
{
"epoch": 0.6598173515981736,
"grad_norm": 47947.84765625,
"learning_rate": 5.6418255515358486e-05,
"loss": 0.9341,
"step": 289
},
{
"epoch": 0.6621004566210046,
"grad_norm": 69258.109375,
"learning_rate": 5.574304693498346e-05,
"loss": 1.1231,
"step": 290
},
{
"epoch": 0.6643835616438356,
"grad_norm": 48883.62109375,
"learning_rate": 5.507033827770225e-05,
"loss": 1.0446,
"step": 291
},
{
"epoch": 0.6666666666666666,
"grad_norm": 48850.08203125,
"learning_rate": 5.4400167542513636e-05,
"loss": 1.0385,
"step": 292
},
{
"epoch": 0.6689497716894978,
"grad_norm": 58763.58984375,
"learning_rate": 5.3732572585057974e-05,
"loss": 1.3623,
"step": 293
},
{
"epoch": 0.6712328767123288,
"grad_norm": 53041.3046875,
"learning_rate": 5.306759111547881e-05,
"loss": 1.1103,
"step": 294
},
{
"epoch": 0.6735159817351598,
"grad_norm": 54536.75390625,
"learning_rate": 5.240526069629265e-05,
"loss": 1.359,
"step": 295
},
{
"epoch": 0.6757990867579908,
"grad_norm": 55320.109375,
"learning_rate": 5.174561874026741e-05,
"loss": 1.1521,
"step": 296
},
{
"epoch": 0.678082191780822,
"grad_norm": 56988.97265625,
"learning_rate": 5.108870250830882e-05,
"loss": 1.2104,
"step": 297
},
{
"epoch": 0.680365296803653,
"grad_norm": 65441.95703125,
"learning_rate": 5.0434549107355944e-05,
"loss": 1.0381,
"step": 298
},
{
"epoch": 0.682648401826484,
"grad_norm": 54248.94921875,
"learning_rate": 4.978319548828504e-05,
"loss": 1.0877,
"step": 299
},
{
"epoch": 0.684931506849315,
"grad_norm": 62817.375,
"learning_rate": 4.9134678443822166e-05,
"loss": 1.1492,
"step": 300
},
{
"epoch": 0.6872146118721462,
"grad_norm": 55446.65234375,
"learning_rate": 4.8489034606465225e-05,
"loss": 1.078,
"step": 301
},
{
"epoch": 0.6894977168949772,
"grad_norm": 61516.6484375,
"learning_rate": 4.784630044641435e-05,
"loss": 1.0592,
"step": 302
},
{
"epoch": 0.6917808219178082,
"grad_norm": 53431.09375,
"learning_rate": 4.7206512269512124e-05,
"loss": 1.2311,
"step": 303
},
{
"epoch": 0.6940639269406392,
"grad_norm": 68350.4609375,
"learning_rate": 4.65697062151927e-05,
"loss": 0.9918,
"step": 304
},
{
"epoch": 0.6963470319634704,
"grad_norm": 53722.33984375,
"learning_rate": 4.593591825444028e-05,
"loss": 1.0563,
"step": 305
},
{
"epoch": 0.6986301369863014,
"grad_norm": 73647.765625,
"learning_rate": 4.530518418775733e-05,
"loss": 1.1067,
"step": 306
},
{
"epoch": 0.7009132420091324,
"grad_norm": 60410.765625,
"learning_rate": 4.4677539643142454e-05,
"loss": 1.2064,
"step": 307
},
{
"epoch": 0.7031963470319634,
"grad_norm": 74342.1328125,
"learning_rate": 4.40530200740777e-05,
"loss": 1.2019,
"step": 308
},
{
"epoch": 0.7054794520547946,
"grad_norm": 58122.41796875,
"learning_rate": 4.343166075752605e-05,
"loss": 1.0697,
"step": 309
},
{
"epoch": 0.7077625570776256,
"grad_norm": 68661.671875,
"learning_rate": 4.281349679193861e-05,
"loss": 1.444,
"step": 310
},
{
"epoch": 0.7100456621004566,
"grad_norm": 60156.05859375,
"learning_rate": 4.2198563095272116e-05,
"loss": 1.0489,
"step": 311
},
{
"epoch": 0.7123287671232876,
"grad_norm": 72793.8359375,
"learning_rate": 4.158689440301657e-05,
"loss": 1.225,
"step": 312
},
{
"epoch": 0.7146118721461188,
"grad_norm": 61000.60546875,
"learning_rate": 4.097852526623307e-05,
"loss": 1.1954,
"step": 313
},
{
"epoch": 0.7168949771689498,
"grad_norm": 60486.58203125,
"learning_rate": 4.0373490049602204e-05,
"loss": 1.0551,
"step": 314
},
{
"epoch": 0.7191780821917808,
"grad_norm": 91112.53125,
"learning_rate": 3.977182292948283e-05,
"loss": 1.2928,
"step": 315
},
{
"epoch": 0.7214611872146118,
"grad_norm": 60165.15234375,
"learning_rate": 3.9173557891981573e-05,
"loss": 0.9197,
"step": 316
},
{
"epoch": 0.723744292237443,
"grad_norm": 63943.1875,
"learning_rate": 3.857872873103322e-05,
"loss": 1.0148,
"step": 317
},
{
"epoch": 0.726027397260274,
"grad_norm": 66577.5234375,
"learning_rate": 3.7987369046491684e-05,
"loss": 1.1527,
"step": 318
},
{
"epoch": 0.728310502283105,
"grad_norm": 46133.828125,
"learning_rate": 3.7399512242231995e-05,
"loss": 0.6874,
"step": 319
},
{
"epoch": 0.730593607305936,
"grad_norm": 54615.4296875,
"learning_rate": 3.6815191524263624e-05,
"loss": 1.2463,
"step": 320
},
{
"epoch": 0.7328767123287672,
"grad_norm": 60014.6015625,
"learning_rate": 3.623443989885462e-05,
"loss": 1.3196,
"step": 321
},
{
"epoch": 0.7351598173515982,
"grad_norm": 53860.69140625,
"learning_rate": 3.565729017066729e-05,
"loss": 1.0468,
"step": 322
},
{
"epoch": 0.7374429223744292,
"grad_norm": 55772.921875,
"learning_rate": 3.508377494090521e-05,
"loss": 1.0851,
"step": 323
},
{
"epoch": 0.7397260273972602,
"grad_norm": 53785.3203125,
"learning_rate": 3.45139266054715e-05,
"loss": 1.2084,
"step": 324
},
{
"epoch": 0.7420091324200914,
"grad_norm": 57143.98046875,
"learning_rate": 3.394777735313919e-05,
"loss": 1.031,
"step": 325
},
{
"epoch": 0.7442922374429224,
"grad_norm": 60805.40625,
"learning_rate": 3.338535916373266e-05,
"loss": 1.2843,
"step": 326
},
{
"epoch": 0.7465753424657534,
"grad_norm": 56165.6875,
"learning_rate": 3.2826703806321525e-05,
"loss": 0.9815,
"step": 327
},
{
"epoch": 0.7488584474885844,
"grad_norm": 65290.7421875,
"learning_rate": 3.227184283742591e-05,
"loss": 1.2205,
"step": 328
},
{
"epoch": 0.7511415525114156,
"grad_norm": 59236.3828125,
"learning_rate": 3.17208075992339e-05,
"loss": 1.0087,
"step": 329
},
{
"epoch": 0.7534246575342466,
"grad_norm": 68966.4921875,
"learning_rate": 3.117362921783134e-05,
"loss": 1.2744,
"step": 330
},
{
"epoch": 0.7534246575342466,
"eval_loss": 1.1098405122756958,
"eval_runtime": 5.9855,
"eval_samples_per_second": 16.707,
"eval_steps_per_second": 8.354,
"step": 330
},
{
"epoch": 0.7557077625570776,
"grad_norm": 57247.7265625,
"learning_rate": 3.063033860144339e-05,
"loss": 1.3947,
"step": 331
},
{
"epoch": 0.7579908675799086,
"grad_norm": 60022.890625,
"learning_rate": 3.0090966438688772e-05,
"loss": 1.4142,
"step": 332
},
{
"epoch": 0.7602739726027398,
"grad_norm": 48251.8359375,
"learning_rate": 2.9555543196846292e-05,
"loss": 0.9917,
"step": 333
},
{
"epoch": 0.7625570776255708,
"grad_norm": 50926.33203125,
"learning_rate": 2.9024099120133673e-05,
"loss": 1.0497,
"step": 334
},
{
"epoch": 0.7648401826484018,
"grad_norm": 51535.24609375,
"learning_rate": 2.8496664227999415e-05,
"loss": 1.0956,
"step": 335
},
{
"epoch": 0.7671232876712328,
"grad_norm": 47902.59375,
"learning_rate": 2.7973268313426837e-05,
"loss": 1.182,
"step": 336
},
{
"epoch": 0.769406392694064,
"grad_norm": 57668.80859375,
"learning_rate": 2.745394094125141e-05,
"loss": 1.1184,
"step": 337
},
{
"epoch": 0.771689497716895,
"grad_norm": 65797.921875,
"learning_rate": 2.6938711446490606e-05,
"loss": 1.524,
"step": 338
},
{
"epoch": 0.773972602739726,
"grad_norm": 44397.484375,
"learning_rate": 2.6427608932686843e-05,
"loss": 1.0515,
"step": 339
},
{
"epoch": 0.776255707762557,
"grad_norm": 88163.90625,
"learning_rate": 2.5920662270263653e-05,
"loss": 1.291,
"step": 340
},
{
"epoch": 0.7785388127853882,
"grad_norm": 91966.3046875,
"learning_rate": 2.5417900094894744e-05,
"loss": 1.5279,
"step": 341
},
{
"epoch": 0.7808219178082192,
"grad_norm": 54689.46875,
"learning_rate": 2.4919350805886577e-05,
"loss": 1.0487,
"step": 342
},
{
"epoch": 0.7831050228310502,
"grad_norm": 60095.0703125,
"learning_rate": 2.4425042564574184e-05,
"loss": 1.3099,
"step": 343
},
{
"epoch": 0.7853881278538812,
"grad_norm": 54892.97265625,
"learning_rate": 2.3935003292730296e-05,
"loss": 1.1576,
"step": 344
},
{
"epoch": 0.7876712328767124,
"grad_norm": 99210.53125,
"learning_rate": 2.344926067098836e-05,
"loss": 1.846,
"step": 345
},
{
"epoch": 0.7899543378995434,
"grad_norm": 65337.87890625,
"learning_rate": 2.2967842137278706e-05,
"loss": 1.1789,
"step": 346
},
{
"epoch": 0.7922374429223744,
"grad_norm": 71690.0625,
"learning_rate": 2.2490774885278908e-05,
"loss": 1.3072,
"step": 347
},
{
"epoch": 0.7945205479452054,
"grad_norm": 58516.3828125,
"learning_rate": 2.201808586287757e-05,
"loss": 1.0443,
"step": 348
},
{
"epoch": 0.7968036529680366,
"grad_norm": 72086.8828125,
"learning_rate": 2.15498017706521e-05,
"loss": 1.1584,
"step": 349
},
{
"epoch": 0.7990867579908676,
"grad_norm": 58905.08984375,
"learning_rate": 2.1085949060360654e-05,
"loss": 1.2844,
"step": 350
},
{
"epoch": 0.8013698630136986,
"grad_norm": 56423.5703125,
"learning_rate": 2.0626553933447734e-05,
"loss": 1.1755,
"step": 351
},
{
"epoch": 0.8036529680365296,
"grad_norm": 55340.45703125,
"learning_rate": 2.01716423395644e-05,
"loss": 1.08,
"step": 352
},
{
"epoch": 0.8059360730593608,
"grad_norm": 56130.5859375,
"learning_rate": 1.9721239975102313e-05,
"loss": 1.1129,
"step": 353
},
{
"epoch": 0.8082191780821918,
"grad_norm": 58951.8046875,
"learning_rate": 1.9275372281742242e-05,
"loss": 0.9749,
"step": 354
},
{
"epoch": 0.8105022831050228,
"grad_norm": 58070.171875,
"learning_rate": 1.8834064445016953e-05,
"loss": 1.3564,
"step": 355
},
{
"epoch": 0.8127853881278538,
"grad_norm": 56061.4921875,
"learning_rate": 1.839734139288868e-05,
"loss": 1.2425,
"step": 356
},
{
"epoch": 0.815068493150685,
"grad_norm": 56343.59375,
"learning_rate": 1.7965227794340877e-05,
"loss": 0.9662,
"step": 357
},
{
"epoch": 0.817351598173516,
"grad_norm": 61289.234375,
"learning_rate": 1.753774805798486e-05,
"loss": 1.1741,
"step": 358
},
{
"epoch": 0.819634703196347,
"grad_norm": 98183.703125,
"learning_rate": 1.7114926330680957e-05,
"loss": 0.9826,
"step": 359
},
{
"epoch": 0.821917808219178,
"grad_norm": 55233.2890625,
"learning_rate": 1.6696786496174578e-05,
"loss": 1.0732,
"step": 360
},
{
"epoch": 0.8242009132420092,
"grad_norm": 45176.41015625,
"learning_rate": 1.6283352173747145e-05,
"loss": 1.0048,
"step": 361
},
{
"epoch": 0.8264840182648402,
"grad_norm": 66357.2109375,
"learning_rate": 1.587464671688187e-05,
"loss": 1.1619,
"step": 362
},
{
"epoch": 0.8287671232876712,
"grad_norm": 57376.734375,
"learning_rate": 1.5470693211944643e-05,
"loss": 1.1175,
"step": 363
},
{
"epoch": 0.8310502283105022,
"grad_norm": 54212.546875,
"learning_rate": 1.5071514476879878e-05,
"loss": 0.9726,
"step": 364
},
{
"epoch": 0.8333333333333334,
"grad_norm": 65802.8515625,
"learning_rate": 1.4677133059921632e-05,
"loss": 1.3092,
"step": 365
},
{
"epoch": 0.8356164383561644,
"grad_norm": 49888.99609375,
"learning_rate": 1.4287571238320053e-05,
"loss": 0.5516,
"step": 366
},
{
"epoch": 0.8378995433789954,
"grad_norm": 66406.1875,
"learning_rate": 1.3902851017082864e-05,
"loss": 0.9952,
"step": 367
},
{
"epoch": 0.8401826484018264,
"grad_norm": 57395.77734375,
"learning_rate": 1.3522994127732414e-05,
"loss": 1.1279,
"step": 368
},
{
"epoch": 0.8424657534246576,
"grad_norm": 79530.40625,
"learning_rate": 1.3148022027078222e-05,
"loss": 1.8326,
"step": 369
},
{
"epoch": 0.8447488584474886,
"grad_norm": 63298.96875,
"learning_rate": 1.2777955896004812e-05,
"loss": 1.0041,
"step": 370
},
{
"epoch": 0.8470319634703196,
"grad_norm": 74275.5234375,
"learning_rate": 1.2412816638275404e-05,
"loss": 1.3752,
"step": 371
},
{
"epoch": 0.8493150684931506,
"grad_norm": 57181.0390625,
"learning_rate": 1.2052624879351104e-05,
"loss": 1.3001,
"step": 372
},
{
"epoch": 0.8515981735159818,
"grad_norm": 56421.484375,
"learning_rate": 1.1697400965225747e-05,
"loss": 1.1496,
"step": 373
},
{
"epoch": 0.8538812785388128,
"grad_norm": 55708.265625,
"learning_rate": 1.134716496127679e-05,
"loss": 1.0953,
"step": 374
},
{
"epoch": 0.8561643835616438,
"grad_norm": 64520.75,
"learning_rate": 1.1001936651131717e-05,
"loss": 0.9648,
"step": 375
},
{
"epoch": 0.8584474885844748,
"grad_norm": 67208.2578125,
"learning_rate": 1.0661735535550666e-05,
"loss": 1.465,
"step": 376
},
{
"epoch": 0.860730593607306,
"grad_norm": 69055.765625,
"learning_rate": 1.0326580831324817e-05,
"loss": 1.1437,
"step": 377
},
{
"epoch": 0.863013698630137,
"grad_norm": 56862.21875,
"learning_rate": 9.996491470190917e-06,
"loss": 1.1829,
"step": 378
},
{
"epoch": 0.865296803652968,
"grad_norm": 43474.109375,
"learning_rate": 9.671486097761917e-06,
"loss": 0.7937,
"step": 379
},
{
"epoch": 0.867579908675799,
"grad_norm": 58035.9140625,
"learning_rate": 9.351583072473713e-06,
"loss": 1.2742,
"step": 380
},
{
"epoch": 0.8698630136986302,
"grad_norm": 59717.421875,
"learning_rate": 9.036800464548157e-06,
"loss": 1.245,
"step": 381
},
{
"epoch": 0.8721461187214612,
"grad_norm": 100361.84375,
"learning_rate": 8.727156054972374e-06,
"loss": 0.9628,
"step": 382
},
{
"epoch": 0.8744292237442922,
"grad_norm": 46780.90234375,
"learning_rate": 8.422667334494249e-06,
"loss": 0.7594,
"step": 383
},
{
"epoch": 0.8767123287671232,
"grad_norm": 58426.484375,
"learning_rate": 8.123351502634625e-06,
"loss": 1.3035,
"step": 384
},
{
"epoch": 0.8789954337899544,
"grad_norm": 55850.3046875,
"learning_rate": 7.82922546671555e-06,
"loss": 1.3021,
"step": 385
},
{
"epoch": 0.8812785388127854,
"grad_norm": 65162.09375,
"learning_rate": 7.54030584090537e-06,
"loss": 1.214,
"step": 386
},
{
"epoch": 0.8835616438356164,
"grad_norm": 50473.97265625,
"learning_rate": 7.256608945280319e-06,
"loss": 1.1281,
"step": 387
},
{
"epoch": 0.8858447488584474,
"grad_norm": 50409.5390625,
"learning_rate": 6.97815080490245e-06,
"loss": 1.2092,
"step": 388
},
{
"epoch": 0.8881278538812786,
"grad_norm": 56921.63671875,
"learning_rate": 6.704947148914609e-06,
"loss": 1.0073,
"step": 389
},
{
"epoch": 0.8904109589041096,
"grad_norm": 72200.1875,
"learning_rate": 6.437013409651849e-06,
"loss": 1.0158,
"step": 390
},
{
"epoch": 0.8926940639269406,
"grad_norm": 48175.20703125,
"learning_rate": 6.174364721769743e-06,
"loss": 0.939,
"step": 391
},
{
"epoch": 0.8949771689497716,
"grad_norm": 53166.9296875,
"learning_rate": 5.917015921389568e-06,
"loss": 1.0468,
"step": 392
},
{
"epoch": 0.8972602739726028,
"grad_norm": 58858.93359375,
"learning_rate": 5.664981545260073e-06,
"loss": 1.0791,
"step": 393
},
{
"epoch": 0.8995433789954338,
"grad_norm": 49101.5078125,
"learning_rate": 5.418275829936537e-06,
"loss": 1.1875,
"step": 394
},
{
"epoch": 0.9018264840182648,
"grad_norm": 65572.046875,
"learning_rate": 5.176912710976467e-06,
"loss": 1.326,
"step": 395
},
{
"epoch": 0.9041095890410958,
"grad_norm": 61459.63671875,
"learning_rate": 4.940905822152453e-06,
"loss": 1.0855,
"step": 396
},
{
"epoch": 0.906392694063927,
"grad_norm": 64900.1796875,
"learning_rate": 4.710268494682146e-06,
"loss": 1.3393,
"step": 397
},
{
"epoch": 0.908675799086758,
"grad_norm": 52331.64453125,
"learning_rate": 4.485013756475076e-06,
"loss": 1.0434,
"step": 398
},
{
"epoch": 0.910958904109589,
"grad_norm": 71229.609375,
"learning_rate": 4.2651543313968145e-06,
"loss": 1.2118,
"step": 399
},
{
"epoch": 0.91324200913242,
"grad_norm": 63443.84375,
"learning_rate": 4.050702638550275e-06,
"loss": 1.1458,
"step": 400
},
{
"epoch": 0.9155251141552512,
"grad_norm": 64409.73828125,
"learning_rate": 3.841670791574137e-06,
"loss": 1.1705,
"step": 401
},
{
"epoch": 0.9178082191780822,
"grad_norm": 81092.15625,
"learning_rate": 3.638070597958665e-06,
"loss": 1.2991,
"step": 402
},
{
"epoch": 0.9200913242009132,
"grad_norm": 41609.2109375,
"learning_rate": 3.4399135583787043e-06,
"loss": 0.4622,
"step": 403
},
{
"epoch": 0.9223744292237442,
"grad_norm": 58560.71875,
"learning_rate": 3.2472108660439706e-06,
"loss": 0.8759,
"step": 404
},
{
"epoch": 0.9246575342465754,
"grad_norm": 65780.9609375,
"learning_rate": 3.059973406066963e-06,
"loss": 1.3583,
"step": 405
},
{
"epoch": 0.9269406392694064,
"grad_norm": 53281.921875,
"learning_rate": 2.878211754847926e-06,
"loss": 0.971,
"step": 406
},
{
"epoch": 0.9292237442922374,
"grad_norm": 62492.41015625,
"learning_rate": 2.7019361794775156e-06,
"loss": 1.1152,
"step": 407
},
{
"epoch": 0.9315068493150684,
"grad_norm": 57761.875,
"learning_rate": 2.5311566371568507e-06,
"loss": 1.1735,
"step": 408
},
{
"epoch": 0.9337899543378996,
"grad_norm": 53913.1640625,
"learning_rate": 2.365882774634998e-06,
"loss": 0.9759,
"step": 409
},
{
"epoch": 0.9360730593607306,
"grad_norm": 57200.4375,
"learning_rate": 2.206123927664161e-06,
"loss": 1.262,
"step": 410
},
{
"epoch": 0.9383561643835616,
"grad_norm": 64508.68359375,
"learning_rate": 2.0518891204722168e-06,
"loss": 1.1443,
"step": 411
},
{
"epoch": 0.9406392694063926,
"grad_norm": 68100.71875,
"learning_rate": 1.903187065253076e-06,
"loss": 1.156,
"step": 412
},
{
"epoch": 0.9429223744292238,
"grad_norm": 46483.26171875,
"learning_rate": 1.7600261616745106e-06,
"loss": 0.9084,
"step": 413
},
{
"epoch": 0.9452054794520548,
"grad_norm": 75558.0078125,
"learning_rate": 1.6224144964036681e-06,
"loss": 1.3323,
"step": 414
},
{
"epoch": 0.9474885844748858,
"grad_norm": 61868.96484375,
"learning_rate": 1.4903598426503241e-06,
"loss": 1.6048,
"step": 415
},
{
"epoch": 0.9497716894977168,
"grad_norm": 48915.76953125,
"learning_rate": 1.3638696597277679e-06,
"loss": 0.9012,
"step": 416
},
{
"epoch": 0.952054794520548,
"grad_norm": 75366.640625,
"learning_rate": 1.2429510926314836e-06,
"loss": 1.132,
"step": 417
},
{
"epoch": 0.954337899543379,
"grad_norm": 60648.18359375,
"learning_rate": 1.1276109716355287e-06,
"loss": 1.0748,
"step": 418
},
{
"epoch": 0.95662100456621,
"grad_norm": 55365.9765625,
"learning_rate": 1.0178558119067315e-06,
"loss": 0.827,
"step": 419
},
{
"epoch": 0.958904109589041,
"grad_norm": 58780.57421875,
"learning_rate": 9.136918131366412e-07,
"loss": 1.2993,
"step": 420
},
{
"epoch": 0.9611872146118722,
"grad_norm": 47850.671875,
"learning_rate": 8.151248591913518e-07,
"loss": 1.0673,
"step": 421
},
{
"epoch": 0.9634703196347032,
"grad_norm": 50755.59765625,
"learning_rate": 7.221605177791691e-07,
"loss": 1.0819,
"step": 422
},
{
"epoch": 0.9657534246575342,
"grad_norm": 68484.7265625,
"learning_rate": 6.348040401360833e-07,
"loss": 1.3769,
"step": 423
},
{
"epoch": 0.9680365296803652,
"grad_norm": 59223.92578125,
"learning_rate": 5.530603607290851e-07,
"loss": 1.3159,
"step": 424
},
{
"epoch": 0.9703196347031964,
"grad_norm": 63650.15234375,
"learning_rate": 4.76934096977566e-07,
"loss": 1.3134,
"step": 425
},
{
"epoch": 0.9726027397260274,
"grad_norm": 62978.29296875,
"learning_rate": 4.0642954899238197e-07,
"loss": 0.9593,
"step": 426
},
{
"epoch": 0.9748858447488584,
"grad_norm": 63336.55078125,
"learning_rate": 3.415506993330153e-07,
"loss": 1.1585,
"step": 427
},
{
"epoch": 0.9771689497716894,
"grad_norm": 58021.15625,
"learning_rate": 2.8230121278257637e-07,
"loss": 1.2236,
"step": 428
},
{
"epoch": 0.9794520547945206,
"grad_norm": 56498.8515625,
"learning_rate": 2.2868443614082469e-07,
"loss": 1.0606,
"step": 429
},
{
"epoch": 0.9817351598173516,
"grad_norm": 48720.328125,
"learning_rate": 1.8070339803509807e-07,
"loss": 1.123,
"step": 430
},
{
"epoch": 0.9840182648401826,
"grad_norm": 60693.24609375,
"learning_rate": 1.3836080874926049e-07,
"loss": 0.5269,
"step": 431
},
{
"epoch": 0.9863013698630136,
"grad_norm": 123484.75,
"learning_rate": 1.0165906007056914e-07,
"loss": 1.2241,
"step": 432
},
{
"epoch": 0.9885844748858448,
"grad_norm": 52265.00390625,
"learning_rate": 7.060022515460451e-08,
"loss": 0.8173,
"step": 433
},
{
"epoch": 0.9908675799086758,
"grad_norm": 54763.09375,
"learning_rate": 4.518605840815315e-08,
"loss": 1.2235,
"step": 434
},
{
"epoch": 0.9931506849315068,
"grad_norm": 67981.8515625,
"learning_rate": 2.5417995390086824e-08,
"loss": 1.3369,
"step": 435
},
{
"epoch": 0.9954337899543378,
"grad_norm": 53286.59375,
"learning_rate": 1.129715273033849e-08,
"loss": 0.9584,
"step": 436
},
{
"epoch": 0.997716894977169,
"grad_norm": 54882.32421875,
"learning_rate": 2.824328066730608e-09,
"loss": 1.0711,
"step": 437
},
{
"epoch": 1.0,
"grad_norm": 54870.58984375,
"learning_rate": 0.0,
"loss": 1.1062,
"step": 438
}
],
"logging_steps": 1,
"max_steps": 438,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7972410614906880.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}