{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.945533769063181,
"eval_steps": 115,
"global_step": 1377,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.002178649237472767,
"grad_norm": 0.4049851596355438,
"learning_rate": 1e-05,
"loss": 1.9628,
"step": 1
},
{
"epoch": 0.002178649237472767,
"eval_loss": 1.915004849433899,
"eval_runtime": 0.9396,
"eval_samples_per_second": 181.996,
"eval_steps_per_second": 13.836,
"step": 1
},
{
"epoch": 0.004357298474945534,
"grad_norm": 0.38593971729278564,
"learning_rate": 2e-05,
"loss": 2.0388,
"step": 2
},
{
"epoch": 0.006535947712418301,
"grad_norm": 0.38570573925971985,
"learning_rate": 3e-05,
"loss": 1.932,
"step": 3
},
{
"epoch": 0.008714596949891068,
"grad_norm": 0.35365748405456543,
"learning_rate": 4e-05,
"loss": 1.866,
"step": 4
},
{
"epoch": 0.010893246187363835,
"grad_norm": 0.4407881796360016,
"learning_rate": 5e-05,
"loss": 1.9959,
"step": 5
},
{
"epoch": 0.013071895424836602,
"grad_norm": 0.3517284095287323,
"learning_rate": 6e-05,
"loss": 1.8822,
"step": 6
},
{
"epoch": 0.015250544662309368,
"grad_norm": 0.45338165760040283,
"learning_rate": 7e-05,
"loss": 1.8933,
"step": 7
},
{
"epoch": 0.017429193899782137,
"grad_norm": 0.3475826680660248,
"learning_rate": 8e-05,
"loss": 1.8337,
"step": 8
},
{
"epoch": 0.0196078431372549,
"grad_norm": 0.4348187744617462,
"learning_rate": 9e-05,
"loss": 1.9261,
"step": 9
},
{
"epoch": 0.02178649237472767,
"grad_norm": 0.3879510164260864,
"learning_rate": 0.0001,
"loss": 1.7648,
"step": 10
},
{
"epoch": 0.023965141612200435,
"grad_norm": 0.3244905471801758,
"learning_rate": 0.00011000000000000002,
"loss": 1.7833,
"step": 11
},
{
"epoch": 0.026143790849673203,
"grad_norm": 0.3566845953464508,
"learning_rate": 0.00012,
"loss": 1.786,
"step": 12
},
{
"epoch": 0.02832244008714597,
"grad_norm": 0.35482919216156006,
"learning_rate": 0.00013000000000000002,
"loss": 1.6593,
"step": 13
},
{
"epoch": 0.030501089324618737,
"grad_norm": 0.3647037744522095,
"learning_rate": 0.00014,
"loss": 1.696,
"step": 14
},
{
"epoch": 0.032679738562091505,
"grad_norm": 0.41642501950263977,
"learning_rate": 0.00015000000000000001,
"loss": 1.5964,
"step": 15
},
{
"epoch": 0.034858387799564274,
"grad_norm": 0.3616989254951477,
"learning_rate": 0.00016,
"loss": 1.7121,
"step": 16
},
{
"epoch": 0.037037037037037035,
"grad_norm": 0.39412033557891846,
"learning_rate": 0.00017,
"loss": 1.7392,
"step": 17
},
{
"epoch": 0.0392156862745098,
"grad_norm": 0.3788229823112488,
"learning_rate": 0.00018,
"loss": 1.793,
"step": 18
},
{
"epoch": 0.04139433551198257,
"grad_norm": 0.3376384377479553,
"learning_rate": 0.00019,
"loss": 1.6145,
"step": 19
},
{
"epoch": 0.04357298474945534,
"grad_norm": 0.3674631714820862,
"learning_rate": 0.0002,
"loss": 1.6802,
"step": 20
},
{
"epoch": 0.0457516339869281,
"grad_norm": 0.406034916639328,
"learning_rate": 0.00019999985036335823,
"loss": 1.7272,
"step": 21
},
{
"epoch": 0.04793028322440087,
"grad_norm": 0.34544336795806885,
"learning_rate": 0.00019999940145388063,
"loss": 1.592,
"step": 22
},
{
"epoch": 0.05010893246187364,
"grad_norm": 0.4000962972640991,
"learning_rate": 0.00019999865327291073,
"loss": 1.6177,
"step": 23
},
{
"epoch": 0.05228758169934641,
"grad_norm": 0.36951783299446106,
"learning_rate": 0.00019999760582268763,
"loss": 1.6261,
"step": 24
},
{
"epoch": 0.054466230936819175,
"grad_norm": 0.3870888948440552,
"learning_rate": 0.00019999625910634605,
"loss": 1.544,
"step": 25
},
{
"epoch": 0.05664488017429194,
"grad_norm": 0.4127906560897827,
"learning_rate": 0.00019999461312791638,
"loss": 1.5375,
"step": 26
},
{
"epoch": 0.058823529411764705,
"grad_norm": 0.43752938508987427,
"learning_rate": 0.00019999266789232455,
"loss": 1.4055,
"step": 27
},
{
"epoch": 0.06100217864923747,
"grad_norm": 0.44983282685279846,
"learning_rate": 0.0001999904234053922,
"loss": 1.5742,
"step": 28
},
{
"epoch": 0.06318082788671024,
"grad_norm": 0.4332844614982605,
"learning_rate": 0.0001999878796738364,
"loss": 1.5264,
"step": 29
},
{
"epoch": 0.06535947712418301,
"grad_norm": 0.4228737950325012,
"learning_rate": 0.00019998503670526994,
"loss": 1.4985,
"step": 30
},
{
"epoch": 0.06753812636165578,
"grad_norm": 0.4225306808948517,
"learning_rate": 0.000199981894508201,
"loss": 1.447,
"step": 31
},
{
"epoch": 0.06971677559912855,
"grad_norm": 0.5055080056190491,
"learning_rate": 0.00019997845309203334,
"loss": 1.4575,
"step": 32
},
{
"epoch": 0.0718954248366013,
"grad_norm": 0.4757756292819977,
"learning_rate": 0.0001999747124670662,
"loss": 1.3472,
"step": 33
},
{
"epoch": 0.07407407407407407,
"grad_norm": 0.4340977966785431,
"learning_rate": 0.00019997067264449433,
"loss": 1.3273,
"step": 34
},
{
"epoch": 0.07625272331154684,
"grad_norm": 0.4556865692138672,
"learning_rate": 0.0001999663336364078,
"loss": 1.43,
"step": 35
},
{
"epoch": 0.0784313725490196,
"grad_norm": 0.5178071856498718,
"learning_rate": 0.00019996169545579207,
"loss": 1.3286,
"step": 36
},
{
"epoch": 0.08061002178649238,
"grad_norm": 0.5154844522476196,
"learning_rate": 0.00019995675811652802,
"loss": 1.2845,
"step": 37
},
{
"epoch": 0.08278867102396514,
"grad_norm": 0.5944285988807678,
"learning_rate": 0.00019995152163339178,
"loss": 1.4411,
"step": 38
},
{
"epoch": 0.08496732026143791,
"grad_norm": 0.5691947340965271,
"learning_rate": 0.00019994598602205473,
"loss": 1.3807,
"step": 39
},
{
"epoch": 0.08714596949891068,
"grad_norm": 0.575366199016571,
"learning_rate": 0.00019994015129908346,
"loss": 1.3347,
"step": 40
},
{
"epoch": 0.08932461873638345,
"grad_norm": 0.5233891010284424,
"learning_rate": 0.00019993401748193978,
"loss": 1.303,
"step": 41
},
{
"epoch": 0.0915032679738562,
"grad_norm": 0.5643051862716675,
"learning_rate": 0.00019992758458898055,
"loss": 1.2618,
"step": 42
},
{
"epoch": 0.09368191721132897,
"grad_norm": 0.6836549043655396,
"learning_rate": 0.0001999208526394577,
"loss": 1.3218,
"step": 43
},
{
"epoch": 0.09586056644880174,
"grad_norm": 0.6471132040023804,
"learning_rate": 0.00019991382165351814,
"loss": 1.1933,
"step": 44
},
{
"epoch": 0.09803921568627451,
"grad_norm": 0.5644765496253967,
"learning_rate": 0.00019990649165220375,
"loss": 1.1135,
"step": 45
},
{
"epoch": 0.10021786492374728,
"grad_norm": 0.7101904153823853,
"learning_rate": 0.00019989886265745128,
"loss": 1.1919,
"step": 46
},
{
"epoch": 0.10239651416122005,
"grad_norm": 0.706234872341156,
"learning_rate": 0.00019989093469209224,
"loss": 1.1607,
"step": 47
},
{
"epoch": 0.10457516339869281,
"grad_norm": 0.6854044795036316,
"learning_rate": 0.00019988270777985292,
"loss": 1.1441,
"step": 48
},
{
"epoch": 0.10675381263616558,
"grad_norm": 0.6608173251152039,
"learning_rate": 0.00019987418194535427,
"loss": 1.0626,
"step": 49
},
{
"epoch": 0.10893246187363835,
"grad_norm": 0.7540091276168823,
"learning_rate": 0.00019986535721411186,
"loss": 1.1346,
"step": 50
},
{
"epoch": 0.1111111111111111,
"grad_norm": 0.783423125743866,
"learning_rate": 0.00019985623361253572,
"loss": 1.2105,
"step": 51
},
{
"epoch": 0.11328976034858387,
"grad_norm": 0.7029076814651489,
"learning_rate": 0.00019984681116793038,
"loss": 0.9689,
"step": 52
},
{
"epoch": 0.11546840958605664,
"grad_norm": 0.8416129350662231,
"learning_rate": 0.00019983708990849468,
"loss": 1.1176,
"step": 53
},
{
"epoch": 0.11764705882352941,
"grad_norm": 0.7312731146812439,
"learning_rate": 0.00019982706986332175,
"loss": 1.1695,
"step": 54
},
{
"epoch": 0.11982570806100218,
"grad_norm": 0.8590166568756104,
"learning_rate": 0.00019981675106239895,
"loss": 1.016,
"step": 55
},
{
"epoch": 0.12200435729847495,
"grad_norm": 0.8634907603263855,
"learning_rate": 0.00019980613353660763,
"loss": 1.0777,
"step": 56
},
{
"epoch": 0.12418300653594772,
"grad_norm": 0.6608163714408875,
"learning_rate": 0.00019979521731772323,
"loss": 0.9661,
"step": 57
},
{
"epoch": 0.12636165577342048,
"grad_norm": 0.9486388564109802,
"learning_rate": 0.00019978400243841508,
"loss": 0.8715,
"step": 58
},
{
"epoch": 0.12854030501089325,
"grad_norm": 0.8431762456893921,
"learning_rate": 0.00019977248893224636,
"loss": 1.0458,
"step": 59
},
{
"epoch": 0.13071895424836602,
"grad_norm": 1.00847589969635,
"learning_rate": 0.00019976067683367385,
"loss": 0.9081,
"step": 60
},
{
"epoch": 0.1328976034858388,
"grad_norm": 1.3647116422653198,
"learning_rate": 0.00019974856617804807,
"loss": 1.0181,
"step": 61
},
{
"epoch": 0.13507625272331156,
"grad_norm": 1.2597001791000366,
"learning_rate": 0.0001997361570016129,
"loss": 0.9373,
"step": 62
},
{
"epoch": 0.13725490196078433,
"grad_norm": 1.238145351409912,
"learning_rate": 0.00019972344934150577,
"loss": 0.9464,
"step": 63
},
{
"epoch": 0.1394335511982571,
"grad_norm": 1.114610195159912,
"learning_rate": 0.00019971044323575728,
"loss": 0.9163,
"step": 64
},
{
"epoch": 0.14161220043572983,
"grad_norm": 0.9760491847991943,
"learning_rate": 0.0001996971387232912,
"loss": 0.8424,
"step": 65
},
{
"epoch": 0.1437908496732026,
"grad_norm": 0.999609649181366,
"learning_rate": 0.0001996835358439244,
"loss": 0.8027,
"step": 66
},
{
"epoch": 0.14596949891067537,
"grad_norm": 0.9615645408630371,
"learning_rate": 0.00019966963463836668,
"loss": 0.9491,
"step": 67
},
{
"epoch": 0.14814814814814814,
"grad_norm": 0.9067331552505493,
"learning_rate": 0.00019965543514822062,
"loss": 0.9756,
"step": 68
},
{
"epoch": 0.1503267973856209,
"grad_norm": 1.0316940546035767,
"learning_rate": 0.00019964093741598152,
"loss": 0.7276,
"step": 69
},
{
"epoch": 0.15250544662309368,
"grad_norm": 0.7774396538734436,
"learning_rate": 0.00019962614148503718,
"loss": 0.8904,
"step": 70
},
{
"epoch": 0.15468409586056645,
"grad_norm": 1.0500309467315674,
"learning_rate": 0.0001996110473996679,
"loss": 0.8801,
"step": 71
},
{
"epoch": 0.1568627450980392,
"grad_norm": 0.8712791800498962,
"learning_rate": 0.00019959565520504623,
"loss": 0.992,
"step": 72
},
{
"epoch": 0.15904139433551198,
"grad_norm": 1.006437063217163,
"learning_rate": 0.0001995799649472369,
"loss": 0.6761,
"step": 73
},
{
"epoch": 0.16122004357298475,
"grad_norm": 1.0199809074401855,
"learning_rate": 0.00019956397667319668,
"loss": 0.7066,
"step": 74
},
{
"epoch": 0.16339869281045752,
"grad_norm": 1.2605611085891724,
"learning_rate": 0.0001995476904307742,
"loss": 0.6546,
"step": 75
},
{
"epoch": 0.1655773420479303,
"grad_norm": 0.9553707242012024,
"learning_rate": 0.00019953110626870979,
"loss": 0.9392,
"step": 76
},
{
"epoch": 0.16775599128540306,
"grad_norm": 0.909253716468811,
"learning_rate": 0.00019951422423663547,
"loss": 0.8757,
"step": 77
},
{
"epoch": 0.16993464052287582,
"grad_norm": 1.007814645767212,
"learning_rate": 0.00019949704438507459,
"loss": 0.877,
"step": 78
},
{
"epoch": 0.1721132897603486,
"grad_norm": 1.341426968574524,
"learning_rate": 0.00019947956676544192,
"loss": 0.8002,
"step": 79
},
{
"epoch": 0.17429193899782136,
"grad_norm": 1.153745174407959,
"learning_rate": 0.00019946179143004325,
"loss": 0.714,
"step": 80
},
{
"epoch": 0.17647058823529413,
"grad_norm": 1.0699673891067505,
"learning_rate": 0.00019944371843207546,
"loss": 0.9575,
"step": 81
},
{
"epoch": 0.1786492374727669,
"grad_norm": 0.9054269194602966,
"learning_rate": 0.0001994253478256262,
"loss": 0.8967,
"step": 82
},
{
"epoch": 0.18082788671023964,
"grad_norm": 1.3790533542633057,
"learning_rate": 0.0001994066796656737,
"loss": 1.0535,
"step": 83
},
{
"epoch": 0.1830065359477124,
"grad_norm": 1.1256961822509766,
"learning_rate": 0.0001993877140080869,
"loss": 0.7872,
"step": 84
},
{
"epoch": 0.18518518518518517,
"grad_norm": 0.8870573043823242,
"learning_rate": 0.0001993684509096249,
"loss": 0.9137,
"step": 85
},
{
"epoch": 0.18736383442265794,
"grad_norm": 1.1747201681137085,
"learning_rate": 0.000199348890427937,
"loss": 0.779,
"step": 86
},
{
"epoch": 0.1895424836601307,
"grad_norm": 0.8280813694000244,
"learning_rate": 0.00019932903262156245,
"loss": 0.8289,
"step": 87
},
{
"epoch": 0.19172113289760348,
"grad_norm": 0.984609842300415,
"learning_rate": 0.00019930887754993044,
"loss": 0.8238,
"step": 88
},
{
"epoch": 0.19389978213507625,
"grad_norm": 1.030261516571045,
"learning_rate": 0.00019928842527335968,
"loss": 0.7061,
"step": 89
},
{
"epoch": 0.19607843137254902,
"grad_norm": 0.8822032809257507,
"learning_rate": 0.00019926767585305835,
"loss": 0.8622,
"step": 90
},
{
"epoch": 0.19825708061002179,
"grad_norm": 0.996427059173584,
"learning_rate": 0.00019924662935112393,
"loss": 0.5348,
"step": 91
},
{
"epoch": 0.20043572984749455,
"grad_norm": 1.0308480262756348,
"learning_rate": 0.000199225285830543,
"loss": 0.8325,
"step": 92
},
{
"epoch": 0.20261437908496732,
"grad_norm": 0.8959431648254395,
"learning_rate": 0.000199203645355191,
"loss": 0.6136,
"step": 93
},
{
"epoch": 0.2047930283224401,
"grad_norm": 0.8773916363716125,
"learning_rate": 0.00019918170798983211,
"loss": 0.577,
"step": 94
},
{
"epoch": 0.20697167755991286,
"grad_norm": 1.091194748878479,
"learning_rate": 0.00019915947380011898,
"loss": 0.7751,
"step": 95
},
{
"epoch": 0.20915032679738563,
"grad_norm": 0.8473864197731018,
"learning_rate": 0.00019913694285259256,
"loss": 0.5831,
"step": 96
},
{
"epoch": 0.2113289760348584,
"grad_norm": 0.801262378692627,
"learning_rate": 0.00019911411521468205,
"loss": 0.6089,
"step": 97
},
{
"epoch": 0.21350762527233116,
"grad_norm": 0.9437965154647827,
"learning_rate": 0.00019909099095470444,
"loss": 0.7343,
"step": 98
},
{
"epoch": 0.21568627450980393,
"grad_norm": 1.1255544424057007,
"learning_rate": 0.00019906757014186442,
"loss": 0.6728,
"step": 99
},
{
"epoch": 0.2178649237472767,
"grad_norm": 0.930216372013092,
"learning_rate": 0.00019904385284625424,
"loss": 0.5675,
"step": 100
},
{
"epoch": 0.22004357298474944,
"grad_norm": 0.8021939396858215,
"learning_rate": 0.00019901983913885344,
"loss": 0.4423,
"step": 101
},
{
"epoch": 0.2222222222222222,
"grad_norm": 1.5028183460235596,
"learning_rate": 0.00019899552909152866,
"loss": 0.9797,
"step": 102
},
{
"epoch": 0.22440087145969498,
"grad_norm": 0.7115923762321472,
"learning_rate": 0.00019897092277703333,
"loss": 0.4128,
"step": 103
},
{
"epoch": 0.22657952069716775,
"grad_norm": 0.9592722058296204,
"learning_rate": 0.00019894602026900758,
"loss": 0.8714,
"step": 104
},
{
"epoch": 0.22875816993464052,
"grad_norm": 0.8745520114898682,
"learning_rate": 0.000198920821641978,
"loss": 0.5991,
"step": 105
},
{
"epoch": 0.23093681917211328,
"grad_norm": 0.7649117708206177,
"learning_rate": 0.00019889532697135734,
"loss": 0.5501,
"step": 106
},
{
"epoch": 0.23311546840958605,
"grad_norm": 1.1097913980484009,
"learning_rate": 0.0001988695363334443,
"loss": 0.5863,
"step": 107
},
{
"epoch": 0.23529411764705882,
"grad_norm": 0.9224969148635864,
"learning_rate": 0.00019884344980542338,
"loss": 0.5883,
"step": 108
},
{
"epoch": 0.2374727668845316,
"grad_norm": 0.7770025134086609,
"learning_rate": 0.00019881706746536462,
"loss": 0.8375,
"step": 109
},
{
"epoch": 0.23965141612200436,
"grad_norm": 0.8830885887145996,
"learning_rate": 0.00019879038939222329,
"loss": 0.6464,
"step": 110
},
{
"epoch": 0.24183006535947713,
"grad_norm": 0.8932918310165405,
"learning_rate": 0.00019876341566583977,
"loss": 0.4851,
"step": 111
},
{
"epoch": 0.2440087145969499,
"grad_norm": 0.8250621557235718,
"learning_rate": 0.0001987361463669392,
"loss": 0.5658,
"step": 112
},
{
"epoch": 0.24618736383442266,
"grad_norm": 0.9288647174835205,
"learning_rate": 0.00019870858157713123,
"loss": 0.5441,
"step": 113
},
{
"epoch": 0.24836601307189543,
"grad_norm": 0.8258922100067139,
"learning_rate": 0.00019868072137891002,
"loss": 0.764,
"step": 114
},
{
"epoch": 0.25054466230936817,
"grad_norm": 0.8087350726127625,
"learning_rate": 0.00019865256585565363,
"loss": 0.5816,
"step": 115
},
{
"epoch": 0.25054466230936817,
"eval_loss": 0.6156808733940125,
"eval_runtime": 0.937,
"eval_samples_per_second": 182.501,
"eval_steps_per_second": 13.874,
"step": 115
},
{
"epoch": 0.25272331154684097,
"grad_norm": 1.0198040008544922,
"learning_rate": 0.00019862411509162406,
"loss": 0.9471,
"step": 116
},
{
"epoch": 0.2549019607843137,
"grad_norm": 0.8376523852348328,
"learning_rate": 0.00019859536917196687,
"loss": 0.6166,
"step": 117
},
{
"epoch": 0.2570806100217865,
"grad_norm": 0.8766109347343445,
"learning_rate": 0.0001985663281827108,
"loss": 0.6737,
"step": 118
},
{
"epoch": 0.25925925925925924,
"grad_norm": 1.072192668914795,
"learning_rate": 0.00019853699221076792,
"loss": 0.6403,
"step": 119
},
{
"epoch": 0.26143790849673204,
"grad_norm": 0.8205565214157104,
"learning_rate": 0.00019850736134393286,
"loss": 0.4247,
"step": 120
},
{
"epoch": 0.2636165577342048,
"grad_norm": 1.0622146129608154,
"learning_rate": 0.00019847743567088293,
"loss": 0.6497,
"step": 121
},
{
"epoch": 0.2657952069716776,
"grad_norm": 0.8463292717933655,
"learning_rate": 0.00019844721528117766,
"loss": 0.6587,
"step": 122
},
{
"epoch": 0.2679738562091503,
"grad_norm": 0.9597845673561096,
"learning_rate": 0.0001984167002652586,
"loss": 0.4752,
"step": 123
},
{
"epoch": 0.2701525054466231,
"grad_norm": 0.8975586295127869,
"learning_rate": 0.00019838589071444903,
"loss": 0.7978,
"step": 124
},
{
"epoch": 0.27233115468409586,
"grad_norm": 0.8363540768623352,
"learning_rate": 0.00019835478672095374,
"loss": 0.7359,
"step": 125
},
{
"epoch": 0.27450980392156865,
"grad_norm": 1.0208615064620972,
"learning_rate": 0.00019832338837785863,
"loss": 0.518,
"step": 126
},
{
"epoch": 0.2766884531590414,
"grad_norm": 1.14145028591156,
"learning_rate": 0.0001982916957791306,
"loss": 0.7642,
"step": 127
},
{
"epoch": 0.2788671023965142,
"grad_norm": 0.9274200797080994,
"learning_rate": 0.00019825970901961705,
"loss": 0.5288,
"step": 128
},
{
"epoch": 0.28104575163398693,
"grad_norm": 0.8783562779426575,
"learning_rate": 0.0001982274281950459,
"loss": 0.5958,
"step": 129
},
{
"epoch": 0.28322440087145967,
"grad_norm": 0.9028067588806152,
"learning_rate": 0.000198194853402025,
"loss": 0.6075,
"step": 130
},
{
"epoch": 0.28540305010893247,
"grad_norm": 0.9846379160881042,
"learning_rate": 0.00019816198473804198,
"loss": 0.8254,
"step": 131
},
{
"epoch": 0.2875816993464052,
"grad_norm": 0.9409753680229187,
"learning_rate": 0.00019812882230146398,
"loss": 0.704,
"step": 132
},
{
"epoch": 0.289760348583878,
"grad_norm": 0.8969582915306091,
"learning_rate": 0.00019809536619153732,
"loss": 0.6107,
"step": 133
},
{
"epoch": 0.29193899782135074,
"grad_norm": 0.7812852263450623,
"learning_rate": 0.00019806161650838723,
"loss": 0.5671,
"step": 134
},
{
"epoch": 0.29411764705882354,
"grad_norm": 0.8860548734664917,
"learning_rate": 0.00019802757335301741,
"loss": 0.5248,
"step": 135
},
{
"epoch": 0.2962962962962963,
"grad_norm": 0.8217918276786804,
"learning_rate": 0.00019799323682731,
"loss": 0.4935,
"step": 136
},
{
"epoch": 0.2984749455337691,
"grad_norm": 0.7621735334396362,
"learning_rate": 0.00019795860703402505,
"loss": 0.5984,
"step": 137
},
{
"epoch": 0.3006535947712418,
"grad_norm": 0.9418565630912781,
"learning_rate": 0.00019792368407680025,
"loss": 0.558,
"step": 138
},
{
"epoch": 0.3028322440087146,
"grad_norm": 0.819114625453949,
"learning_rate": 0.00019788846806015066,
"loss": 0.3791,
"step": 139
},
{
"epoch": 0.30501089324618735,
"grad_norm": 0.9072156548500061,
"learning_rate": 0.00019785295908946848,
"loss": 0.4462,
"step": 140
},
{
"epoch": 0.30718954248366015,
"grad_norm": 0.8303220868110657,
"learning_rate": 0.00019781715727102252,
"loss": 0.4959,
"step": 141
},
{
"epoch": 0.3093681917211329,
"grad_norm": 0.8586477041244507,
"learning_rate": 0.00019778106271195806,
"loss": 0.4701,
"step": 142
},
{
"epoch": 0.3115468409586057,
"grad_norm": 0.7374873757362366,
"learning_rate": 0.00019774467552029646,
"loss": 0.407,
"step": 143
},
{
"epoch": 0.3137254901960784,
"grad_norm": 1.1180788278579712,
"learning_rate": 0.00019770799580493494,
"loss": 0.6304,
"step": 144
},
{
"epoch": 0.3159041394335512,
"grad_norm": 0.9823700189590454,
"learning_rate": 0.000197671023675646,
"loss": 0.5079,
"step": 145
},
{
"epoch": 0.31808278867102396,
"grad_norm": 0.8474340438842773,
"learning_rate": 0.00019763375924307735,
"loss": 0.5708,
"step": 146
},
{
"epoch": 0.3202614379084967,
"grad_norm": 0.9172300100326538,
"learning_rate": 0.00019759620261875155,
"loss": 0.418,
"step": 147
},
{
"epoch": 0.3224400871459695,
"grad_norm": 0.7413074374198914,
"learning_rate": 0.0001975583539150655,
"loss": 0.4531,
"step": 148
},
{
"epoch": 0.32461873638344224,
"grad_norm": 0.7417133450508118,
"learning_rate": 0.00019752021324529023,
"loss": 0.5158,
"step": 149
},
{
"epoch": 0.32679738562091504,
"grad_norm": 0.774067223072052,
"learning_rate": 0.00019748178072357065,
"loss": 0.5995,
"step": 150
},
{
"epoch": 0.3289760348583878,
"grad_norm": 0.9123216867446899,
"learning_rate": 0.00019744305646492497,
"loss": 0.6477,
"step": 151
},
{
"epoch": 0.3311546840958606,
"grad_norm": 0.8347046375274658,
"learning_rate": 0.00019740404058524457,
"loss": 0.6527,
"step": 152
},
{
"epoch": 0.3333333333333333,
"grad_norm": 0.9968402981758118,
"learning_rate": 0.00019736473320129352,
"loss": 0.5282,
"step": 153
},
{
"epoch": 0.3355119825708061,
"grad_norm": 0.7431806921958923,
"learning_rate": 0.00019732513443070836,
"loss": 0.5553,
"step": 154
},
{
"epoch": 0.33769063180827885,
"grad_norm": 0.9280023574829102,
"learning_rate": 0.0001972852443919976,
"loss": 0.7504,
"step": 155
},
{
"epoch": 0.33986928104575165,
"grad_norm": 0.9563156366348267,
"learning_rate": 0.00019724506320454153,
"loss": 0.4566,
"step": 156
},
{
"epoch": 0.3420479302832244,
"grad_norm": 0.744659960269928,
"learning_rate": 0.00019720459098859165,
"loss": 0.5929,
"step": 157
},
{
"epoch": 0.3442265795206972,
"grad_norm": 0.8504654765129089,
"learning_rate": 0.0001971638278652705,
"loss": 0.5152,
"step": 158
},
{
"epoch": 0.3464052287581699,
"grad_norm": 0.9335165023803711,
"learning_rate": 0.0001971227739565712,
"loss": 0.5115,
"step": 159
},
{
"epoch": 0.3485838779956427,
"grad_norm": 1.0398534536361694,
"learning_rate": 0.0001970814293853572,
"loss": 0.4626,
"step": 160
},
{
"epoch": 0.35076252723311546,
"grad_norm": 0.7855281233787537,
"learning_rate": 0.0001970397942753617,
"loss": 0.4245,
"step": 161
},
{
"epoch": 0.35294117647058826,
"grad_norm": 0.7897714972496033,
"learning_rate": 0.00019699786875118747,
"loss": 0.5555,
"step": 162
},
{
"epoch": 0.355119825708061,
"grad_norm": 0.8648003935813904,
"learning_rate": 0.0001969556529383064,
"loss": 0.5558,
"step": 163
},
{
"epoch": 0.3572984749455338,
"grad_norm": 1.0440359115600586,
"learning_rate": 0.00019691314696305913,
"loss": 0.4879,
"step": 164
},
{
"epoch": 0.35947712418300654,
"grad_norm": 0.8991299867630005,
"learning_rate": 0.00019687035095265475,
"loss": 0.4131,
"step": 165
},
{
"epoch": 0.3616557734204793,
"grad_norm": 1.070555329322815,
"learning_rate": 0.00019682726503517017,
"loss": 0.4959,
"step": 166
},
{
"epoch": 0.3638344226579521,
"grad_norm": 0.751699686050415,
"learning_rate": 0.00019678388933955015,
"loss": 0.4098,
"step": 167
},
{
"epoch": 0.3660130718954248,
"grad_norm": 0.7820857763290405,
"learning_rate": 0.00019674022399560648,
"loss": 0.4611,
"step": 168
},
{
"epoch": 0.3681917211328976,
"grad_norm": 0.7827489376068115,
"learning_rate": 0.00019669626913401792,
"loss": 0.3593,
"step": 169
},
{
"epoch": 0.37037037037037035,
"grad_norm": 0.8705342411994934,
"learning_rate": 0.00019665202488632956,
"loss": 0.4037,
"step": 170
},
{
"epoch": 0.37254901960784315,
"grad_norm": 0.9181383848190308,
"learning_rate": 0.00019660749138495268,
"loss": 0.5621,
"step": 171
},
{
"epoch": 0.3747276688453159,
"grad_norm": 0.7258014678955078,
"learning_rate": 0.0001965626687631641,
"loss": 0.3909,
"step": 172
},
{
"epoch": 0.3769063180827887,
"grad_norm": 0.7386276721954346,
"learning_rate": 0.00019651755715510602,
"loss": 0.5974,
"step": 173
},
{
"epoch": 0.3790849673202614,
"grad_norm": 0.7849751710891724,
"learning_rate": 0.00019647215669578536,
"loss": 0.4947,
"step": 174
},
{
"epoch": 0.3812636165577342,
"grad_norm": 0.7632936239242554,
"learning_rate": 0.00019642646752107362,
"loss": 0.4886,
"step": 175
},
{
"epoch": 0.38344226579520696,
"grad_norm": 0.8370786309242249,
"learning_rate": 0.00019638048976770628,
"loss": 0.4741,
"step": 176
},
{
"epoch": 0.38562091503267976,
"grad_norm": 0.8441713452339172,
"learning_rate": 0.00019633422357328239,
"loss": 0.4939,
"step": 177
},
{
"epoch": 0.3877995642701525,
"grad_norm": 0.7680661082267761,
"learning_rate": 0.00019628766907626446,
"loss": 0.5976,
"step": 178
},
{
"epoch": 0.3899782135076253,
"grad_norm": 0.8030869364738464,
"learning_rate": 0.00019624082641597754,
"loss": 0.4914,
"step": 179
},
{
"epoch": 0.39215686274509803,
"grad_norm": 0.8066624402999878,
"learning_rate": 0.00019619369573260924,
"loss": 0.4982,
"step": 180
},
{
"epoch": 0.39433551198257083,
"grad_norm": 0.7550255060195923,
"learning_rate": 0.00019614627716720912,
"loss": 0.3796,
"step": 181
},
{
"epoch": 0.39651416122004357,
"grad_norm": 0.761080265045166,
"learning_rate": 0.00019609857086168823,
"loss": 0.4118,
"step": 182
},
{
"epoch": 0.39869281045751637,
"grad_norm": 1.061673641204834,
"learning_rate": 0.00019605057695881885,
"loss": 0.4461,
"step": 183
},
{
"epoch": 0.4008714596949891,
"grad_norm": 0.8266555070877075,
"learning_rate": 0.00019600229560223388,
"loss": 0.4915,
"step": 184
},
{
"epoch": 0.40305010893246185,
"grad_norm": 0.769981861114502,
"learning_rate": 0.00019595372693642654,
"loss": 0.3993,
"step": 185
},
{
"epoch": 0.40522875816993464,
"grad_norm": 0.8316985368728638,
"learning_rate": 0.00019590487110674983,
"loss": 0.598,
"step": 186
},
{
"epoch": 0.4074074074074074,
"grad_norm": 0.7869564890861511,
"learning_rate": 0.00019585572825941627,
"loss": 0.5088,
"step": 187
},
{
"epoch": 0.4095860566448802,
"grad_norm": 0.7254141569137573,
"learning_rate": 0.0001958062985414972,
"loss": 0.3948,
"step": 188
},
{
"epoch": 0.4117647058823529,
"grad_norm": 0.7505261898040771,
"learning_rate": 0.00019575658210092259,
"loss": 0.3883,
"step": 189
},
{
"epoch": 0.4139433551198257,
"grad_norm": 0.7498146891593933,
"learning_rate": 0.00019570657908648048,
"loss": 0.4072,
"step": 190
},
{
"epoch": 0.41612200435729846,
"grad_norm": 0.9777516722679138,
"learning_rate": 0.00019565628964781647,
"loss": 0.4711,
"step": 191
},
{
"epoch": 0.41830065359477125,
"grad_norm": 0.719313383102417,
"learning_rate": 0.0001956057139354335,
"loss": 0.2645,
"step": 192
},
{
"epoch": 0.420479302832244,
"grad_norm": 0.826934814453125,
"learning_rate": 0.0001955548521006911,
"loss": 0.4608,
"step": 193
},
{
"epoch": 0.4226579520697168,
"grad_norm": 0.7773908376693726,
"learning_rate": 0.0001955037042958052,
"loss": 0.6364,
"step": 194
},
{
"epoch": 0.42483660130718953,
"grad_norm": 0.8829189538955688,
"learning_rate": 0.00019545227067384747,
"loss": 0.5166,
"step": 195
},
{
"epoch": 0.42701525054466233,
"grad_norm": 0.7444214820861816,
"learning_rate": 0.00019540055138874505,
"loss": 0.4784,
"step": 196
},
{
"epoch": 0.42919389978213507,
"grad_norm": 0.7535512447357178,
"learning_rate": 0.0001953485465952799,
"loss": 0.328,
"step": 197
},
{
"epoch": 0.43137254901960786,
"grad_norm": 0.867964506149292,
"learning_rate": 0.00019529625644908847,
"loss": 0.4954,
"step": 198
},
{
"epoch": 0.4335511982570806,
"grad_norm": 0.8096396923065186,
"learning_rate": 0.00019524368110666122,
"loss": 0.409,
"step": 199
},
{
"epoch": 0.4357298474945534,
"grad_norm": 0.6851803064346313,
"learning_rate": 0.0001951908207253421,
"loss": 0.3642,
"step": 200
},
{
"epoch": 0.43790849673202614,
"grad_norm": 1.0261396169662476,
"learning_rate": 0.00019513767546332813,
"loss": 0.5437,
"step": 201
},
{
"epoch": 0.4400871459694989,
"grad_norm": 0.6751096248626709,
"learning_rate": 0.00019508424547966884,
"loss": 0.3054,
"step": 202
},
{
"epoch": 0.4422657952069717,
"grad_norm": 0.9070213437080383,
"learning_rate": 0.00019503053093426593,
"loss": 0.5467,
"step": 203
},
{
"epoch": 0.4444444444444444,
"grad_norm": 0.7909525632858276,
"learning_rate": 0.00019497653198787264,
"loss": 0.4506,
"step": 204
},
{
"epoch": 0.4466230936819172,
"grad_norm": 0.8636406064033508,
"learning_rate": 0.00019492224880209344,
"loss": 0.4917,
"step": 205
},
{
"epoch": 0.44880174291938996,
"grad_norm": 0.8827958106994629,
"learning_rate": 0.00019486768153938338,
"loss": 0.6783,
"step": 206
},
{
"epoch": 0.45098039215686275,
"grad_norm": 0.8108576536178589,
"learning_rate": 0.00019481283036304768,
"loss": 0.5415,
"step": 207
},
{
"epoch": 0.4531590413943355,
"grad_norm": 0.700524091720581,
"learning_rate": 0.0001947576954372413,
"loss": 0.2578,
"step": 208
},
{
"epoch": 0.4553376906318083,
"grad_norm": 0.6894106268882751,
"learning_rate": 0.00019470227692696833,
"loss": 0.4008,
"step": 209
},
{
"epoch": 0.45751633986928103,
"grad_norm": 0.762399435043335,
"learning_rate": 0.00019464657499808152,
"loss": 0.448,
"step": 210
},
{
"epoch": 0.4596949891067538,
"grad_norm": 0.7530399560928345,
"learning_rate": 0.00019459058981728192,
"loss": 0.4793,
"step": 211
},
{
"epoch": 0.46187363834422657,
"grad_norm": 0.8698046803474426,
"learning_rate": 0.0001945343215521182,
"loss": 0.5627,
"step": 212
},
{
"epoch": 0.46405228758169936,
"grad_norm": 0.9562462568283081,
"learning_rate": 0.00019447777037098622,
"loss": 0.6461,
"step": 213
},
{
"epoch": 0.4662309368191721,
"grad_norm": 0.7317548990249634,
"learning_rate": 0.0001944209364431286,
"loss": 0.2913,
"step": 214
},
{
"epoch": 0.4684095860566449,
"grad_norm": 0.8016018271446228,
"learning_rate": 0.00019436381993863405,
"loss": 0.4218,
"step": 215
},
{
"epoch": 0.47058823529411764,
"grad_norm": 0.7906899452209473,
"learning_rate": 0.00019430642102843707,
"loss": 0.3951,
"step": 216
},
{
"epoch": 0.47276688453159044,
"grad_norm": 0.7488080859184265,
"learning_rate": 0.0001942487398843172,
"loss": 0.2522,
"step": 217
},
{
"epoch": 0.4749455337690632,
"grad_norm": 0.7537955641746521,
"learning_rate": 0.00019419077667889872,
"loss": 0.4182,
"step": 218
},
{
"epoch": 0.477124183006536,
"grad_norm": 1.090034008026123,
"learning_rate": 0.00019413253158565006,
"loss": 0.4396,
"step": 219
},
{
"epoch": 0.4793028322440087,
"grad_norm": 0.8850147724151611,
"learning_rate": 0.00019407400477888315,
"loss": 0.4225,
"step": 220
},
{
"epoch": 0.48148148148148145,
"grad_norm": 0.6652522683143616,
"learning_rate": 0.00019401519643375315,
"loss": 0.3252,
"step": 221
},
{
"epoch": 0.48366013071895425,
"grad_norm": 0.9398255348205566,
"learning_rate": 0.00019395610672625767,
"loss": 0.3825,
"step": 222
},
{
"epoch": 0.485838779956427,
"grad_norm": 0.7577664852142334,
"learning_rate": 0.00019389673583323645,
"loss": 0.4972,
"step": 223
},
{
"epoch": 0.4880174291938998,
"grad_norm": 0.7722988724708557,
"learning_rate": 0.00019383708393237075,
"loss": 0.3655,
"step": 224
},
{
"epoch": 0.49019607843137253,
"grad_norm": 0.9242033958435059,
"learning_rate": 0.0001937771512021827,
"loss": 0.5322,
"step": 225
},
{
"epoch": 0.4923747276688453,
"grad_norm": 0.7697513699531555,
"learning_rate": 0.00019371693782203498,
"loss": 0.3516,
"step": 226
},
{
"epoch": 0.49455337690631807,
"grad_norm": 1.0141658782958984,
"learning_rate": 0.00019365644397213014,
"loss": 0.4072,
"step": 227
},
{
"epoch": 0.49673202614379086,
"grad_norm": 0.8579118847846985,
"learning_rate": 0.00019359566983351013,
"loss": 0.5341,
"step": 228
},
{
"epoch": 0.4989106753812636,
"grad_norm": 0.9969218969345093,
"learning_rate": 0.0001935346155880557,
"loss": 0.5544,
"step": 229
},
{
"epoch": 0.5010893246187363,
"grad_norm": 0.7654063105583191,
"learning_rate": 0.0001934732814184859,
"loss": 0.3604,
"step": 230
},
{
"epoch": 0.5010893246187363,
"eval_loss": 0.430705189704895,
"eval_runtime": 0.9369,
"eval_samples_per_second": 182.508,
"eval_steps_per_second": 13.875,
"step": 230
},
{
"epoch": 0.5032679738562091,
"grad_norm": 0.8155052661895752,
"learning_rate": 0.00019341166750835748,
"loss": 0.3841,
"step": 231
},
{
"epoch": 0.5054466230936819,
"grad_norm": 1.005814790725708,
"learning_rate": 0.00019334977404206443,
"loss": 0.4976,
"step": 232
},
{
"epoch": 0.5076252723311547,
"grad_norm": 0.795301079750061,
"learning_rate": 0.00019328760120483743,
"loss": 0.347,
"step": 233
},
{
"epoch": 0.5098039215686274,
"grad_norm": 0.8294497728347778,
"learning_rate": 0.00019322514918274308,
"loss": 0.3366,
"step": 234
},
{
"epoch": 0.5119825708061002,
"grad_norm": 1.0151102542877197,
"learning_rate": 0.0001931624181626836,
"loss": 0.3217,
"step": 235
},
{
"epoch": 0.514161220043573,
"grad_norm": 0.8590940833091736,
"learning_rate": 0.00019309940833239626,
"loss": 0.4399,
"step": 236
},
{
"epoch": 0.5163398692810458,
"grad_norm": 0.8852725028991699,
"learning_rate": 0.00019303611988045257,
"loss": 0.3737,
"step": 237
},
{
"epoch": 0.5185185185185185,
"grad_norm": 0.8230773210525513,
"learning_rate": 0.00019297255299625797,
"loss": 0.4262,
"step": 238
},
{
"epoch": 0.5206971677559913,
"grad_norm": 0.6675543785095215,
"learning_rate": 0.00019290870787005114,
"loss": 0.291,
"step": 239
},
{
"epoch": 0.5228758169934641,
"grad_norm": 0.867770791053772,
"learning_rate": 0.00019284458469290354,
"loss": 0.4791,
"step": 240
},
{
"epoch": 0.5250544662309368,
"grad_norm": 0.6569726467132568,
"learning_rate": 0.0001927801836567187,
"loss": 0.3331,
"step": 241
},
{
"epoch": 0.5272331154684096,
"grad_norm": 0.9207850098609924,
"learning_rate": 0.00019271550495423168,
"loss": 0.3867,
"step": 242
},
{
"epoch": 0.5294117647058824,
"grad_norm": 0.9937344789505005,
"learning_rate": 0.00019265054877900858,
"loss": 0.4788,
"step": 243
},
{
"epoch": 0.5315904139433552,
"grad_norm": 0.6370213627815247,
"learning_rate": 0.00019258531532544585,
"loss": 0.2628,
"step": 244
},
{
"epoch": 0.5337690631808278,
"grad_norm": 0.7503904104232788,
"learning_rate": 0.00019251980478876985,
"loss": 0.3214,
"step": 245
},
{
"epoch": 0.5359477124183006,
"grad_norm": 0.8075821399688721,
"learning_rate": 0.00019245401736503608,
"loss": 0.4626,
"step": 246
},
{
"epoch": 0.5381263616557734,
"grad_norm": 0.7386597990989685,
"learning_rate": 0.0001923879532511287,
"loss": 0.366,
"step": 247
},
{
"epoch": 0.5403050108932462,
"grad_norm": 0.7394566535949707,
"learning_rate": 0.00019232161264475997,
"loss": 0.387,
"step": 248
},
{
"epoch": 0.5424836601307189,
"grad_norm": 0.7640907168388367,
"learning_rate": 0.0001922549957444696,
"loss": 0.58,
"step": 249
},
{
"epoch": 0.5446623093681917,
"grad_norm": 0.6614570021629333,
"learning_rate": 0.00019218810274962417,
"loss": 0.3073,
"step": 250
},
{
"epoch": 0.5468409586056645,
"grad_norm": 0.966230034828186,
"learning_rate": 0.0001921209338604166,
"loss": 0.5173,
"step": 251
},
{
"epoch": 0.5490196078431373,
"grad_norm": 0.822700560092926,
"learning_rate": 0.00019205348927786532,
"loss": 0.4205,
"step": 252
},
{
"epoch": 0.55119825708061,
"grad_norm": 0.7053878307342529,
"learning_rate": 0.00019198576920381405,
"loss": 0.3872,
"step": 253
},
{
"epoch": 0.5533769063180828,
"grad_norm": 0.7365344166755676,
"learning_rate": 0.00019191777384093081,
"loss": 0.2189,
"step": 254
},
{
"epoch": 0.5555555555555556,
"grad_norm": 0.738994300365448,
"learning_rate": 0.0001918495033927076,
"loss": 0.4104,
"step": 255
},
{
"epoch": 0.5577342047930284,
"grad_norm": 0.6301658153533936,
"learning_rate": 0.0001917809580634596,
"loss": 0.217,
"step": 256
},
{
"epoch": 0.5599128540305011,
"grad_norm": 0.6912908554077148,
"learning_rate": 0.0001917121380583247,
"loss": 0.2477,
"step": 257
},
{
"epoch": 0.5620915032679739,
"grad_norm": 0.7470822930335999,
"learning_rate": 0.00019164304358326275,
"loss": 0.2822,
"step": 258
},
{
"epoch": 0.5642701525054467,
"grad_norm": 0.8313736915588379,
"learning_rate": 0.0001915736748450551,
"loss": 0.4136,
"step": 259
},
{
"epoch": 0.5664488017429193,
"grad_norm": 0.9498727321624756,
"learning_rate": 0.00019150403205130383,
"loss": 0.4656,
"step": 260
},
{
"epoch": 0.5686274509803921,
"grad_norm": 0.7486637830734253,
"learning_rate": 0.0001914341154104312,
"loss": 0.3612,
"step": 261
},
{
"epoch": 0.5708061002178649,
"grad_norm": 0.7852402925491333,
"learning_rate": 0.00019136392513167903,
"loss": 0.3849,
"step": 262
},
{
"epoch": 0.5729847494553377,
"grad_norm": 0.7787222862243652,
"learning_rate": 0.00019129346142510812,
"loss": 0.3202,
"step": 263
},
{
"epoch": 0.5751633986928104,
"grad_norm": 0.7108213305473328,
"learning_rate": 0.00019122272450159745,
"loss": 0.2964,
"step": 264
},
{
"epoch": 0.5773420479302832,
"grad_norm": 0.7679167985916138,
"learning_rate": 0.00019115171457284382,
"loss": 0.3068,
"step": 265
},
{
"epoch": 0.579520697167756,
"grad_norm": 0.9409091472625732,
"learning_rate": 0.0001910804318513609,
"loss": 0.3865,
"step": 266
},
{
"epoch": 0.5816993464052288,
"grad_norm": 0.7475356459617615,
"learning_rate": 0.00019100887655047885,
"loss": 0.3727,
"step": 267
},
{
"epoch": 0.5838779956427015,
"grad_norm": 0.8223069310188293,
"learning_rate": 0.0001909370488843436,
"loss": 0.3594,
"step": 268
},
{
"epoch": 0.5860566448801743,
"grad_norm": 0.8433902263641357,
"learning_rate": 0.00019086494906791614,
"loss": 0.3627,
"step": 269
},
{
"epoch": 0.5882352941176471,
"grad_norm": 0.7213712334632874,
"learning_rate": 0.00019079257731697196,
"loss": 0.3216,
"step": 270
},
{
"epoch": 0.5904139433551199,
"grad_norm": 0.9422861337661743,
"learning_rate": 0.00019071993384810036,
"loss": 0.4973,
"step": 271
},
{
"epoch": 0.5925925925925926,
"grad_norm": 0.7964383363723755,
"learning_rate": 0.0001906470188787039,
"loss": 0.3558,
"step": 272
},
{
"epoch": 0.5947712418300654,
"grad_norm": 0.8275824785232544,
"learning_rate": 0.0001905738326269975,
"loss": 0.2985,
"step": 273
},
{
"epoch": 0.5969498910675382,
"grad_norm": 0.9769518971443176,
"learning_rate": 0.00019050037531200814,
"loss": 0.5299,
"step": 274
},
{
"epoch": 0.599128540305011,
"grad_norm": 0.6739898920059204,
"learning_rate": 0.0001904266471535739,
"loss": 0.2324,
"step": 275
},
{
"epoch": 0.6013071895424836,
"grad_norm": 0.78001868724823,
"learning_rate": 0.00019035264837234347,
"loss": 0.3311,
"step": 276
},
{
"epoch": 0.6034858387799564,
"grad_norm": 0.6510067582130432,
"learning_rate": 0.00019027837918977544,
"loss": 0.2284,
"step": 277
},
{
"epoch": 0.6056644880174292,
"grad_norm": 0.7791504859924316,
"learning_rate": 0.00019020383982813765,
"loss": 0.3247,
"step": 278
},
{
"epoch": 0.6078431372549019,
"grad_norm": 0.7968712449073792,
"learning_rate": 0.00019012903051050643,
"loss": 0.3379,
"step": 279
},
{
"epoch": 0.6100217864923747,
"grad_norm": 0.7802785038948059,
"learning_rate": 0.00019005395146076616,
"loss": 0.3113,
"step": 280
},
{
"epoch": 0.6122004357298475,
"grad_norm": 0.8003398180007935,
"learning_rate": 0.00018997860290360832,
"loss": 0.3467,
"step": 281
},
{
"epoch": 0.6143790849673203,
"grad_norm": 0.9283706545829773,
"learning_rate": 0.00018990298506453104,
"loss": 0.3301,
"step": 282
},
{
"epoch": 0.616557734204793,
"grad_norm": 0.95260089635849,
"learning_rate": 0.00018982709816983828,
"loss": 0.4446,
"step": 283
},
{
"epoch": 0.6187363834422658,
"grad_norm": 0.8746076226234436,
"learning_rate": 0.0001897509424466393,
"loss": 0.3264,
"step": 284
},
{
"epoch": 0.6209150326797386,
"grad_norm": 0.9784271717071533,
"learning_rate": 0.00018967451812284777,
"loss": 0.3103,
"step": 285
},
{
"epoch": 0.6230936819172114,
"grad_norm": 0.7417327165603638,
"learning_rate": 0.00018959782542718128,
"loss": 0.3489,
"step": 286
},
{
"epoch": 0.6252723311546841,
"grad_norm": 0.835861325263977,
"learning_rate": 0.00018952086458916064,
"loss": 0.4059,
"step": 287
},
{
"epoch": 0.6274509803921569,
"grad_norm": 0.7815294861793518,
"learning_rate": 0.000189443635839109,
"loss": 0.3252,
"step": 288
},
{
"epoch": 0.6296296296296297,
"grad_norm": 0.7762477993965149,
"learning_rate": 0.00018936613940815145,
"loss": 0.3905,
"step": 289
},
{
"epoch": 0.6318082788671024,
"grad_norm": 0.7671297192573547,
"learning_rate": 0.00018928837552821404,
"loss": 0.3945,
"step": 290
},
{
"epoch": 0.6339869281045751,
"grad_norm": 0.7193905711174011,
"learning_rate": 0.00018921034443202333,
"loss": 0.2897,
"step": 291
},
{
"epoch": 0.6361655773420479,
"grad_norm": 0.7806922793388367,
"learning_rate": 0.0001891320463531055,
"loss": 0.3675,
"step": 292
},
{
"epoch": 0.6383442265795207,
"grad_norm": 0.9675951600074768,
"learning_rate": 0.0001890534815257858,
"loss": 0.2949,
"step": 293
},
{
"epoch": 0.6405228758169934,
"grad_norm": 0.7414330840110779,
"learning_rate": 0.00018897465018518782,
"loss": 0.2468,
"step": 294
},
{
"epoch": 0.6427015250544662,
"grad_norm": 0.9123559594154358,
"learning_rate": 0.00018889555256723262,
"loss": 0.3618,
"step": 295
},
{
"epoch": 0.644880174291939,
"grad_norm": 0.8628408312797546,
"learning_rate": 0.0001888161889086383,
"loss": 0.4728,
"step": 296
},
{
"epoch": 0.6470588235294118,
"grad_norm": 0.9308063983917236,
"learning_rate": 0.00018873655944691902,
"loss": 0.3469,
"step": 297
},
{
"epoch": 0.6492374727668845,
"grad_norm": 0.7682824730873108,
"learning_rate": 0.00018865666442038456,
"loss": 0.4138,
"step": 298
},
{
"epoch": 0.6514161220043573,
"grad_norm": 0.8124529123306274,
"learning_rate": 0.00018857650406813937,
"loss": 0.3172,
"step": 299
},
{
"epoch": 0.6535947712418301,
"grad_norm": 1.0015543699264526,
"learning_rate": 0.00018849607863008193,
"loss": 0.3274,
"step": 300
},
{
"epoch": 0.6557734204793029,
"grad_norm": 0.8008652329444885,
"learning_rate": 0.0001884153883469041,
"loss": 0.2765,
"step": 301
},
{
"epoch": 0.6579520697167756,
"grad_norm": 0.8472908139228821,
"learning_rate": 0.0001883344334600904,
"loss": 0.362,
"step": 302
},
{
"epoch": 0.6601307189542484,
"grad_norm": 0.953235387802124,
"learning_rate": 0.0001882532142119171,
"loss": 0.383,
"step": 303
},
{
"epoch": 0.6623093681917211,
"grad_norm": 0.9220999479293823,
"learning_rate": 0.00018817173084545176,
"loss": 0.4209,
"step": 304
},
{
"epoch": 0.664488017429194,
"grad_norm": 0.7949219942092896,
"learning_rate": 0.00018808998360455233,
"loss": 0.3869,
"step": 305
},
{
"epoch": 0.6666666666666666,
"grad_norm": 0.851177990436554,
"learning_rate": 0.0001880079727338664,
"loss": 0.2691,
"step": 306
},
{
"epoch": 0.6688453159041394,
"grad_norm": 0.8438466787338257,
"learning_rate": 0.00018792569847883068,
"loss": 0.3314,
"step": 307
},
{
"epoch": 0.6710239651416122,
"grad_norm": 0.842267632484436,
"learning_rate": 0.00018784316108566996,
"loss": 0.3046,
"step": 308
},
{
"epoch": 0.673202614379085,
"grad_norm": 0.6835848689079285,
"learning_rate": 0.00018776036080139666,
"loss": 0.2026,
"step": 309
},
{
"epoch": 0.6753812636165577,
"grad_norm": 0.8685810565948486,
"learning_rate": 0.00018767729787380985,
"loss": 0.2611,
"step": 310
},
{
"epoch": 0.6775599128540305,
"grad_norm": 0.8736341595649719,
"learning_rate": 0.00018759397255149475,
"loss": 0.3135,
"step": 311
},
{
"epoch": 0.6797385620915033,
"grad_norm": 0.6718863844871521,
"learning_rate": 0.00018751038508382176,
"loss": 0.2202,
"step": 312
},
{
"epoch": 0.681917211328976,
"grad_norm": 0.686114490032196,
"learning_rate": 0.00018742653572094583,
"loss": 0.2861,
"step": 313
},
{
"epoch": 0.6840958605664488,
"grad_norm": 0.7779257297515869,
"learning_rate": 0.00018734242471380572,
"loss": 0.2375,
"step": 314
},
{
"epoch": 0.6862745098039216,
"grad_norm": 0.7334826588630676,
"learning_rate": 0.00018725805231412318,
"loss": 0.2855,
"step": 315
},
{
"epoch": 0.6884531590413944,
"grad_norm": 0.8215782046318054,
"learning_rate": 0.00018717341877440226,
"loss": 0.2827,
"step": 316
},
{
"epoch": 0.690631808278867,
"grad_norm": 0.7793420553207397,
"learning_rate": 0.00018708852434792857,
"loss": 0.3593,
"step": 317
},
{
"epoch": 0.6928104575163399,
"grad_norm": 0.6807507872581482,
"learning_rate": 0.0001870033692887684,
"loss": 0.2489,
"step": 318
},
{
"epoch": 0.6949891067538126,
"grad_norm": 0.8848825097084045,
"learning_rate": 0.00018691795385176815,
"loss": 0.3622,
"step": 319
},
{
"epoch": 0.6971677559912854,
"grad_norm": 0.7745735049247742,
"learning_rate": 0.00018683227829255334,
"loss": 0.2808,
"step": 320
},
{
"epoch": 0.6993464052287581,
"grad_norm": 0.8624871969223022,
"learning_rate": 0.00018674634286752805,
"loss": 0.326,
"step": 321
},
{
"epoch": 0.7015250544662309,
"grad_norm": 0.7500248551368713,
"learning_rate": 0.00018666014783387408,
"loss": 0.3413,
"step": 322
},
{
"epoch": 0.7037037037037037,
"grad_norm": 0.7700647115707397,
"learning_rate": 0.0001865736934495501,
"loss": 0.2878,
"step": 323
},
{
"epoch": 0.7058823529411765,
"grad_norm": 0.7682783603668213,
"learning_rate": 0.000186486979973291,
"loss": 0.3626,
"step": 324
},
{
"epoch": 0.7080610021786492,
"grad_norm": 0.8913851976394653,
"learning_rate": 0.00018640000766460704,
"loss": 0.3684,
"step": 325
},
{
"epoch": 0.710239651416122,
"grad_norm": 0.6541398763656616,
"learning_rate": 0.0001863127767837831,
"loss": 0.3203,
"step": 326
},
{
"epoch": 0.7124183006535948,
"grad_norm": 0.9459714889526367,
"learning_rate": 0.00018622528759187795,
"loss": 0.3794,
"step": 327
},
{
"epoch": 0.7145969498910676,
"grad_norm": 0.8284517526626587,
"learning_rate": 0.0001861375403507233,
"loss": 0.2859,
"step": 328
},
{
"epoch": 0.7167755991285403,
"grad_norm": 0.7966834306716919,
"learning_rate": 0.00018604953532292323,
"loss": 0.3299,
"step": 329
},
{
"epoch": 0.7189542483660131,
"grad_norm": 0.9105852842330933,
"learning_rate": 0.00018596127277185329,
"loss": 0.3489,
"step": 330
},
{
"epoch": 0.7211328976034859,
"grad_norm": 0.9086281657218933,
"learning_rate": 0.0001858727529616597,
"loss": 0.4104,
"step": 331
},
{
"epoch": 0.7233115468409586,
"grad_norm": 0.8128787875175476,
"learning_rate": 0.0001857839761572586,
"loss": 0.3057,
"step": 332
},
{
"epoch": 0.7254901960784313,
"grad_norm": 0.7139798402786255,
"learning_rate": 0.0001856949426243352,
"loss": 0.2226,
"step": 333
},
{
"epoch": 0.7276688453159041,
"grad_norm": 0.8051466941833496,
"learning_rate": 0.00018560565262934318,
"loss": 0.2778,
"step": 334
},
{
"epoch": 0.7298474945533769,
"grad_norm": 0.7806089520454407,
"learning_rate": 0.00018551610643950358,
"loss": 0.3074,
"step": 335
},
{
"epoch": 0.7320261437908496,
"grad_norm": 0.8499903678894043,
"learning_rate": 0.00018542630432280422,
"loss": 0.2625,
"step": 336
},
{
"epoch": 0.7342047930283224,
"grad_norm": 0.9724310040473938,
"learning_rate": 0.00018533624654799887,
"loss": 0.3267,
"step": 337
},
{
"epoch": 0.7363834422657952,
"grad_norm": 0.845237135887146,
"learning_rate": 0.00018524593338460635,
"loss": 0.3624,
"step": 338
},
{
"epoch": 0.738562091503268,
"grad_norm": 0.8381814360618591,
"learning_rate": 0.00018515536510290987,
"loss": 0.2973,
"step": 339
},
{
"epoch": 0.7407407407407407,
"grad_norm": 0.7939983606338501,
"learning_rate": 0.00018506454197395606,
"loss": 0.2893,
"step": 340
},
{
"epoch": 0.7429193899782135,
"grad_norm": 1.1554635763168335,
"learning_rate": 0.00018497346426955434,
"loss": 0.4355,
"step": 341
},
{
"epoch": 0.7450980392156863,
"grad_norm": 0.7291870713233948,
"learning_rate": 0.00018488213226227588,
"loss": 0.3172,
"step": 342
},
{
"epoch": 0.7472766884531591,
"grad_norm": 0.6162805557250977,
"learning_rate": 0.00018479054622545302,
"loss": 0.1914,
"step": 343
},
{
"epoch": 0.7494553376906318,
"grad_norm": 0.800554096698761,
"learning_rate": 0.0001846987064331783,
"loss": 0.3092,
"step": 344
},
{
"epoch": 0.7516339869281046,
"grad_norm": 0.8024484515190125,
"learning_rate": 0.00018460661316030365,
"loss": 0.2598,
"step": 345
},
{
"epoch": 0.7516339869281046,
"eval_loss": 0.35579976439476013,
"eval_runtime": 0.9369,
"eval_samples_per_second": 182.512,
"eval_steps_per_second": 13.875,
"step": 345
},
{
"epoch": 0.7538126361655774,
"grad_norm": 0.6010091304779053,
"learning_rate": 0.00018451426668243963,
"loss": 0.1268,
"step": 346
},
{
"epoch": 0.7559912854030502,
"grad_norm": 0.909592866897583,
"learning_rate": 0.0001844216672759546,
"loss": 0.2808,
"step": 347
},
{
"epoch": 0.7581699346405228,
"grad_norm": 0.8788052201271057,
"learning_rate": 0.0001843288152179739,
"loss": 0.3516,
"step": 348
},
{
"epoch": 0.7603485838779956,
"grad_norm": 0.987259030342102,
"learning_rate": 0.00018423571078637885,
"loss": 0.4607,
"step": 349
},
{
"epoch": 0.7625272331154684,
"grad_norm": 1.0777586698532104,
"learning_rate": 0.00018414235425980616,
"loss": 0.3367,
"step": 350
},
{
"epoch": 0.7647058823529411,
"grad_norm": 0.8233873844146729,
"learning_rate": 0.00018404874591764696,
"loss": 0.2616,
"step": 351
},
{
"epoch": 0.7668845315904139,
"grad_norm": 0.6623446345329285,
"learning_rate": 0.00018395488604004603,
"loss": 0.154,
"step": 352
},
{
"epoch": 0.7690631808278867,
"grad_norm": 0.9828007817268372,
"learning_rate": 0.0001838607749079009,
"loss": 0.3084,
"step": 353
},
{
"epoch": 0.7712418300653595,
"grad_norm": 0.7043982148170471,
"learning_rate": 0.00018376641280286107,
"loss": 0.2568,
"step": 354
},
{
"epoch": 0.7734204793028322,
"grad_norm": 0.7597530484199524,
"learning_rate": 0.00018367180000732706,
"loss": 0.306,
"step": 355
},
{
"epoch": 0.775599128540305,
"grad_norm": 0.8099778890609741,
"learning_rate": 0.00018357693680444976,
"loss": 0.2389,
"step": 356
},
{
"epoch": 0.7777777777777778,
"grad_norm": 0.7014642953872681,
"learning_rate": 0.00018348182347812931,
"loss": 0.2601,
"step": 357
},
{
"epoch": 0.7799564270152506,
"grad_norm": 0.8545548915863037,
"learning_rate": 0.00018338646031301458,
"loss": 0.3604,
"step": 358
},
{
"epoch": 0.7821350762527233,
"grad_norm": 0.7516512870788574,
"learning_rate": 0.00018329084759450192,
"loss": 0.2489,
"step": 359
},
{
"epoch": 0.7843137254901961,
"grad_norm": 0.8581446409225464,
"learning_rate": 0.00018319498560873476,
"loss": 0.28,
"step": 360
},
{
"epoch": 0.7864923747276689,
"grad_norm": 0.8704679608345032,
"learning_rate": 0.00018309887464260238,
"loss": 0.3917,
"step": 361
},
{
"epoch": 0.7886710239651417,
"grad_norm": 0.832797110080719,
"learning_rate": 0.00018300251498373923,
"loss": 0.3356,
"step": 362
},
{
"epoch": 0.7908496732026143,
"grad_norm": 0.787756085395813,
"learning_rate": 0.00018290590692052398,
"loss": 0.3156,
"step": 363
},
{
"epoch": 0.7930283224400871,
"grad_norm": 1.014853596687317,
"learning_rate": 0.00018280905074207884,
"loss": 0.3131,
"step": 364
},
{
"epoch": 0.7952069716775599,
"grad_norm": 0.7135484218597412,
"learning_rate": 0.00018271194673826838,
"loss": 0.1935,
"step": 365
},
{
"epoch": 0.7973856209150327,
"grad_norm": 0.6172918677330017,
"learning_rate": 0.000182614595199699,
"loss": 0.1916,
"step": 366
},
{
"epoch": 0.7995642701525054,
"grad_norm": 0.689791738986969,
"learning_rate": 0.00018251699641771784,
"loss": 0.1738,
"step": 367
},
{
"epoch": 0.8017429193899782,
"grad_norm": 0.8571555614471436,
"learning_rate": 0.00018241915068441196,
"loss": 0.2738,
"step": 368
},
{
"epoch": 0.803921568627451,
"grad_norm": 0.7348290681838989,
"learning_rate": 0.00018232105829260752,
"loss": 0.1617,
"step": 369
},
{
"epoch": 0.8061002178649237,
"grad_norm": 0.72556471824646,
"learning_rate": 0.00018222271953586883,
"loss": 0.2201,
"step": 370
},
{
"epoch": 0.8082788671023965,
"grad_norm": 0.915581226348877,
"learning_rate": 0.0001821241347084975,
"loss": 0.3508,
"step": 371
},
{
"epoch": 0.8104575163398693,
"grad_norm": 0.9371058344841003,
"learning_rate": 0.00018202530410553163,
"loss": 0.3495,
"step": 372
},
{
"epoch": 0.8126361655773421,
"grad_norm": 0.696180522441864,
"learning_rate": 0.00018192622802274476,
"loss": 0.2297,
"step": 373
},
{
"epoch": 0.8148148148148148,
"grad_norm": 1.2849663496017456,
"learning_rate": 0.00018182690675664514,
"loss": 0.3872,
"step": 374
},
{
"epoch": 0.8169934640522876,
"grad_norm": 0.7671440243721008,
"learning_rate": 0.00018172734060447482,
"loss": 0.2522,
"step": 375
},
{
"epoch": 0.8191721132897604,
"grad_norm": 0.7630666494369507,
"learning_rate": 0.00018162752986420868,
"loss": 0.1857,
"step": 376
},
{
"epoch": 0.8213507625272332,
"grad_norm": 0.8664875626564026,
"learning_rate": 0.00018152747483455358,
"loss": 0.3085,
"step": 377
},
{
"epoch": 0.8235294117647058,
"grad_norm": 0.7418748140335083,
"learning_rate": 0.0001814271758149475,
"loss": 0.2537,
"step": 378
},
{
"epoch": 0.8257080610021786,
"grad_norm": 0.9873255491256714,
"learning_rate": 0.0001813266331055586,
"loss": 0.335,
"step": 379
},
{
"epoch": 0.8278867102396514,
"grad_norm": 0.8407981991767883,
"learning_rate": 0.00018122584700728443,
"loss": 0.2625,
"step": 380
},
{
"epoch": 0.8300653594771242,
"grad_norm": 0.8493285179138184,
"learning_rate": 0.0001811248178217507,
"loss": 0.2167,
"step": 381
},
{
"epoch": 0.8322440087145969,
"grad_norm": 0.7906709909439087,
"learning_rate": 0.00018102354585131092,
"loss": 0.2919,
"step": 382
},
{
"epoch": 0.8344226579520697,
"grad_norm": 0.8033335208892822,
"learning_rate": 0.00018092203139904496,
"loss": 0.2571,
"step": 383
},
{
"epoch": 0.8366013071895425,
"grad_norm": 0.616438627243042,
"learning_rate": 0.00018082027476875847,
"loss": 0.2102,
"step": 384
},
{
"epoch": 0.8387799564270153,
"grad_norm": 0.6517376899719238,
"learning_rate": 0.00018071827626498185,
"loss": 0.2242,
"step": 385
},
{
"epoch": 0.840958605664488,
"grad_norm": 0.7315549254417419,
"learning_rate": 0.00018061603619296942,
"loss": 0.2594,
"step": 386
},
{
"epoch": 0.8431372549019608,
"grad_norm": 0.6937726736068726,
"learning_rate": 0.00018051355485869833,
"loss": 0.1991,
"step": 387
},
{
"epoch": 0.8453159041394336,
"grad_norm": 0.7155842185020447,
"learning_rate": 0.0001804108325688679,
"loss": 0.2051,
"step": 388
},
{
"epoch": 0.8474945533769063,
"grad_norm": 1.1797689199447632,
"learning_rate": 0.00018030786963089845,
"loss": 0.3421,
"step": 389
},
{
"epoch": 0.8496732026143791,
"grad_norm": 0.7603274583816528,
"learning_rate": 0.00018020466635293057,
"loss": 0.1985,
"step": 390
},
{
"epoch": 0.8518518518518519,
"grad_norm": 0.7939961552619934,
"learning_rate": 0.0001801012230438241,
"loss": 0.2753,
"step": 391
},
{
"epoch": 0.8540305010893247,
"grad_norm": 0.8357366323471069,
"learning_rate": 0.0001799975400131572,
"loss": 0.3453,
"step": 392
},
{
"epoch": 0.8562091503267973,
"grad_norm": 0.7716991305351257,
"learning_rate": 0.00017989361757122553,
"loss": 0.2441,
"step": 393
},
{
"epoch": 0.8583877995642701,
"grad_norm": 0.7844340205192566,
"learning_rate": 0.00017978945602904116,
"loss": 0.3635,
"step": 394
},
{
"epoch": 0.8605664488017429,
"grad_norm": 0.6692304611206055,
"learning_rate": 0.00017968505569833173,
"loss": 0.1895,
"step": 395
},
{
"epoch": 0.8627450980392157,
"grad_norm": 0.7669034004211426,
"learning_rate": 0.0001795804168915396,
"loss": 0.1833,
"step": 396
},
{
"epoch": 0.8649237472766884,
"grad_norm": 0.9231857657432556,
"learning_rate": 0.00017947553992182075,
"loss": 0.3172,
"step": 397
},
{
"epoch": 0.8671023965141612,
"grad_norm": 0.9847748875617981,
"learning_rate": 0.00017937042510304392,
"loss": 0.3479,
"step": 398
},
{
"epoch": 0.869281045751634,
"grad_norm": 0.719962477684021,
"learning_rate": 0.00017926507274978963,
"loss": 0.2653,
"step": 399
},
{
"epoch": 0.8714596949891068,
"grad_norm": 0.7490435242652893,
"learning_rate": 0.00017915948317734942,
"loss": 0.2387,
"step": 400
},
{
"epoch": 0.8736383442265795,
"grad_norm": 0.744804322719574,
"learning_rate": 0.00017905365670172458,
"loss": 0.2368,
"step": 401
},
{
"epoch": 0.8758169934640523,
"grad_norm": 0.6886241436004639,
"learning_rate": 0.00017894759363962554,
"loss": 0.2125,
"step": 402
},
{
"epoch": 0.8779956427015251,
"grad_norm": 0.7730022072792053,
"learning_rate": 0.0001788412943084707,
"loss": 0.2253,
"step": 403
},
{
"epoch": 0.8801742919389978,
"grad_norm": 0.8058857321739197,
"learning_rate": 0.00017873475902638553,
"loss": 0.2845,
"step": 404
},
{
"epoch": 0.8823529411764706,
"grad_norm": 0.6579281091690063,
"learning_rate": 0.0001786279881122017,
"loss": 0.1922,
"step": 405
},
{
"epoch": 0.8845315904139434,
"grad_norm": 0.6769328713417053,
"learning_rate": 0.00017852098188545602,
"loss": 0.1609,
"step": 406
},
{
"epoch": 0.8867102396514162,
"grad_norm": 0.7010467648506165,
"learning_rate": 0.0001784137406663895,
"loss": 0.2387,
"step": 407
},
{
"epoch": 0.8888888888888888,
"grad_norm": 0.7626838088035583,
"learning_rate": 0.00017830626477594654,
"loss": 0.237,
"step": 408
},
{
"epoch": 0.8910675381263616,
"grad_norm": 0.8782392144203186,
"learning_rate": 0.0001781985545357737,
"loss": 0.3278,
"step": 409
},
{
"epoch": 0.8932461873638344,
"grad_norm": 0.6178898215293884,
"learning_rate": 0.00017809061026821896,
"loss": 0.1584,
"step": 410
},
{
"epoch": 0.8954248366013072,
"grad_norm": 0.7101506590843201,
"learning_rate": 0.00017798243229633068,
"loss": 0.2384,
"step": 411
},
{
"epoch": 0.8976034858387799,
"grad_norm": 0.5807170867919922,
"learning_rate": 0.00017787402094385666,
"loss": 0.1256,
"step": 412
},
{
"epoch": 0.8997821350762527,
"grad_norm": 0.6202595829963684,
"learning_rate": 0.00017776537653524307,
"loss": 0.168,
"step": 413
},
{
"epoch": 0.9019607843137255,
"grad_norm": 0.5887298583984375,
"learning_rate": 0.00017765649939563365,
"loss": 0.1411,
"step": 414
},
{
"epoch": 0.9041394335511983,
"grad_norm": 0.745570957660675,
"learning_rate": 0.0001775473898508685,
"loss": 0.2842,
"step": 415
},
{
"epoch": 0.906318082788671,
"grad_norm": 0.8904745578765869,
"learning_rate": 0.00017743804822748345,
"loss": 0.3217,
"step": 416
},
{
"epoch": 0.9084967320261438,
"grad_norm": 0.8730976581573486,
"learning_rate": 0.0001773284748527087,
"loss": 0.3403,
"step": 417
},
{
"epoch": 0.9106753812636166,
"grad_norm": 0.7911598086357117,
"learning_rate": 0.00017721867005446806,
"loss": 0.2731,
"step": 418
},
{
"epoch": 0.9128540305010894,
"grad_norm": 0.6702750325202942,
"learning_rate": 0.00017710863416137805,
"loss": 0.2073,
"step": 419
},
{
"epoch": 0.9150326797385621,
"grad_norm": 0.5198416113853455,
"learning_rate": 0.00017699836750274662,
"loss": 0.129,
"step": 420
},
{
"epoch": 0.9172113289760349,
"grad_norm": 0.6400766372680664,
"learning_rate": 0.00017688787040857245,
"loss": 0.1682,
"step": 421
},
{
"epoch": 0.9193899782135077,
"grad_norm": 0.7851924896240234,
"learning_rate": 0.00017677714320954378,
"loss": 0.221,
"step": 422
},
{
"epoch": 0.9215686274509803,
"grad_norm": 0.7246972918510437,
"learning_rate": 0.0001766661862370376,
"loss": 0.2438,
"step": 423
},
{
"epoch": 0.9237472766884531,
"grad_norm": 0.7575098872184753,
"learning_rate": 0.00017655499982311847,
"loss": 0.2633,
"step": 424
},
{
"epoch": 0.9259259259259259,
"grad_norm": 0.902695894241333,
"learning_rate": 0.0001764435843005376,
"loss": 0.3567,
"step": 425
},
{
"epoch": 0.9281045751633987,
"grad_norm": 0.7910832762718201,
"learning_rate": 0.00017633194000273188,
"loss": 0.2578,
"step": 426
},
{
"epoch": 0.9302832244008714,
"grad_norm": 0.7221683263778687,
"learning_rate": 0.00017622006726382287,
"loss": 0.2075,
"step": 427
},
{
"epoch": 0.9324618736383442,
"grad_norm": 0.7584638595581055,
"learning_rate": 0.00017610796641861581,
"loss": 0.2313,
"step": 428
},
{
"epoch": 0.934640522875817,
"grad_norm": 0.6272245645523071,
"learning_rate": 0.00017599563780259858,
"loss": 0.1876,
"step": 429
},
{
"epoch": 0.9368191721132898,
"grad_norm": 0.8224185705184937,
"learning_rate": 0.0001758830817519407,
"loss": 0.2558,
"step": 430
},
{
"epoch": 0.9389978213507625,
"grad_norm": 0.7095481157302856,
"learning_rate": 0.00017577029860349233,
"loss": 0.2256,
"step": 431
},
{
"epoch": 0.9411764705882353,
"grad_norm": 0.7715668678283691,
"learning_rate": 0.00017565728869478337,
"loss": 0.2483,
"step": 432
},
{
"epoch": 0.9433551198257081,
"grad_norm": 0.905522882938385,
"learning_rate": 0.00017554405236402222,
"loss": 0.2525,
"step": 433
},
{
"epoch": 0.9455337690631809,
"grad_norm": 0.6268091797828674,
"learning_rate": 0.00017543058995009503,
"loss": 0.1703,
"step": 434
},
{
"epoch": 0.9477124183006536,
"grad_norm": 0.730185329914093,
"learning_rate": 0.0001753169017925644,
"loss": 0.2187,
"step": 435
},
{
"epoch": 0.9498910675381264,
"grad_norm": 0.7279319763183594,
"learning_rate": 0.00017520298823166873,
"loss": 0.2584,
"step": 436
},
{
"epoch": 0.9520697167755992,
"grad_norm": 0.7050124406814575,
"learning_rate": 0.00017508884960832076,
"loss": 0.1723,
"step": 437
},
{
"epoch": 0.954248366013072,
"grad_norm": 0.7544010281562805,
"learning_rate": 0.000174974486264107,
"loss": 0.1899,
"step": 438
},
{
"epoch": 0.9564270152505446,
"grad_norm": 0.8714267015457153,
"learning_rate": 0.00017485989854128627,
"loss": 0.2518,
"step": 439
},
{
"epoch": 0.9586056644880174,
"grad_norm": 0.7128406167030334,
"learning_rate": 0.00017474508678278915,
"loss": 0.1832,
"step": 440
},
{
"epoch": 0.9607843137254902,
"grad_norm": 0.7446244955062866,
"learning_rate": 0.00017463005133221645,
"loss": 0.2089,
"step": 441
},
{
"epoch": 0.9629629629629629,
"grad_norm": 0.7556454539299011,
"learning_rate": 0.00017451479253383857,
"loss": 0.2327,
"step": 442
},
{
"epoch": 0.9651416122004357,
"grad_norm": 0.5932292938232422,
"learning_rate": 0.00017439931073259427,
"loss": 0.1094,
"step": 443
},
{
"epoch": 0.9673202614379085,
"grad_norm": 0.909900426864624,
"learning_rate": 0.00017428360627408978,
"loss": 0.2175,
"step": 444
},
{
"epoch": 0.9694989106753813,
"grad_norm": 0.7743591666221619,
"learning_rate": 0.00017416767950459766,
"loss": 0.1818,
"step": 445
},
{
"epoch": 0.971677559912854,
"grad_norm": 0.7306420207023621,
"learning_rate": 0.0001740515307710557,
"loss": 0.1957,
"step": 446
},
{
"epoch": 0.9738562091503268,
"grad_norm": 0.5480270981788635,
"learning_rate": 0.00017393516042106603,
"loss": 0.1477,
"step": 447
},
{
"epoch": 0.9760348583877996,
"grad_norm": 0.7661142945289612,
"learning_rate": 0.000173818568802894,
"loss": 0.2177,
"step": 448
},
{
"epoch": 0.9782135076252724,
"grad_norm": 0.8411223292350769,
"learning_rate": 0.00017370175626546728,
"loss": 0.2104,
"step": 449
},
{
"epoch": 0.9803921568627451,
"grad_norm": 0.816074013710022,
"learning_rate": 0.00017358472315837447,
"loss": 0.2349,
"step": 450
},
{
"epoch": 0.9825708061002179,
"grad_norm": 1.0009855031967163,
"learning_rate": 0.00017346746983186442,
"loss": 0.2917,
"step": 451
},
{
"epoch": 0.9847494553376906,
"grad_norm": 0.7690507769584656,
"learning_rate": 0.00017334999663684504,
"loss": 0.2216,
"step": 452
},
{
"epoch": 0.9869281045751634,
"grad_norm": 0.7864702939987183,
"learning_rate": 0.00017323230392488222,
"loss": 0.2601,
"step": 453
},
{
"epoch": 0.9891067538126361,
"grad_norm": 0.8309422731399536,
"learning_rate": 0.00017311439204819874,
"loss": 0.2257,
"step": 454
},
{
"epoch": 0.9912854030501089,
"grad_norm": 0.7395288944244385,
"learning_rate": 0.00017299626135967343,
"loss": 0.1859,
"step": 455
},
{
"epoch": 0.9934640522875817,
"grad_norm": 0.6524999737739563,
"learning_rate": 0.00017287791221283984,
"loss": 0.1523,
"step": 456
},
{
"epoch": 0.9956427015250545,
"grad_norm": 0.6195773482322693,
"learning_rate": 0.00017275934496188534,
"loss": 0.1307,
"step": 457
},
{
"epoch": 0.9978213507625272,
"grad_norm": 0.8236491084098816,
"learning_rate": 0.00017264055996165007,
"loss": 0.2214,
"step": 458
},
{
"epoch": 1.0,
"grad_norm": 0.5916262865066528,
"learning_rate": 0.00017252155756762575,
"loss": 0.1633,
"step": 459
},
{
"epoch": 1.0021786492374727,
"grad_norm": 1.0080763101577759,
"learning_rate": 0.00017240233813595478,
"loss": 0.2227,
"step": 460
},
{
"epoch": 1.0021786492374727,
"eval_loss": 0.3434114158153534,
"eval_runtime": 0.9376,
"eval_samples_per_second": 182.377,
"eval_steps_per_second": 13.865,
"step": 460
},
{
"epoch": 1.0043572984749456,
"grad_norm": 0.8395419716835022,
"learning_rate": 0.00017228290202342907,
"loss": 0.1963,
"step": 461
},
{
"epoch": 1.0065359477124183,
"grad_norm": 0.8859379291534424,
"learning_rate": 0.000172163249587489,
"loss": 0.2992,
"step": 462
},
{
"epoch": 1.008714596949891,
"grad_norm": 0.8899995684623718,
"learning_rate": 0.00017204338118622232,
"loss": 0.2504,
"step": 463
},
{
"epoch": 1.0108932461873639,
"grad_norm": 0.7307553291320801,
"learning_rate": 0.00017192329717836315,
"loss": 0.166,
"step": 464
},
{
"epoch": 1.0130718954248366,
"grad_norm": 0.9266228079795837,
"learning_rate": 0.00017180299792329086,
"loss": 0.2422,
"step": 465
},
{
"epoch": 1.0152505446623095,
"grad_norm": 0.7402477264404297,
"learning_rate": 0.00017168248378102892,
"loss": 0.1896,
"step": 466
},
{
"epoch": 1.0174291938997821,
"grad_norm": 0.7960779070854187,
"learning_rate": 0.00017156175511224403,
"loss": 0.254,
"step": 467
},
{
"epoch": 1.0196078431372548,
"grad_norm": 0.6837732791900635,
"learning_rate": 0.0001714408122782448,
"loss": 0.1974,
"step": 468
},
{
"epoch": 1.0217864923747277,
"grad_norm": 0.7475525736808777,
"learning_rate": 0.00017131965564098084,
"loss": 0.1873,
"step": 469
},
{
"epoch": 1.0239651416122004,
"grad_norm": 0.7993423342704773,
"learning_rate": 0.0001711982855630416,
"loss": 0.1735,
"step": 470
},
{
"epoch": 1.026143790849673,
"grad_norm": 0.803156852722168,
"learning_rate": 0.00017107670240765527,
"loss": 0.2003,
"step": 471
},
{
"epoch": 1.0021786492374727,
"grad_norm": 0.7278410196304321,
"learning_rate": 0.00017095490653868778,
"loss": 0.1855,
"step": 472
},
{
"epoch": 1.0043572984749456,
"grad_norm": 0.6922372579574585,
"learning_rate": 0.0001708328983206416,
"loss": 0.1877,
"step": 473
},
{
"epoch": 1.0065359477124183,
"grad_norm": 0.7497193813323975,
"learning_rate": 0.00017071067811865476,
"loss": 0.1668,
"step": 474
},
{
"epoch": 1.008714596949891,
"grad_norm": 0.7669565081596375,
"learning_rate": 0.00017058824629849966,
"loss": 0.1425,
"step": 475
},
{
"epoch": 1.0108932461873639,
"grad_norm": 0.9330861568450928,
"learning_rate": 0.000170465603226582,
"loss": 0.2458,
"step": 476
},
{
"epoch": 1.0130718954248366,
"grad_norm": 0.6653205156326294,
"learning_rate": 0.00017034274926993977,
"loss": 0.1898,
"step": 477
},
{
"epoch": 1.0152505446623095,
"grad_norm": 0.6905311346054077,
"learning_rate": 0.00017021968479624203,
"loss": 0.1874,
"step": 478
},
{
"epoch": 1.0174291938997821,
"grad_norm": 0.5661349892616272,
"learning_rate": 0.00017009641017378784,
"loss": 0.109,
"step": 479
},
{
"epoch": 1.0196078431372548,
"grad_norm": 0.6994882822036743,
"learning_rate": 0.00016997292577150528,
"loss": 0.1615,
"step": 480
},
{
"epoch": 1.0217864923747277,
"grad_norm": 0.8208505511283875,
"learning_rate": 0.00016984923195895011,
"loss": 0.2553,
"step": 481
},
{
"epoch": 1.0239651416122004,
"grad_norm": 0.6243330240249634,
"learning_rate": 0.0001697253291063049,
"loss": 0.1638,
"step": 482
},
{
"epoch": 1.026143790849673,
"grad_norm": 0.7907949090003967,
"learning_rate": 0.00016960121758437775,
"loss": 0.2016,
"step": 483
},
{
"epoch": 1.028322440087146,
"grad_norm": 0.8235294222831726,
"learning_rate": 0.0001694768977646013,
"loss": 0.2052,
"step": 484
},
{
"epoch": 1.0305010893246187,
"grad_norm": 0.6623209714889526,
"learning_rate": 0.00016935237001903158,
"loss": 0.1608,
"step": 485
},
{
"epoch": 1.0326797385620916,
"grad_norm": 0.7501957416534424,
"learning_rate": 0.00016922763472034685,
"loss": 0.1528,
"step": 486
},
{
"epoch": 1.0348583877995643,
"grad_norm": 0.7507126331329346,
"learning_rate": 0.00016910269224184655,
"loss": 0.1717,
"step": 487
},
{
"epoch": 1.037037037037037,
"grad_norm": 0.6387655138969421,
"learning_rate": 0.00016897754295745008,
"loss": 0.1594,
"step": 488
},
{
"epoch": 1.0392156862745099,
"grad_norm": 0.7997871041297913,
"learning_rate": 0.00016885218724169588,
"loss": 0.2297,
"step": 489
},
{
"epoch": 1.0413943355119826,
"grad_norm": 0.7794216871261597,
"learning_rate": 0.00016872662546974008,
"loss": 0.2092,
"step": 490
},
{
"epoch": 1.0435729847494553,
"grad_norm": 0.7575411200523376,
"learning_rate": 0.00016860085801735552,
"loss": 0.1465,
"step": 491
},
{
"epoch": 1.0457516339869282,
"grad_norm": 0.876860499382019,
"learning_rate": 0.0001684748852609306,
"loss": 0.2041,
"step": 492
},
{
"epoch": 1.0479302832244008,
"grad_norm": 0.8270288109779358,
"learning_rate": 0.00016834870757746813,
"loss": 0.1615,
"step": 493
},
{
"epoch": 1.0501089324618735,
"grad_norm": 0.7617962956428528,
"learning_rate": 0.00016822232534458416,
"loss": 0.1708,
"step": 494
},
{
"epoch": 1.0522875816993464,
"grad_norm": 0.7531927227973938,
"learning_rate": 0.00016809573894050703,
"loss": 0.2026,
"step": 495
},
{
"epoch": 1.0544662309368191,
"grad_norm": 0.6922577619552612,
"learning_rate": 0.00016796894874407595,
"loss": 0.1573,
"step": 496
},
{
"epoch": 1.056644880174292,
"grad_norm": 0.75091153383255,
"learning_rate": 0.00016784195513474013,
"loss": 0.2269,
"step": 497
},
{
"epoch": 1.0588235294117647,
"grad_norm": 0.6911150813102722,
"learning_rate": 0.00016771475849255754,
"loss": 0.1471,
"step": 498
},
{
"epoch": 1.0610021786492374,
"grad_norm": 0.8379197120666504,
"learning_rate": 0.0001675873591981937,
"loss": 0.2183,
"step": 499
},
{
"epoch": 1.0631808278867103,
"grad_norm": 0.6960466504096985,
"learning_rate": 0.0001674597576329207,
"loss": 0.1918,
"step": 500
},
{
"epoch": 1.065359477124183,
"grad_norm": 0.7402621507644653,
"learning_rate": 0.00016733195417861592,
"loss": 0.1682,
"step": 501
},
{
"epoch": 1.0675381263616557,
"grad_norm": 0.7742673754692078,
"learning_rate": 0.00016720394921776097,
"loss": 0.1559,
"step": 502
},
{
"epoch": 1.0697167755991286,
"grad_norm": 0.864452064037323,
"learning_rate": 0.00016707574313344048,
"loss": 0.1899,
"step": 503
},
{
"epoch": 1.0718954248366013,
"grad_norm": 0.8253500461578369,
"learning_rate": 0.000166947336309341,
"loss": 0.1949,
"step": 504
},
{
"epoch": 1.074074074074074,
"grad_norm": 0.8021739721298218,
"learning_rate": 0.00016681872912974988,
"loss": 0.2002,
"step": 505
},
{
"epoch": 1.0762527233115469,
"grad_norm": 0.6518934965133667,
"learning_rate": 0.00016668992197955398,
"loss": 0.1708,
"step": 506
},
{
"epoch": 1.0784313725490196,
"grad_norm": 0.7878454923629761,
"learning_rate": 0.0001665609152442388,
"loss": 0.1642,
"step": 507
},
{
"epoch": 1.0806100217864925,
"grad_norm": 0.7697330713272095,
"learning_rate": 0.00016643170930988698,
"loss": 0.2059,
"step": 508
},
{
"epoch": 1.0827886710239651,
"grad_norm": 0.7072964310646057,
"learning_rate": 0.0001663023045631773,
"loss": 0.1654,
"step": 509
},
{
"epoch": 1.0849673202614378,
"grad_norm": 0.7548463940620422,
"learning_rate": 0.00016617270139138371,
"loss": 0.1562,
"step": 510
},
{
"epoch": 1.0871459694989107,
"grad_norm": 0.8657066226005554,
"learning_rate": 0.00016604290018237377,
"loss": 0.2446,
"step": 511
},
{
"epoch": 1.0893246187363834,
"grad_norm": 0.6990319490432739,
"learning_rate": 0.0001659129013246079,
"loss": 0.1481,
"step": 512
},
{
"epoch": 1.091503267973856,
"grad_norm": 0.7399086952209473,
"learning_rate": 0.0001657827052071379,
"loss": 0.1822,
"step": 513
},
{
"epoch": 1.093681917211329,
"grad_norm": 0.47281554341316223,
"learning_rate": 0.000165652312219606,
"loss": 0.077,
"step": 514
},
{
"epoch": 1.0958605664488017,
"grad_norm": 0.7572283744812012,
"learning_rate": 0.00016552172275224357,
"loss": 0.1543,
"step": 515
},
{
"epoch": 1.0980392156862746,
"grad_norm": 0.5454416275024414,
"learning_rate": 0.00016539093719586994,
"loss": 0.0874,
"step": 516
},
{
"epoch": 1.1002178649237473,
"grad_norm": 0.7739827036857605,
"learning_rate": 0.00016525995594189144,
"loss": 0.1887,
"step": 517
},
{
"epoch": 1.10239651416122,
"grad_norm": 0.9118095636367798,
"learning_rate": 0.00016512877938229986,
"loss": 0.2067,
"step": 518
},
{
"epoch": 1.1045751633986929,
"grad_norm": 0.6182532906532288,
"learning_rate": 0.0001649974079096717,
"loss": 0.0981,
"step": 519
},
{
"epoch": 1.1067538126361656,
"grad_norm": 0.8062636256217957,
"learning_rate": 0.0001648658419171666,
"loss": 0.1637,
"step": 520
},
{
"epoch": 1.1089324618736383,
"grad_norm": 0.5750370025634766,
"learning_rate": 0.00016473408179852646,
"loss": 0.091,
"step": 521
},
{
"epoch": 1.1111111111111112,
"grad_norm": 1.024340271949768,
"learning_rate": 0.00016460212794807414,
"loss": 0.206,
"step": 522
},
{
"epoch": 1.1132897603485838,
"grad_norm": 0.6996582746505737,
"learning_rate": 0.00016446998076071224,
"loss": 0.1382,
"step": 523
},
{
"epoch": 1.1154684095860565,
"grad_norm": 0.7497454881668091,
"learning_rate": 0.00016433764063192194,
"loss": 0.2302,
"step": 524
},
{
"epoch": 1.1176470588235294,
"grad_norm": 0.7948709726333618,
"learning_rate": 0.00016420510795776196,
"loss": 0.2015,
"step": 525
},
{
"epoch": 1.1198257080610021,
"grad_norm": 0.5821182131767273,
"learning_rate": 0.00016407238313486712,
"loss": 0.1278,
"step": 526
},
{
"epoch": 1.122004357298475,
"grad_norm": 0.773891031742096,
"learning_rate": 0.00016393946656044744,
"loss": 0.1889,
"step": 527
},
{
"epoch": 1.1241830065359477,
"grad_norm": 0.7371392846107483,
"learning_rate": 0.0001638063586322866,
"loss": 0.1965,
"step": 528
},
{
"epoch": 1.1263616557734204,
"grad_norm": 0.7355236411094666,
"learning_rate": 0.0001636730597487412,
"loss": 0.1587,
"step": 529
},
{
"epoch": 1.1285403050108933,
"grad_norm": 0.7013958096504211,
"learning_rate": 0.0001635395703087391,
"loss": 0.1596,
"step": 530
},
{
"epoch": 1.130718954248366,
"grad_norm": 0.707258403301239,
"learning_rate": 0.00016340589071177854,
"loss": 0.1792,
"step": 531
},
{
"epoch": 1.132897603485839,
"grad_norm": 0.672795295715332,
"learning_rate": 0.00016327202135792685,
"loss": 0.1562,
"step": 532
},
{
"epoch": 1.1350762527233116,
"grad_norm": 0.6543575525283813,
"learning_rate": 0.00016313796264781925,
"loss": 0.1348,
"step": 533
},
{
"epoch": 1.1372549019607843,
"grad_norm": 0.755180299282074,
"learning_rate": 0.00016300371498265763,
"loss": 0.1645,
"step": 534
},
{
"epoch": 1.1394335511982572,
"grad_norm": 0.7156417965888977,
"learning_rate": 0.0001628692787642094,
"loss": 0.1231,
"step": 535
},
{
"epoch": 1.1416122004357299,
"grad_norm": 0.7680862545967102,
"learning_rate": 0.00016273465439480618,
"loss": 0.1644,
"step": 536
},
{
"epoch": 1.1437908496732025,
"grad_norm": 0.6331315040588379,
"learning_rate": 0.00016259984227734285,
"loss": 0.1276,
"step": 537
},
{
"epoch": 1.1459694989106755,
"grad_norm": 0.8776397109031677,
"learning_rate": 0.000162464842815276,
"loss": 0.1976,
"step": 538
},
{
"epoch": 1.1481481481481481,
"grad_norm": 0.9892627000808716,
"learning_rate": 0.00016232965641262297,
"loss": 0.2732,
"step": 539
},
{
"epoch": 1.1503267973856208,
"grad_norm": 0.6039031147956848,
"learning_rate": 0.00016219428347396053,
"loss": 0.126,
"step": 540
},
{
"epoch": 1.1525054466230937,
"grad_norm": 0.757251501083374,
"learning_rate": 0.0001620587244044237,
"loss": 0.1848,
"step": 541
},
{
"epoch": 1.1546840958605664,
"grad_norm": 0.6978369355201721,
"learning_rate": 0.0001619229796097046,
"loss": 0.1364,
"step": 542
},
{
"epoch": 1.156862745098039,
"grad_norm": 0.727165937423706,
"learning_rate": 0.00016178704949605113,
"loss": 0.144,
"step": 543
},
{
"epoch": 1.159041394335512,
"grad_norm": 0.784420371055603,
"learning_rate": 0.0001616509344702658,
"loss": 0.1917,
"step": 544
},
{
"epoch": 1.1612200435729847,
"grad_norm": 0.8775203824043274,
"learning_rate": 0.00016151463493970446,
"loss": 0.1981,
"step": 545
},
{
"epoch": 1.1633986928104576,
"grad_norm": 0.8711812496185303,
"learning_rate": 0.00016137815131227526,
"loss": 0.1516,
"step": 546
},
{
"epoch": 1.1655773420479303,
"grad_norm": 0.5256686806678772,
"learning_rate": 0.00016124148399643723,
"loss": 0.09,
"step": 547
},
{
"epoch": 1.167755991285403,
"grad_norm": 0.7360031008720398,
"learning_rate": 0.00016110463340119913,
"loss": 0.1377,
"step": 548
},
{
"epoch": 1.1699346405228759,
"grad_norm": 0.7044847011566162,
"learning_rate": 0.0001609675999361182,
"loss": 0.117,
"step": 549
},
{
"epoch": 1.1721132897603486,
"grad_norm": 0.8387324810028076,
"learning_rate": 0.000160830384011299,
"loss": 0.2031,
"step": 550
},
{
"epoch": 1.1742919389978215,
"grad_norm": 0.8079413175582886,
"learning_rate": 0.00016069298603739216,
"loss": 0.1536,
"step": 551
},
{
"epoch": 1.1764705882352942,
"grad_norm": 0.8063020706176758,
"learning_rate": 0.00016055540642559305,
"loss": 0.1378,
"step": 552
},
{
"epoch": 1.1786492374727668,
"grad_norm": 0.6223093271255493,
"learning_rate": 0.0001604176455876408,
"loss": 0.1464,
"step": 553
},
{
"epoch": 1.1808278867102397,
"grad_norm": 0.6043615341186523,
"learning_rate": 0.00016027970393581666,
"loss": 0.1304,
"step": 554
},
{
"epoch": 1.1830065359477124,
"grad_norm": 0.6139847636222839,
"learning_rate": 0.00016014158188294326,
"loss": 0.1109,
"step": 555
},
{
"epoch": 1.1851851851851851,
"grad_norm": 0.790346622467041,
"learning_rate": 0.00016000327984238292,
"loss": 0.1916,
"step": 556
},
{
"epoch": 1.187363834422658,
"grad_norm": 0.5799733996391296,
"learning_rate": 0.00015986479822803671,
"loss": 0.1181,
"step": 557
},
{
"epoch": 1.1895424836601307,
"grad_norm": 0.8529340624809265,
"learning_rate": 0.00015972613745434314,
"loss": 0.1856,
"step": 558
},
{
"epoch": 1.1917211328976034,
"grad_norm": 0.7188248038291931,
"learning_rate": 0.00015958729793627682,
"loss": 0.1505,
"step": 559
},
{
"epoch": 1.1938997821350763,
"grad_norm": 1.0040041208267212,
"learning_rate": 0.0001594482800893474,
"loss": 0.2638,
"step": 560
},
{
"epoch": 1.196078431372549,
"grad_norm": 0.6917260885238647,
"learning_rate": 0.00015930908432959808,
"loss": 0.1185,
"step": 561
},
{
"epoch": 1.1982570806100217,
"grad_norm": 0.7081711292266846,
"learning_rate": 0.00015916971107360461,
"loss": 0.1343,
"step": 562
},
{
"epoch": 1.2004357298474946,
"grad_norm": 0.8055239319801331,
"learning_rate": 0.0001590301607384739,
"loss": 0.2216,
"step": 563
},
{
"epoch": 1.2026143790849673,
"grad_norm": 0.9081418514251709,
"learning_rate": 0.00015889043374184286,
"loss": 0.2004,
"step": 564
},
{
"epoch": 1.2047930283224402,
"grad_norm": 0.7224873304367065,
"learning_rate": 0.00015875053050187706,
"loss": 0.1824,
"step": 565
},
{
"epoch": 1.2069716775599129,
"grad_norm": 0.515967607498169,
"learning_rate": 0.00015861045143726946,
"loss": 0.0807,
"step": 566
},
{
"epoch": 1.2091503267973855,
"grad_norm": 0.6762127876281738,
"learning_rate": 0.0001584701969672393,
"loss": 0.1481,
"step": 567
},
{
"epoch": 1.2113289760348585,
"grad_norm": 0.8208984732627869,
"learning_rate": 0.00015832976751153078,
"loss": 0.1828,
"step": 568
},
{
"epoch": 1.2135076252723311,
"grad_norm": 0.6228875517845154,
"learning_rate": 0.00015818916349041165,
"loss": 0.1115,
"step": 569
},
{
"epoch": 1.215686274509804,
"grad_norm": 0.7444784641265869,
"learning_rate": 0.0001580483853246723,
"loss": 0.1559,
"step": 570
},
{
"epoch": 1.2178649237472767,
"grad_norm": 0.8666041493415833,
"learning_rate": 0.00015790743343562408,
"loss": 0.1908,
"step": 571
},
{
"epoch": 1.2200435729847494,
"grad_norm": 0.7155983448028564,
"learning_rate": 0.0001577663082450984,
"loss": 0.1807,
"step": 572
},
{
"epoch": 1.2222222222222223,
"grad_norm": 0.7652324438095093,
"learning_rate": 0.0001576250101754452,
"loss": 0.1689,
"step": 573
},
{
"epoch": 1.224400871459695,
"grad_norm": 0.5946928262710571,
"learning_rate": 0.00015748353964953186,
"loss": 0.0888,
"step": 574
},
{
"epoch": 1.2265795206971677,
"grad_norm": 0.6897603869438171,
"learning_rate": 0.00015734189709074188,
"loss": 0.1381,
"step": 575
},
{
"epoch": 1.2265795206971677,
"eval_loss": 0.3376106917858124,
"eval_runtime": 0.9372,
"eval_samples_per_second": 182.463,
"eval_steps_per_second": 13.871,
"step": 575
},
{
"epoch": 1.2287581699346406,
"grad_norm": 0.700342059135437,
"learning_rate": 0.00015720008292297364,
"loss": 0.1898,
"step": 576
},
{
"epoch": 1.2309368191721133,
"grad_norm": 0.6738066077232361,
"learning_rate": 0.00015705809757063897,
"loss": 0.1543,
"step": 577
},
{
"epoch": 1.233115468409586,
"grad_norm": 0.5636610984802246,
"learning_rate": 0.00015691594145866215,
"loss": 0.1218,
"step": 578
},
{
"epoch": 1.2352941176470589,
"grad_norm": 0.7896304130554199,
"learning_rate": 0.00015677361501247844,
"loss": 0.1856,
"step": 579
},
{
"epoch": 1.2374727668845316,
"grad_norm": 0.7217385768890381,
"learning_rate": 0.00015663111865803285,
"loss": 0.1399,
"step": 580
},
{
"epoch": 1.2396514161220042,
"grad_norm": 0.7752529978752136,
"learning_rate": 0.00015648845282177892,
"loss": 0.1501,
"step": 581
},
{
"epoch": 1.2418300653594772,
"grad_norm": 0.9684514403343201,
"learning_rate": 0.00015634561793067737,
"loss": 0.246,
"step": 582
},
{
"epoch": 1.2440087145969498,
"grad_norm": 0.6930341124534607,
"learning_rate": 0.0001562026144121949,
"loss": 0.145,
"step": 583
},
{
"epoch": 1.2461873638344227,
"grad_norm": 0.701931357383728,
"learning_rate": 0.00015605944269430277,
"loss": 0.1354,
"step": 584
},
{
"epoch": 1.2483660130718954,
"grad_norm": 0.6491216421127319,
"learning_rate": 0.00015591610320547574,
"loss": 0.1297,
"step": 585
},
{
"epoch": 1.2505446623093681,
"grad_norm": 0.6969993710517883,
"learning_rate": 0.00015577259637469058,
"loss": 0.1387,
"step": 586
},
{
"epoch": 1.252723311546841,
"grad_norm": 0.915093183517456,
"learning_rate": 0.00015562892263142487,
"loss": 0.2019,
"step": 587
},
{
"epoch": 1.2549019607843137,
"grad_norm": 0.5827008485794067,
"learning_rate": 0.00015548508240565583,
"loss": 0.0996,
"step": 588
},
{
"epoch": 1.2570806100217866,
"grad_norm": 0.6576254963874817,
"learning_rate": 0.00015534107612785874,
"loss": 0.1374,
"step": 589
},
{
"epoch": 1.2592592592592593,
"grad_norm": 0.7204159498214722,
"learning_rate": 0.00015519690422900593,
"loss": 0.173,
"step": 590
},
{
"epoch": 1.261437908496732,
"grad_norm": 0.8468919396400452,
"learning_rate": 0.00015505256714056536,
"loss": 0.208,
"step": 591
},
{
"epoch": 1.263616557734205,
"grad_norm": 0.7702469229698181,
"learning_rate": 0.00015490806529449945,
"loss": 0.1471,
"step": 592
},
{
"epoch": 1.2657952069716776,
"grad_norm": 0.8383822441101074,
"learning_rate": 0.00015476339912326356,
"loss": 0.2016,
"step": 593
},
{
"epoch": 1.2679738562091503,
"grad_norm": 0.9662691354751587,
"learning_rate": 0.0001546185690598049,
"loss": 0.1543,
"step": 594
},
{
"epoch": 1.2701525054466232,
"grad_norm": 0.7026709914207458,
"learning_rate": 0.00015447357553756115,
"loss": 0.1429,
"step": 595
},
{
"epoch": 1.2723311546840959,
"grad_norm": 0.4856622815132141,
"learning_rate": 0.0001543284189904592,
"loss": 0.0799,
"step": 596
},
{
"epoch": 1.2745098039215685,
"grad_norm": 0.5788432955741882,
"learning_rate": 0.0001541830998529138,
"loss": 0.1108,
"step": 597
},
{
"epoch": 1.2766884531590414,
"grad_norm": 0.6814302802085876,
"learning_rate": 0.00015403761855982631,
"loss": 0.1247,
"step": 598
},
{
"epoch": 1.2788671023965141,
"grad_norm": 0.834804892539978,
"learning_rate": 0.0001538919755465834,
"loss": 0.1314,
"step": 599
},
{
"epoch": 1.2810457516339868,
"grad_norm": 0.6751343011856079,
"learning_rate": 0.00015374617124905564,
"loss": 0.0955,
"step": 600
},
{
"epoch": 1.2832244008714597,
"grad_norm": 0.6577438116073608,
"learning_rate": 0.0001536002061035964,
"loss": 0.1376,
"step": 601
},
{
"epoch": 1.2854030501089324,
"grad_norm": 0.7378783226013184,
"learning_rate": 0.0001534540805470403,
"loss": 0.1563,
"step": 602
},
{
"epoch": 1.287581699346405,
"grad_norm": 0.7493640780448914,
"learning_rate": 0.00015330779501670217,
"loss": 0.1554,
"step": 603
},
{
"epoch": 1.289760348583878,
"grad_norm": 0.6582645177841187,
"learning_rate": 0.00015316134995037545,
"loss": 0.1107,
"step": 604
},
{
"epoch": 1.2919389978213507,
"grad_norm": 0.7354329824447632,
"learning_rate": 0.00015301474578633116,
"loss": 0.1562,
"step": 605
},
{
"epoch": 1.2941176470588236,
"grad_norm": 0.6455869078636169,
"learning_rate": 0.00015286798296331632,
"loss": 0.1119,
"step": 606
},
{
"epoch": 1.2962962962962963,
"grad_norm": 0.7667369842529297,
"learning_rate": 0.00015272106192055294,
"loss": 0.1678,
"step": 607
},
{
"epoch": 1.2984749455337692,
"grad_norm": 0.7993078827857971,
"learning_rate": 0.00015257398309773633,
"loss": 0.179,
"step": 608
},
{
"epoch": 1.3006535947712419,
"grad_norm": 0.5525803565979004,
"learning_rate": 0.00015242674693503424,
"loss": 0.092,
"step": 609
},
{
"epoch": 1.3028322440087146,
"grad_norm": 0.6435709595680237,
"learning_rate": 0.00015227935387308511,
"loss": 0.1371,
"step": 610
},
{
"epoch": 1.3050108932461875,
"grad_norm": 0.8311496376991272,
"learning_rate": 0.00015213180435299698,
"loss": 0.1474,
"step": 611
},
{
"epoch": 1.3071895424836601,
"grad_norm": 0.8488547205924988,
"learning_rate": 0.00015198409881634617,
"loss": 0.1924,
"step": 612
},
{
"epoch": 1.3093681917211328,
"grad_norm": 0.6138650178909302,
"learning_rate": 0.00015183623770517586,
"loss": 0.1032,
"step": 613
},
{
"epoch": 1.3115468409586057,
"grad_norm": 0.720344066619873,
"learning_rate": 0.0001516882214619949,
"loss": 0.1372,
"step": 614
},
{
"epoch": 1.3137254901960784,
"grad_norm": 0.6820862293243408,
"learning_rate": 0.00015154005052977633,
"loss": 0.1296,
"step": 615
},
{
"epoch": 1.3159041394335511,
"grad_norm": 0.7137134671211243,
"learning_rate": 0.00015139172535195617,
"loss": 0.1669,
"step": 616
},
{
"epoch": 1.318082788671024,
"grad_norm": 0.8309161067008972,
"learning_rate": 0.00015124324637243205,
"loss": 0.1803,
"step": 617
},
{
"epoch": 1.3202614379084967,
"grad_norm": 0.6600125432014465,
"learning_rate": 0.0001510946140355619,
"loss": 0.1044,
"step": 618
},
{
"epoch": 1.3224400871459694,
"grad_norm": 0.5543960928916931,
"learning_rate": 0.00015094582878616257,
"loss": 0.0907,
"step": 619
},
{
"epoch": 1.3246187363834423,
"grad_norm": 0.7326435446739197,
"learning_rate": 0.00015079689106950854,
"loss": 0.1385,
"step": 620
},
{
"epoch": 1.326797385620915,
"grad_norm": 0.6444526314735413,
"learning_rate": 0.00015064780133133067,
"loss": 0.1075,
"step": 621
},
{
"epoch": 1.3289760348583877,
"grad_norm": 0.7079174518585205,
"learning_rate": 0.0001504985600178147,
"loss": 0.128,
"step": 622
},
{
"epoch": 1.3311546840958606,
"grad_norm": 0.7377613186836243,
"learning_rate": 0.00015034916757559997,
"loss": 0.1332,
"step": 623
},
{
"epoch": 1.3333333333333333,
"grad_norm": 0.6091183423995972,
"learning_rate": 0.00015019962445177819,
"loss": 0.0767,
"step": 624
},
{
"epoch": 1.3355119825708062,
"grad_norm": 0.5283781290054321,
"learning_rate": 0.00015004993109389193,
"loss": 0.077,
"step": 625
},
{
"epoch": 1.3376906318082789,
"grad_norm": 0.7368810176849365,
"learning_rate": 0.00014990008794993345,
"loss": 0.1237,
"step": 626
},
{
"epoch": 1.3398692810457518,
"grad_norm": 0.6941757202148438,
"learning_rate": 0.00014975009546834325,
"loss": 0.1072,
"step": 627
},
{
"epoch": 1.3420479302832244,
"grad_norm": 0.6983378529548645,
"learning_rate": 0.00014959995409800873,
"loss": 0.1323,
"step": 628
},
{
"epoch": 1.3442265795206971,
"grad_norm": 0.7478280067443848,
"learning_rate": 0.00014944966428826292,
"loss": 0.1574,
"step": 629
},
{
"epoch": 1.34640522875817,
"grad_norm": 0.6382248997688293,
"learning_rate": 0.00014929922648888308,
"loss": 0.1042,
"step": 630
},
{
"epoch": 1.3485838779956427,
"grad_norm": 0.616068422794342,
"learning_rate": 0.00014914864115008936,
"loss": 0.1151,
"step": 631
},
{
"epoch": 1.3507625272331154,
"grad_norm": 0.6361529231071472,
"learning_rate": 0.0001489979087225434,
"loss": 0.1311,
"step": 632
},
{
"epoch": 1.3529411764705883,
"grad_norm": 0.5792030096054077,
"learning_rate": 0.0001488470296573471,
"loss": 0.1178,
"step": 633
},
{
"epoch": 1.355119825708061,
"grad_norm": 0.6275602579116821,
"learning_rate": 0.00014869600440604118,
"loss": 0.1048,
"step": 634
},
{
"epoch": 1.3572984749455337,
"grad_norm": 0.5165741443634033,
"learning_rate": 0.00014854483342060393,
"loss": 0.0938,
"step": 635
},
{
"epoch": 1.3594771241830066,
"grad_norm": 0.7131217122077942,
"learning_rate": 0.00014839351715344968,
"loss": 0.138,
"step": 636
},
{
"epoch": 1.3616557734204793,
"grad_norm": 0.7741950750350952,
"learning_rate": 0.0001482420560574276,
"loss": 0.139,
"step": 637
},
{
"epoch": 1.363834422657952,
"grad_norm": 0.5395483374595642,
"learning_rate": 0.00014809045058582026,
"loss": 0.0794,
"step": 638
},
{
"epoch": 1.3660130718954249,
"grad_norm": 0.5530499219894409,
"learning_rate": 0.00014793870119234235,
"loss": 0.0871,
"step": 639
},
{
"epoch": 1.3681917211328976,
"grad_norm": 0.5440542697906494,
"learning_rate": 0.00014778680833113926,
"loss": 0.0921,
"step": 640
},
{
"epoch": 1.3703703703703702,
"grad_norm": 0.8153759837150574,
"learning_rate": 0.00014763477245678577,
"loss": 0.1351,
"step": 641
},
{
"epoch": 1.3725490196078431,
"grad_norm": 0.7071852684020996,
"learning_rate": 0.00014748259402428462,
"loss": 0.119,
"step": 642
},
{
"epoch": 1.3747276688453158,
"grad_norm": 0.6104605793952942,
"learning_rate": 0.00014733027348906518,
"loss": 0.0895,
"step": 643
},
{
"epoch": 1.3769063180827887,
"grad_norm": 0.6992385983467102,
"learning_rate": 0.00014717781130698212,
"loss": 0.1122,
"step": 644
},
{
"epoch": 1.3790849673202614,
"grad_norm": 0.6249366402626038,
"learning_rate": 0.00014702520793431404,
"loss": 0.1021,
"step": 645
},
{
"epoch": 1.3812636165577343,
"grad_norm": 0.7198147773742676,
"learning_rate": 0.00014687246382776205,
"loss": 0.128,
"step": 646
},
{
"epoch": 1.383442265795207,
"grad_norm": 0.7576356530189514,
"learning_rate": 0.00014671957944444847,
"loss": 0.1337,
"step": 647
},
{
"epoch": 1.3856209150326797,
"grad_norm": 0.5735803246498108,
"learning_rate": 0.00014656655524191537,
"loss": 0.098,
"step": 648
},
{
"epoch": 1.3877995642701526,
"grad_norm": 0.723497748374939,
"learning_rate": 0.0001464133916781234,
"loss": 0.1192,
"step": 649
},
{
"epoch": 1.3899782135076253,
"grad_norm": 0.4777669310569763,
"learning_rate": 0.0001462600892114501,
"loss": 0.0759,
"step": 650
},
{
"epoch": 1.392156862745098,
"grad_norm": 0.6985266208648682,
"learning_rate": 0.00014610664830068875,
"loss": 0.1172,
"step": 651
},
{
"epoch": 1.3943355119825709,
"grad_norm": 0.6342020034790039,
"learning_rate": 0.00014595306940504716,
"loss": 0.1173,
"step": 652
},
{
"epoch": 1.3965141612200436,
"grad_norm": 0.7338960766792297,
"learning_rate": 0.0001457993529841458,
"loss": 0.1088,
"step": 653
},
{
"epoch": 1.3986928104575163,
"grad_norm": 0.5785338878631592,
"learning_rate": 0.00014564549949801694,
"loss": 0.1058,
"step": 654
},
{
"epoch": 1.4008714596949892,
"grad_norm": 0.7608123421669006,
"learning_rate": 0.00014549150940710285,
"loss": 0.1345,
"step": 655
},
{
"epoch": 1.4030501089324618,
"grad_norm": 0.6694321036338806,
"learning_rate": 0.00014533738317225485,
"loss": 0.1019,
"step": 656
},
{
"epoch": 1.4052287581699345,
"grad_norm": 0.7982690334320068,
"learning_rate": 0.00014518312125473152,
"loss": 0.1409,
"step": 657
},
{
"epoch": 1.4074074074074074,
"grad_norm": 0.5442335605621338,
"learning_rate": 0.00014502872411619757,
"loss": 0.0952,
"step": 658
},
{
"epoch": 1.4095860566448801,
"grad_norm": 0.6602448225021362,
"learning_rate": 0.00014487419221872238,
"loss": 0.0968,
"step": 659
},
{
"epoch": 1.4117647058823528,
"grad_norm": 0.6015776991844177,
"learning_rate": 0.00014471952602477866,
"loss": 0.0994,
"step": 660
},
{
"epoch": 1.4139433551198257,
"grad_norm": 0.7918272614479065,
"learning_rate": 0.000144564725997241,
"loss": 0.1718,
"step": 661
},
{
"epoch": 1.4161220043572984,
"grad_norm": 0.6220570802688599,
"learning_rate": 0.0001444097925993845,
"loss": 0.1123,
"step": 662
},
{
"epoch": 1.4183006535947713,
"grad_norm": 0.6618373394012451,
"learning_rate": 0.0001442547262948835,
"loss": 0.115,
"step": 663
},
{
"epoch": 1.420479302832244,
"grad_norm": 0.7879993915557861,
"learning_rate": 0.0001440995275478099,
"loss": 0.1448,
"step": 664
},
{
"epoch": 1.422657952069717,
"grad_norm": 0.6283368468284607,
"learning_rate": 0.00014394419682263218,
"loss": 0.108,
"step": 665
},
{
"epoch": 1.4248366013071896,
"grad_norm": 0.6305772662162781,
"learning_rate": 0.0001437887345842137,
"loss": 0.104,
"step": 666
},
{
"epoch": 1.4270152505446623,
"grad_norm": 0.6052739024162292,
"learning_rate": 0.00014363314129781137,
"loss": 0.0865,
"step": 667
},
{
"epoch": 1.4291938997821352,
"grad_norm": 0.7560857534408569,
"learning_rate": 0.00014347741742907433,
"loss": 0.1383,
"step": 668
},
{
"epoch": 1.4313725490196079,
"grad_norm": 0.6733012199401855,
"learning_rate": 0.00014332156344404253,
"loss": 0.1277,
"step": 669
},
{
"epoch": 1.4335511982570806,
"grad_norm": 0.6026163101196289,
"learning_rate": 0.00014316557980914528,
"loss": 0.0738,
"step": 670
},
{
"epoch": 1.4357298474945535,
"grad_norm": 0.5354239344596863,
"learning_rate": 0.00014300946699119998,
"loss": 0.0796,
"step": 671
},
{
"epoch": 1.4379084967320261,
"grad_norm": 0.7443612813949585,
"learning_rate": 0.00014285322545741052,
"loss": 0.1208,
"step": 672
},
{
"epoch": 1.4400871459694988,
"grad_norm": 0.5782439708709717,
"learning_rate": 0.00014269685567536614,
"loss": 0.0812,
"step": 673
},
{
"epoch": 1.4422657952069717,
"grad_norm": 0.7431501746177673,
"learning_rate": 0.0001425403581130398,
"loss": 0.1537,
"step": 674
},
{
"epoch": 1.4444444444444444,
"grad_norm": 0.6386312246322632,
"learning_rate": 0.00014238373323878685,
"loss": 0.0868,
"step": 675
},
{
"epoch": 1.446623093681917,
"grad_norm": 0.6820403337478638,
"learning_rate": 0.00014222698152134374,
"loss": 0.0978,
"step": 676
},
{
"epoch": 1.44880174291939,
"grad_norm": 0.5556746125221252,
"learning_rate": 0.00014207010342982642,
"loss": 0.0652,
"step": 677
},
{
"epoch": 1.4509803921568627,
"grad_norm": 0.5212750434875488,
"learning_rate": 0.0001419130994337292,
"loss": 0.0734,
"step": 678
},
{
"epoch": 1.4531590413943354,
"grad_norm": 0.5288644433021545,
"learning_rate": 0.000141755970002923,
"loss": 0.077,
"step": 679
},
{
"epoch": 1.4553376906318083,
"grad_norm": 0.8352729082107544,
"learning_rate": 0.00014159871560765432,
"loss": 0.1627,
"step": 680
},
{
"epoch": 1.457516339869281,
"grad_norm": 0.4913583993911743,
"learning_rate": 0.00014144133671854347,
"loss": 0.0621,
"step": 681
},
{
"epoch": 1.4596949891067539,
"grad_norm": 0.7057034969329834,
"learning_rate": 0.0001412838338065835,
"loss": 0.1276,
"step": 682
},
{
"epoch": 1.4618736383442266,
"grad_norm": 0.4989885985851288,
"learning_rate": 0.00014112620734313847,
"loss": 0.0577,
"step": 683
},
{
"epoch": 1.4640522875816995,
"grad_norm": 0.5323114991188049,
"learning_rate": 0.0001409684577999423,
"loss": 0.0687,
"step": 684
},
{
"epoch": 1.4662309368191722,
"grad_norm": 0.7068952322006226,
"learning_rate": 0.00014081058564909723,
"loss": 0.0867,
"step": 685
},
{
"epoch": 1.4684095860566448,
"grad_norm": 0.48235902190208435,
"learning_rate": 0.00014065259136307242,
"loss": 0.0631,
"step": 686
},
{
"epoch": 1.4705882352941178,
"grad_norm": 0.7386283874511719,
"learning_rate": 0.0001404944754147026,
"loss": 0.1186,
"step": 687
},
{
"epoch": 1.4727668845315904,
"grad_norm": 0.8523921966552734,
"learning_rate": 0.0001403362382771865,
"loss": 0.1548,
"step": 688
},
{
"epoch": 1.4749455337690631,
"grad_norm": 0.6278656125068665,
"learning_rate": 0.00014017788042408564,
"loss": 0.0734,
"step": 689
},
{
"epoch": 1.477124183006536,
"grad_norm": 0.5892928838729858,
"learning_rate": 0.0001400194023293228,
"loss": 0.0718,
"step": 690
},
{
"epoch": 1.477124183006536,
"eval_loss": 0.337152898311615,
"eval_runtime": 0.9372,
"eval_samples_per_second": 182.455,
"eval_steps_per_second": 13.871,
"step": 690
},
{
"epoch": 1.4793028322440087,
"grad_norm": 0.5319302082061768,
"learning_rate": 0.00013986080446718043,
"loss": 0.0951,
"step": 691
},
{
"epoch": 1.4814814814814814,
"grad_norm": 0.43910735845565796,
"learning_rate": 0.00013970208731229974,
"loss": 0.0567,
"step": 692
},
{
"epoch": 1.4836601307189543,
"grad_norm": 0.429105281829834,
"learning_rate": 0.00013954325133967865,
"loss": 0.0557,
"step": 693
},
{
"epoch": 1.485838779956427,
"grad_norm": 0.7734906077384949,
"learning_rate": 0.00013938429702467086,
"loss": 0.1351,
"step": 694
},
{
"epoch": 1.4880174291938997,
"grad_norm": 0.7108368277549744,
"learning_rate": 0.00013922522484298414,
"loss": 0.1096,
"step": 695
},
{
"epoch": 1.4901960784313726,
"grad_norm": 0.5471106767654419,
"learning_rate": 0.000139066035270679,
"loss": 0.1027,
"step": 696
},
{
"epoch": 1.4923747276688453,
"grad_norm": 0.875432014465332,
"learning_rate": 0.00013890672878416737,
"loss": 0.1344,
"step": 697
},
{
"epoch": 1.494553376906318,
"grad_norm": 0.5589685440063477,
"learning_rate": 0.00013874730586021093,
"loss": 0.078,
"step": 698
},
{
"epoch": 1.4967320261437909,
"grad_norm": 0.6980583667755127,
"learning_rate": 0.00013858776697591997,
"loss": 0.0895,
"step": 699
},
{
"epoch": 1.4989106753812635,
"grad_norm": 0.7580942511558533,
"learning_rate": 0.00013842811260875168,
"loss": 0.1083,
"step": 700
},
{
"epoch": 1.5010893246187362,
"grad_norm": 0.6347730159759521,
"learning_rate": 0.000138268343236509,
"loss": 0.0779,
"step": 701
},
{
"epoch": 1.5032679738562091,
"grad_norm": 0.4675781726837158,
"learning_rate": 0.0001381084593373389,
"loss": 0.0651,
"step": 702
},
{
"epoch": 1.505446623093682,
"grad_norm": 0.5469012260437012,
"learning_rate": 0.00013794846138973123,
"loss": 0.0888,
"step": 703
},
{
"epoch": 1.5076252723311547,
"grad_norm": 0.5839260816574097,
"learning_rate": 0.00013778834987251707,
"loss": 0.0861,
"step": 704
},
{
"epoch": 1.5098039215686274,
"grad_norm": 0.6217342615127563,
"learning_rate": 0.00013762812526486743,
"loss": 0.0938,
"step": 705
},
{
"epoch": 1.5119825708061003,
"grad_norm": 0.7009667158126831,
"learning_rate": 0.00013746778804629177,
"loss": 0.0989,
"step": 706
},
{
"epoch": 1.514161220043573,
"grad_norm": 0.7072852253913879,
"learning_rate": 0.0001373073386966365,
"loss": 0.1256,
"step": 707
},
{
"epoch": 1.5163398692810457,
"grad_norm": 0.6454962491989136,
"learning_rate": 0.0001371467776960837,
"loss": 0.116,
"step": 708
},
{
"epoch": 1.5185185185185186,
"grad_norm": 0.5956925749778748,
"learning_rate": 0.00013698610552514956,
"loss": 0.085,
"step": 709
},
{
"epoch": 1.5206971677559913,
"grad_norm": 0.6272696852684021,
"learning_rate": 0.0001368253226646829,
"loss": 0.0776,
"step": 710
},
{
"epoch": 1.522875816993464,
"grad_norm": 0.5974109172821045,
"learning_rate": 0.00013666442959586395,
"loss": 0.0803,
"step": 711
},
{
"epoch": 1.5250544662309369,
"grad_norm": 0.6748714447021484,
"learning_rate": 0.00013650342680020258,
"loss": 0.081,
"step": 712
},
{
"epoch": 1.5272331154684096,
"grad_norm": 0.6029950380325317,
"learning_rate": 0.00013634231475953724,
"loss": 0.0739,
"step": 713
},
{
"epoch": 1.5294117647058822,
"grad_norm": 0.7018197178840637,
"learning_rate": 0.00013618109395603317,
"loss": 0.0993,
"step": 714
},
{
"epoch": 1.5315904139433552,
"grad_norm": 0.7441207766532898,
"learning_rate": 0.0001360197648721812,
"loss": 0.1119,
"step": 715
},
{
"epoch": 1.5337690631808278,
"grad_norm": 0.8434575796127319,
"learning_rate": 0.0001358583279907961,
"loss": 0.1392,
"step": 716
},
{
"epoch": 1.5359477124183005,
"grad_norm": 0.5164886116981506,
"learning_rate": 0.0001356967837950154,
"loss": 0.0792,
"step": 717
},
{
"epoch": 1.5381263616557734,
"grad_norm": 0.5095295310020447,
"learning_rate": 0.0001355351327682977,
"loss": 0.0672,
"step": 718
},
{
"epoch": 1.5403050108932463,
"grad_norm": 0.65059894323349,
"learning_rate": 0.00013537337539442132,
"loss": 0.0802,
"step": 719
},
{
"epoch": 1.5424836601307188,
"grad_norm": 0.6488040685653687,
"learning_rate": 0.0001352115121574829,
"loss": 0.0894,
"step": 720
},
{
"epoch": 1.5446623093681917,
"grad_norm": 0.7139689326286316,
"learning_rate": 0.00013504954354189583,
"loss": 0.1185,
"step": 721
},
{
"epoch": 1.5468409586056646,
"grad_norm": 0.5658460259437561,
"learning_rate": 0.00013488747003238892,
"loss": 0.0983,
"step": 722
},
{
"epoch": 1.5490196078431373,
"grad_norm": 0.6340675354003906,
"learning_rate": 0.00013472529211400484,
"loss": 0.0945,
"step": 723
},
{
"epoch": 1.55119825708061,
"grad_norm": 0.6365328431129456,
"learning_rate": 0.00013456301027209882,
"loss": 0.0753,
"step": 724
},
{
"epoch": 1.553376906318083,
"grad_norm": 0.6497185230255127,
"learning_rate": 0.00013440062499233709,
"loss": 0.0841,
"step": 725
},
{
"epoch": 1.5555555555555556,
"grad_norm": 0.6614805459976196,
"learning_rate": 0.00013423813676069534,
"loss": 0.0903,
"step": 726
},
{
"epoch": 1.5577342047930283,
"grad_norm": 0.6074105501174927,
"learning_rate": 0.00013407554606345747,
"loss": 0.0918,
"step": 727
},
{
"epoch": 1.5599128540305012,
"grad_norm": 0.6045775413513184,
"learning_rate": 0.000133912853387214,
"loss": 0.0918,
"step": 728
},
{
"epoch": 1.5620915032679739,
"grad_norm": 0.6219077110290527,
"learning_rate": 0.0001337500592188606,
"loss": 0.0884,
"step": 729
},
{
"epoch": 1.5642701525054465,
"grad_norm": 0.669049084186554,
"learning_rate": 0.0001335871640455968,
"loss": 0.1,
"step": 730
},
{
"epoch": 1.5664488017429194,
"grad_norm": 0.5828138589859009,
"learning_rate": 0.00013342416835492423,
"loss": 0.0808,
"step": 731
},
{
"epoch": 1.5686274509803921,
"grad_norm": 0.7363128066062927,
"learning_rate": 0.00013326107263464558,
"loss": 0.1117,
"step": 732
},
{
"epoch": 1.5708061002178648,
"grad_norm": 0.6877434849739075,
"learning_rate": 0.00013309787737286267,
"loss": 0.115,
"step": 733
},
{
"epoch": 1.5729847494553377,
"grad_norm": 0.5169875621795654,
"learning_rate": 0.00013293458305797533,
"loss": 0.0697,
"step": 734
},
{
"epoch": 1.5751633986928104,
"grad_norm": 0.6289706826210022,
"learning_rate": 0.00013277119017867983,
"loss": 0.1014,
"step": 735
},
{
"epoch": 1.577342047930283,
"grad_norm": 0.601397693157196,
"learning_rate": 0.0001326076992239674,
"loss": 0.0882,
"step": 736
},
{
"epoch": 1.579520697167756,
"grad_norm": 0.8283075094223022,
"learning_rate": 0.00013244411068312283,
"loss": 0.1334,
"step": 737
},
{
"epoch": 1.581699346405229,
"grad_norm": 0.6383387446403503,
"learning_rate": 0.00013228042504572285,
"loss": 0.0887,
"step": 738
},
{
"epoch": 1.5838779956427014,
"grad_norm": 0.610774576663971,
"learning_rate": 0.00013211664280163488,
"loss": 0.1124,
"step": 739
},
{
"epoch": 1.5860566448801743,
"grad_norm": 0.5463963150978088,
"learning_rate": 0.00013195276444101547,
"loss": 0.078,
"step": 740
},
{
"epoch": 1.5882352941176472,
"grad_norm": 0.5919215679168701,
"learning_rate": 0.00013178879045430862,
"loss": 0.0797,
"step": 741
},
{
"epoch": 1.5904139433551199,
"grad_norm": 0.4449659287929535,
"learning_rate": 0.00013162472133224483,
"loss": 0.0545,
"step": 742
},
{
"epoch": 1.5925925925925926,
"grad_norm": 0.5289342999458313,
"learning_rate": 0.00013146055756583906,
"loss": 0.0668,
"step": 743
},
{
"epoch": 1.5947712418300655,
"grad_norm": 0.6107696294784546,
"learning_rate": 0.0001312962996463896,
"loss": 0.1107,
"step": 744
},
{
"epoch": 1.5969498910675382,
"grad_norm": 0.6633789539337158,
"learning_rate": 0.00013113194806547656,
"loss": 0.1044,
"step": 745
},
{
"epoch": 1.5991285403050108,
"grad_norm": 0.4553978443145752,
"learning_rate": 0.00013096750331496033,
"loss": 0.0611,
"step": 746
},
{
"epoch": 1.6013071895424837,
"grad_norm": 0.6370587944984436,
"learning_rate": 0.00013080296588698006,
"loss": 0.0876,
"step": 747
},
{
"epoch": 1.6034858387799564,
"grad_norm": 0.6242510080337524,
"learning_rate": 0.0001306383362739523,
"loss": 0.0961,
"step": 748
},
{
"epoch": 1.6056644880174291,
"grad_norm": 0.4671713709831238,
"learning_rate": 0.00013047361496856957,
"loss": 0.0653,
"step": 749
},
{
"epoch": 1.607843137254902,
"grad_norm": 0.5274946689605713,
"learning_rate": 0.00013030880246379866,
"loss": 0.0747,
"step": 750
},
{
"epoch": 1.6100217864923747,
"grad_norm": 0.4920006990432739,
"learning_rate": 0.00013014389925287943,
"loss": 0.0694,
"step": 751
},
{
"epoch": 1.6122004357298474,
"grad_norm": 0.5613352060317993,
"learning_rate": 0.00012997890582932303,
"loss": 0.0777,
"step": 752
},
{
"epoch": 1.6143790849673203,
"grad_norm": 0.5812129974365234,
"learning_rate": 0.00012981382268691084,
"loss": 0.0798,
"step": 753
},
{
"epoch": 1.616557734204793,
"grad_norm": 0.42552122473716736,
"learning_rate": 0.00012964865031969252,
"loss": 0.0487,
"step": 754
},
{
"epoch": 1.6187363834422657,
"grad_norm": 0.5462669134140015,
"learning_rate": 0.0001294833892219848,
"loss": 0.0721,
"step": 755
},
{
"epoch": 1.6209150326797386,
"grad_norm": 0.6898488998413086,
"learning_rate": 0.0001293180398883701,
"loss": 0.0873,
"step": 756
},
{
"epoch": 1.6230936819172115,
"grad_norm": 0.8160077929496765,
"learning_rate": 0.0001291526028136947,
"loss": 0.1475,
"step": 757
},
{
"epoch": 1.625272331154684,
"grad_norm": 0.6779285073280334,
"learning_rate": 0.00012898707849306763,
"loss": 0.0983,
"step": 758
},
{
"epoch": 1.6274509803921569,
"grad_norm": 0.5817242860794067,
"learning_rate": 0.0001288214674218589,
"loss": 0.0718,
"step": 759
},
{
"epoch": 1.6296296296296298,
"grad_norm": 0.7141191959381104,
"learning_rate": 0.00012865577009569824,
"loss": 0.1006,
"step": 760
},
{
"epoch": 1.6318082788671024,
"grad_norm": 0.7559797763824463,
"learning_rate": 0.0001284899870104735,
"loss": 0.1181,
"step": 761
},
{
"epoch": 1.6339869281045751,
"grad_norm": 0.5632848739624023,
"learning_rate": 0.0001283241186623291,
"loss": 0.0782,
"step": 762
},
{
"epoch": 1.636165577342048,
"grad_norm": 0.5656502842903137,
"learning_rate": 0.00012815816554766476,
"loss": 0.0706,
"step": 763
},
{
"epoch": 1.6383442265795207,
"grad_norm": 0.7342005372047424,
"learning_rate": 0.00012799212816313376,
"loss": 0.1059,
"step": 764
},
{
"epoch": 1.6405228758169934,
"grad_norm": 0.5315876603126526,
"learning_rate": 0.00012782600700564166,
"loss": 0.0735,
"step": 765
},
{
"epoch": 1.6427015250544663,
"grad_norm": 0.527675986289978,
"learning_rate": 0.00012765980257234473,
"loss": 0.0638,
"step": 766
},
{
"epoch": 1.644880174291939,
"grad_norm": 0.6750484108924866,
"learning_rate": 0.00012749351536064834,
"loss": 0.0896,
"step": 767
},
{
"epoch": 1.6470588235294117,
"grad_norm": 0.5952948331832886,
"learning_rate": 0.00012732714586820583,
"loss": 0.0878,
"step": 768
},
{
"epoch": 1.6492374727668846,
"grad_norm": 0.8387467861175537,
"learning_rate": 0.00012716069459291652,
"loss": 0.1307,
"step": 769
},
{
"epoch": 1.6514161220043573,
"grad_norm": 0.6178788542747498,
"learning_rate": 0.00012699416203292466,
"loss": 0.0923,
"step": 770
},
{
"epoch": 1.65359477124183,
"grad_norm": 0.5772682428359985,
"learning_rate": 0.0001268275486866177,
"loss": 0.077,
"step": 771
},
{
"epoch": 1.6557734204793029,
"grad_norm": 0.5710946917533875,
"learning_rate": 0.00012666085505262485,
"loss": 0.0744,
"step": 772
},
{
"epoch": 1.6579520697167756,
"grad_norm": 0.5186509490013123,
"learning_rate": 0.0001264940816298157,
"loss": 0.0666,
"step": 773
},
{
"epoch": 1.6601307189542482,
"grad_norm": 0.44019749760627747,
"learning_rate": 0.00012632722891729845,
"loss": 0.0484,
"step": 774
},
{
"epoch": 1.6623093681917211,
"grad_norm": 0.5319947600364685,
"learning_rate": 0.00012616029741441877,
"loss": 0.058,
"step": 775
},
{
"epoch": 1.664488017429194,
"grad_norm": 0.682352602481842,
"learning_rate": 0.000125993287620758,
"loss": 0.0854,
"step": 776
},
{
"epoch": 1.6666666666666665,
"grad_norm": 0.5932778716087341,
"learning_rate": 0.0001258262000361319,
"loss": 0.1003,
"step": 777
},
{
"epoch": 1.6688453159041394,
"grad_norm": 0.5531163215637207,
"learning_rate": 0.00012565903516058882,
"loss": 0.0619,
"step": 778
},
{
"epoch": 1.6710239651416123,
"grad_norm": 0.6834267377853394,
"learning_rate": 0.00012549179349440875,
"loss": 0.0971,
"step": 779
},
{
"epoch": 1.673202614379085,
"grad_norm": 0.5356924533843994,
"learning_rate": 0.00012532447553810126,
"loss": 0.0673,
"step": 780
},
{
"epoch": 1.6753812636165577,
"grad_norm": 0.7086807489395142,
"learning_rate": 0.0001251570817924042,
"loss": 0.0978,
"step": 781
},
{
"epoch": 1.6775599128540306,
"grad_norm": 0.6605278253555298,
"learning_rate": 0.00012498961275828247,
"loss": 0.1036,
"step": 782
},
{
"epoch": 1.6797385620915033,
"grad_norm": 0.47696250677108765,
"learning_rate": 0.00012482206893692604,
"loss": 0.0589,
"step": 783
},
{
"epoch": 1.681917211328976,
"grad_norm": 0.6112656593322754,
"learning_rate": 0.00012465445082974886,
"loss": 0.0818,
"step": 784
},
{
"epoch": 1.6840958605664489,
"grad_norm": 0.6264757513999939,
"learning_rate": 0.0001244867589383871,
"loss": 0.0841,
"step": 785
},
{
"epoch": 1.6862745098039216,
"grad_norm": 0.5560308694839478,
"learning_rate": 0.00012431899376469784,
"loss": 0.0792,
"step": 786
},
{
"epoch": 1.6884531590413943,
"grad_norm": 0.4631575345993042,
"learning_rate": 0.00012415115581075741,
"loss": 0.0589,
"step": 787
},
{
"epoch": 1.6906318082788672,
"grad_norm": 0.7023651599884033,
"learning_rate": 0.00012398324557885994,
"loss": 0.1074,
"step": 788
},
{
"epoch": 1.6928104575163399,
"grad_norm": 0.5364183187484741,
"learning_rate": 0.00012381526357151592,
"loss": 0.0685,
"step": 789
},
{
"epoch": 1.6949891067538125,
"grad_norm": 0.5863189697265625,
"learning_rate": 0.0001236472102914506,
"loss": 0.0729,
"step": 790
},
{
"epoch": 1.6971677559912854,
"grad_norm": 0.48796355724334717,
"learning_rate": 0.00012347908624160258,
"loss": 0.0545,
"step": 791
},
{
"epoch": 1.6993464052287581,
"grad_norm": 0.6581359505653381,
"learning_rate": 0.00012331089192512218,
"loss": 0.1039,
"step": 792
},
{
"epoch": 1.7015250544662308,
"grad_norm": 0.639834463596344,
"learning_rate": 0.0001231426278453701,
"loss": 0.0921,
"step": 793
},
{
"epoch": 1.7037037037037037,
"grad_norm": 0.599344789981842,
"learning_rate": 0.00012297429450591575,
"loss": 0.0691,
"step": 794
},
{
"epoch": 1.7058823529411766,
"grad_norm": 0.7103595733642578,
"learning_rate": 0.00012280589241053585,
"loss": 0.0927,
"step": 795
},
{
"epoch": 1.708061002178649,
"grad_norm": 0.5287673473358154,
"learning_rate": 0.00012263742206321287,
"loss": 0.0721,
"step": 796
},
{
"epoch": 1.710239651416122,
"grad_norm": 0.6704793572425842,
"learning_rate": 0.00012246888396813356,
"loss": 0.0881,
"step": 797
},
{
"epoch": 1.712418300653595,
"grad_norm": 0.5632234215736389,
"learning_rate": 0.00012230027862968743,
"loss": 0.0758,
"step": 798
},
{
"epoch": 1.7145969498910676,
"grad_norm": 0.61404949426651,
"learning_rate": 0.00012213160655246517,
"loss": 0.1015,
"step": 799
},
{
"epoch": 1.7167755991285403,
"grad_norm": 0.6605061292648315,
"learning_rate": 0.00012196286824125726,
"loss": 0.0721,
"step": 800
},
{
"epoch": 1.7189542483660132,
"grad_norm": 0.5778952240943909,
"learning_rate": 0.0001217940642010524,
"loss": 0.0619,
"step": 801
},
{
"epoch": 1.7211328976034859,
"grad_norm": 0.4803905487060547,
"learning_rate": 0.000121625194937036,
"loss": 0.0623,
"step": 802
},
{
"epoch": 1.7233115468409586,
"grad_norm": 0.5594003796577454,
"learning_rate": 0.0001214562609545886,
"loss": 0.0664,
"step": 803
},
{
"epoch": 1.7254901960784315,
"grad_norm": 0.6021897792816162,
"learning_rate": 0.0001212872627592845,
"loss": 0.0747,
"step": 804
},
{
"epoch": 1.7276688453159041,
"grad_norm": 0.5629754662513733,
"learning_rate": 0.00012111820085689016,
"loss": 0.0684,
"step": 805
},
{
"epoch": 1.7276688453159041,
"eval_loss": 0.36081361770629883,
"eval_runtime": 0.9368,
"eval_samples_per_second": 182.546,
"eval_steps_per_second": 13.878,
"step": 805
},
{
"epoch": 1.7298474945533768,
"grad_norm": 0.7455347180366516,
"learning_rate": 0.00012094907575336267,
"loss": 0.0931,
"step": 806
},
{
"epoch": 1.7320261437908497,
"grad_norm": 0.6510118246078491,
"learning_rate": 0.00012077988795484831,
"loss": 0.0888,
"step": 807
},
{
"epoch": 1.7342047930283224,
"grad_norm": 0.4980412423610687,
"learning_rate": 0.0001206106379676809,
"loss": 0.0515,
"step": 808
},
{
"epoch": 1.736383442265795,
"grad_norm": 0.6158175468444824,
"learning_rate": 0.00012044132629838052,
"loss": 0.0688,
"step": 809
},
{
"epoch": 1.738562091503268,
"grad_norm": 0.6391656398773193,
"learning_rate": 0.00012027195345365167,
"loss": 0.0785,
"step": 810
},
{
"epoch": 1.7407407407407407,
"grad_norm": 0.5952157378196716,
"learning_rate": 0.00012010251994038211,
"loss": 0.0642,
"step": 811
},
{
"epoch": 1.7429193899782134,
"grad_norm": 0.5489711165428162,
"learning_rate": 0.00011993302626564102,
"loss": 0.0803,
"step": 812
},
{
"epoch": 1.7450980392156863,
"grad_norm": 0.5591238737106323,
"learning_rate": 0.00011976347293667769,
"loss": 0.0786,
"step": 813
},
{
"epoch": 1.7472766884531592,
"grad_norm": 0.5260274410247803,
"learning_rate": 0.00011959386046091998,
"loss": 0.0684,
"step": 814
},
{
"epoch": 1.7494553376906317,
"grad_norm": 0.46458926796913147,
"learning_rate": 0.00011942418934597266,
"loss": 0.0628,
"step": 815
},
{
"epoch": 1.7516339869281046,
"grad_norm": 0.6219300031661987,
"learning_rate": 0.00011925446009961607,
"loss": 0.0826,
"step": 816
},
{
"epoch": 1.7538126361655775,
"grad_norm": 0.5895767211914062,
"learning_rate": 0.0001190846732298045,
"loss": 0.0757,
"step": 817
},
{
"epoch": 1.7559912854030502,
"grad_norm": 0.45634791254997253,
"learning_rate": 0.00011891482924466471,
"loss": 0.0576,
"step": 818
},
{
"epoch": 1.7581699346405228,
"grad_norm": 0.5139732956886292,
"learning_rate": 0.00011874492865249434,
"loss": 0.051,
"step": 819
},
{
"epoch": 1.7603485838779958,
"grad_norm": 0.503699004650116,
"learning_rate": 0.00011857497196176049,
"loss": 0.0565,
"step": 820
},
{
"epoch": 1.7625272331154684,
"grad_norm": 0.6864826083183289,
"learning_rate": 0.0001184049596810981,
"loss": 0.109,
"step": 821
},
{
"epoch": 1.7647058823529411,
"grad_norm": 0.4041982889175415,
"learning_rate": 0.00011823489231930854,
"loss": 0.0531,
"step": 822
},
{
"epoch": 1.766884531590414,
"grad_norm": 0.6555854678153992,
"learning_rate": 0.00011806477038535799,
"loss": 0.0923,
"step": 823
},
{
"epoch": 1.7690631808278867,
"grad_norm": 0.48353853821754456,
"learning_rate": 0.00011789459438837589,
"loss": 0.0569,
"step": 824
},
{
"epoch": 1.7712418300653594,
"grad_norm": 0.5575915575027466,
"learning_rate": 0.00011772436483765363,
"loss": 0.0815,
"step": 825
},
{
"epoch": 1.7734204793028323,
"grad_norm": 0.5701248645782471,
"learning_rate": 0.00011755408224264269,
"loss": 0.0685,
"step": 826
},
{
"epoch": 1.775599128540305,
"grad_norm": 0.5434954166412354,
"learning_rate": 0.00011738374711295341,
"loss": 0.0621,
"step": 827
},
{
"epoch": 1.7777777777777777,
"grad_norm": 0.6199434399604797,
"learning_rate": 0.00011721335995835336,
"loss": 0.0657,
"step": 828
},
{
"epoch": 1.7799564270152506,
"grad_norm": 0.7478623390197754,
"learning_rate": 0.00011704292128876573,
"loss": 0.1091,
"step": 829
},
{
"epoch": 1.7821350762527233,
"grad_norm": 0.5505065321922302,
"learning_rate": 0.00011687243161426793,
"loss": 0.0773,
"step": 830
},
{
"epoch": 1.784313725490196,
"grad_norm": 0.611953616142273,
"learning_rate": 0.00011670189144509003,
"loss": 0.0756,
"step": 831
},
{
"epoch": 1.7864923747276689,
"grad_norm": 0.5525700449943542,
"learning_rate": 0.00011653130129161316,
"loss": 0.0643,
"step": 832
},
{
"epoch": 1.7886710239651418,
"grad_norm": 0.4467664659023285,
"learning_rate": 0.0001163606616643681,
"loss": 0.0552,
"step": 833
},
{
"epoch": 1.7908496732026142,
"grad_norm": 0.689069926738739,
"learning_rate": 0.00011618997307403367,
"loss": 0.0717,
"step": 834
},
{
"epoch": 1.7930283224400871,
"grad_norm": 0.5094537138938904,
"learning_rate": 0.00011601923603143519,
"loss": 0.0654,
"step": 835
},
{
"epoch": 1.79520697167756,
"grad_norm": 0.6739009618759155,
"learning_rate": 0.00011584845104754304,
"loss": 0.0843,
"step": 836
},
{
"epoch": 1.7973856209150327,
"grad_norm": 0.49725601077079773,
"learning_rate": 0.00011567761863347107,
"loss": 0.0661,
"step": 837
},
{
"epoch": 1.7995642701525054,
"grad_norm": 0.46219804883003235,
"learning_rate": 0.00011550673930047498,
"loss": 0.056,
"step": 838
},
{
"epoch": 1.8017429193899783,
"grad_norm": 0.4114397466182709,
"learning_rate": 0.00011533581355995102,
"loss": 0.046,
"step": 839
},
{
"epoch": 1.803921568627451,
"grad_norm": 0.6808457970619202,
"learning_rate": 0.00011516484192343425,
"loss": 0.1065,
"step": 840
},
{
"epoch": 1.8061002178649237,
"grad_norm": 0.4873061776161194,
"learning_rate": 0.00011499382490259709,
"loss": 0.0501,
"step": 841
},
{
"epoch": 1.8082788671023966,
"grad_norm": 0.5296838283538818,
"learning_rate": 0.00011482276300924782,
"loss": 0.0516,
"step": 842
},
{
"epoch": 1.8104575163398693,
"grad_norm": 0.5966465473175049,
"learning_rate": 0.00011465165675532898,
"loss": 0.0785,
"step": 843
},
{
"epoch": 1.812636165577342,
"grad_norm": 0.5794910192489624,
"learning_rate": 0.00011448050665291587,
"loss": 0.0704,
"step": 844
},
{
"epoch": 1.8148148148148149,
"grad_norm": 0.37284550070762634,
"learning_rate": 0.00011430931321421499,
"loss": 0.0415,
"step": 845
},
{
"epoch": 1.8169934640522876,
"grad_norm": 0.5628785490989685,
"learning_rate": 0.00011413807695156262,
"loss": 0.0711,
"step": 846
},
{
"epoch": 1.8191721132897603,
"grad_norm": 0.3898080289363861,
"learning_rate": 0.0001139667983774231,
"loss": 0.0453,
"step": 847
},
{
"epoch": 1.8213507625272332,
"grad_norm": 0.6457688212394714,
"learning_rate": 0.00011379547800438747,
"loss": 0.0814,
"step": 848
},
{
"epoch": 1.8235294117647058,
"grad_norm": 0.5886730551719666,
"learning_rate": 0.00011362411634517183,
"loss": 0.0719,
"step": 849
},
{
"epoch": 1.8257080610021785,
"grad_norm": 0.5808268785476685,
"learning_rate": 0.00011345271391261584,
"loss": 0.0669,
"step": 850
},
{
"epoch": 1.8278867102396514,
"grad_norm": 0.6759737133979797,
"learning_rate": 0.0001132812712196812,
"loss": 0.0844,
"step": 851
},
{
"epoch": 1.8300653594771243,
"grad_norm": 0.4220186173915863,
"learning_rate": 0.00011310978877945007,
"loss": 0.0509,
"step": 852
},
{
"epoch": 1.8322440087145968,
"grad_norm": 0.4730389714241028,
"learning_rate": 0.00011293826710512359,
"loss": 0.0495,
"step": 853
},
{
"epoch": 1.8344226579520697,
"grad_norm": 0.49538376927375793,
"learning_rate": 0.00011276670671002028,
"loss": 0.0561,
"step": 854
},
{
"epoch": 1.8366013071895426,
"grad_norm": 0.5948292016983032,
"learning_rate": 0.00011259510810757461,
"loss": 0.0766,
"step": 855
},
{
"epoch": 1.8387799564270153,
"grad_norm": 0.5298624038696289,
"learning_rate": 0.00011242347181133533,
"loss": 0.0794,
"step": 856
},
{
"epoch": 1.840958605664488,
"grad_norm": 0.5494588017463684,
"learning_rate": 0.00011225179833496402,
"loss": 0.065,
"step": 857
},
{
"epoch": 1.843137254901961,
"grad_norm": 0.4604671597480774,
"learning_rate": 0.00011208008819223354,
"loss": 0.0484,
"step": 858
},
{
"epoch": 1.8453159041394336,
"grad_norm": 0.44082626700401306,
"learning_rate": 0.00011190834189702646,
"loss": 0.0493,
"step": 859
},
{
"epoch": 1.8474945533769063,
"grad_norm": 0.5379127264022827,
"learning_rate": 0.00011173655996333357,
"loss": 0.0594,
"step": 860
},
{
"epoch": 1.8496732026143792,
"grad_norm": 0.5449596047401428,
"learning_rate": 0.00011156474290525227,
"loss": 0.0623,
"step": 861
},
{
"epoch": 1.8518518518518519,
"grad_norm": 0.5305776000022888,
"learning_rate": 0.00011139289123698518,
"loss": 0.0548,
"step": 862
},
{
"epoch": 1.8540305010893245,
"grad_norm": 0.5683318376541138,
"learning_rate": 0.00011122100547283834,
"loss": 0.0625,
"step": 863
},
{
"epoch": 1.8562091503267975,
"grad_norm": 0.5460466146469116,
"learning_rate": 0.00011104908612722001,
"loss": 0.0583,
"step": 864
},
{
"epoch": 1.8583877995642701,
"grad_norm": 0.5440402030944824,
"learning_rate": 0.00011087713371463881,
"loss": 0.0594,
"step": 865
},
{
"epoch": 1.8605664488017428,
"grad_norm": 0.6470115780830383,
"learning_rate": 0.00011070514874970237,
"loss": 0.0937,
"step": 866
},
{
"epoch": 1.8627450980392157,
"grad_norm": 0.48046019673347473,
"learning_rate": 0.00011053313174711575,
"loss": 0.0543,
"step": 867
},
{
"epoch": 1.8649237472766884,
"grad_norm": 0.42647895216941833,
"learning_rate": 0.00011036108322167988,
"loss": 0.0529,
"step": 868
},
{
"epoch": 1.867102396514161,
"grad_norm": 0.5126241445541382,
"learning_rate": 0.00011018900368829006,
"loss": 0.06,
"step": 869
},
{
"epoch": 1.869281045751634,
"grad_norm": 0.56479412317276,
"learning_rate": 0.00011001689366193433,
"loss": 0.0774,
"step": 870
},
{
"epoch": 1.871459694989107,
"grad_norm": 0.6062801480293274,
"learning_rate": 0.000109844753657692,
"loss": 0.0688,
"step": 871
},
{
"epoch": 1.8736383442265794,
"grad_norm": 0.4999787211418152,
"learning_rate": 0.00010967258419073217,
"loss": 0.0461,
"step": 872
},
{
"epoch": 1.8758169934640523,
"grad_norm": 0.5203256607055664,
"learning_rate": 0.00010950038577631198,
"loss": 0.0575,
"step": 873
},
{
"epoch": 1.8779956427015252,
"grad_norm": 0.4616885185241699,
"learning_rate": 0.00010932815892977535,
"loss": 0.0409,
"step": 874
},
{
"epoch": 1.8801742919389977,
"grad_norm": 0.488798588514328,
"learning_rate": 0.00010915590416655117,
"loss": 0.0529,
"step": 875
},
{
"epoch": 1.8823529411764706,
"grad_norm": 0.49445784091949463,
"learning_rate": 0.00010898362200215197,
"loss": 0.0613,
"step": 876
},
{
"epoch": 1.8845315904139435,
"grad_norm": 0.5372164249420166,
"learning_rate": 0.00010881131295217225,
"loss": 0.0603,
"step": 877
},
{
"epoch": 1.8867102396514162,
"grad_norm": 0.4770764708518982,
"learning_rate": 0.00010863897753228687,
"loss": 0.0518,
"step": 878
},
{
"epoch": 1.8888888888888888,
"grad_norm": 0.616121232509613,
"learning_rate": 0.00010846661625824978,
"loss": 0.0892,
"step": 879
},
{
"epoch": 1.8910675381263617,
"grad_norm": 0.5405257344245911,
"learning_rate": 0.0001082942296458922,
"loss": 0.0783,
"step": 880
},
{
"epoch": 1.8932461873638344,
"grad_norm": 0.505785346031189,
"learning_rate": 0.00010812181821112122,
"loss": 0.0616,
"step": 881
},
{
"epoch": 1.8954248366013071,
"grad_norm": 0.509480357170105,
"learning_rate": 0.00010794938246991817,
"loss": 0.07,
"step": 882
},
{
"epoch": 1.89760348583878,
"grad_norm": 0.5401344299316406,
"learning_rate": 0.00010777692293833718,
"loss": 0.059,
"step": 883
},
{
"epoch": 1.8997821350762527,
"grad_norm": 0.3899208903312683,
"learning_rate": 0.0001076044401325036,
"loss": 0.0435,
"step": 884
},
{
"epoch": 1.9019607843137254,
"grad_norm": 0.5744214653968811,
"learning_rate": 0.00010743193456861227,
"loss": 0.0677,
"step": 885
},
{
"epoch": 1.9041394335511983,
"grad_norm": 0.5708267092704773,
"learning_rate": 0.00010725940676292636,
"loss": 0.0669,
"step": 886
},
{
"epoch": 1.906318082788671,
"grad_norm": 0.49386221170425415,
"learning_rate": 0.00010708685723177543,
"loss": 0.0623,
"step": 887
},
{
"epoch": 1.9084967320261437,
"grad_norm": 0.47566959261894226,
"learning_rate": 0.0001069142864915542,
"loss": 0.0582,
"step": 888
},
{
"epoch": 1.9106753812636166,
"grad_norm": 0.5212053656578064,
"learning_rate": 0.00010674169505872072,
"loss": 0.0549,
"step": 889
},
{
"epoch": 1.9128540305010895,
"grad_norm": 0.5509231686592102,
"learning_rate": 0.00010656908344979506,
"loss": 0.0588,
"step": 890
},
{
"epoch": 1.915032679738562,
"grad_norm": 0.48301365971565247,
"learning_rate": 0.0001063964521813577,
"loss": 0.0623,
"step": 891
},
{
"epoch": 1.9172113289760349,
"grad_norm": 0.5671520829200745,
"learning_rate": 0.0001062238017700478,
"loss": 0.058,
"step": 892
},
{
"epoch": 1.9193899782135078,
"grad_norm": 0.4387785792350769,
"learning_rate": 0.00010605113273256205,
"loss": 0.0448,
"step": 893
},
{
"epoch": 1.9215686274509802,
"grad_norm": 0.48259374499320984,
"learning_rate": 0.00010587844558565261,
"loss": 0.0589,
"step": 894
},
{
"epoch": 1.9237472766884531,
"grad_norm": 0.530150294303894,
"learning_rate": 0.00010570574084612608,
"loss": 0.0551,
"step": 895
},
{
"epoch": 1.925925925925926,
"grad_norm": 0.42356571555137634,
"learning_rate": 0.00010553301903084157,
"loss": 0.0462,
"step": 896
},
{
"epoch": 1.9281045751633987,
"grad_norm": 0.6400770545005798,
"learning_rate": 0.00010536028065670929,
"loss": 0.0794,
"step": 897
},
{
"epoch": 1.9302832244008714,
"grad_norm": 0.46982961893081665,
"learning_rate": 0.00010518752624068911,
"loss": 0.0532,
"step": 898
},
{
"epoch": 1.9324618736383443,
"grad_norm": 0.5801010131835938,
"learning_rate": 0.00010501475629978878,
"loss": 0.0647,
"step": 899
},
{
"epoch": 1.934640522875817,
"grad_norm": 0.47689372301101685,
"learning_rate": 0.00010484197135106263,
"loss": 0.0524,
"step": 900
},
{
"epoch": 1.9368191721132897,
"grad_norm": 0.695038914680481,
"learning_rate": 0.00010466917191160981,
"loss": 0.0859,
"step": 901
},
{
"epoch": 1.9389978213507626,
"grad_norm": 0.46906334161758423,
"learning_rate": 0.0001044963584985729,
"loss": 0.0459,
"step": 902
},
{
"epoch": 1.9411764705882353,
"grad_norm": 0.49414071440696716,
"learning_rate": 0.0001043235316291363,
"loss": 0.0534,
"step": 903
},
{
"epoch": 1.943355119825708,
"grad_norm": 0.5636811852455139,
"learning_rate": 0.0001041506918205246,
"loss": 0.0695,
"step": 904
},
{
"epoch": 1.9455337690631809,
"grad_norm": 0.6023524403572083,
"learning_rate": 0.0001039778395900012,
"loss": 0.0805,
"step": 905
},
{
"epoch": 1.9477124183006536,
"grad_norm": 0.4797222912311554,
"learning_rate": 0.00010380497545486663,
"loss": 0.0592,
"step": 906
},
{
"epoch": 1.9498910675381262,
"grad_norm": 0.366100013256073,
"learning_rate": 0.00010363209993245708,
"loss": 0.0343,
"step": 907
},
{
"epoch": 1.9520697167755992,
"grad_norm": 0.5525081157684326,
"learning_rate": 0.00010345921354014279,
"loss": 0.0679,
"step": 908
},
{
"epoch": 1.954248366013072,
"grad_norm": 0.3858024477958679,
"learning_rate": 0.00010328631679532658,
"loss": 0.0397,
"step": 909
},
{
"epoch": 1.9564270152505445,
"grad_norm": 0.4831399619579315,
"learning_rate": 0.00010311341021544218,
"loss": 0.0618,
"step": 910
},
{
"epoch": 1.9586056644880174,
"grad_norm": 0.487746924161911,
"learning_rate": 0.00010294049431795278,
"loss": 0.0522,
"step": 911
},
{
"epoch": 1.9607843137254903,
"grad_norm": 0.4599170982837677,
"learning_rate": 0.0001027675696203495,
"loss": 0.0456,
"step": 912
},
{
"epoch": 1.9629629629629628,
"grad_norm": 0.6550068855285645,
"learning_rate": 0.00010259463664014972,
"loss": 0.0781,
"step": 913
},
{
"epoch": 1.9651416122004357,
"grad_norm": 0.5491052269935608,
"learning_rate": 0.00010242169589489568,
"loss": 0.055,
"step": 914
},
{
"epoch": 1.9673202614379086,
"grad_norm": 0.5342620611190796,
"learning_rate": 0.0001022487479021528,
"loss": 0.051,
"step": 915
},
{
"epoch": 1.9694989106753813,
"grad_norm": 0.5309066772460938,
"learning_rate": 0.00010207579317950827,
"loss": 0.0596,
"step": 916
},
{
"epoch": 1.971677559912854,
"grad_norm": 0.4537419080734253,
"learning_rate": 0.00010190283224456931,
"loss": 0.0451,
"step": 917
},
{
"epoch": 1.973856209150327,
"grad_norm": 0.573647677898407,
"learning_rate": 0.0001017298656149618,
"loss": 0.0712,
"step": 918
},
{
"epoch": 1.9760348583877996,
"grad_norm": 0.5618070363998413,
"learning_rate": 0.00010155689380832869,
"loss": 0.0602,
"step": 919
},
{
"epoch": 1.9782135076252723,
"grad_norm": 0.5742269158363342,
"learning_rate": 0.00010138391734232832,
"loss": 0.0817,
"step": 920
},
{
"epoch": 1.9782135076252723,
"eval_loss": 0.36630603671073914,
"eval_runtime": 0.9367,
"eval_samples_per_second": 182.551,
"eval_steps_per_second": 13.878,
"step": 920
},
{
"epoch": 1.9803921568627452,
"grad_norm": 0.45455044507980347,
"learning_rate": 0.00010121093673463311,
"loss": 0.0515,
"step": 921
},
{
"epoch": 1.9825708061002179,
"grad_norm": 0.4593486189842224,
"learning_rate": 0.00010103795250292778,
"loss": 0.0606,
"step": 922
},
{
"epoch": 1.9847494553376905,
"grad_norm": 0.46525895595550537,
"learning_rate": 0.00010086496516490786,
"loss": 0.0494,
"step": 923
},
{
"epoch": 1.9869281045751634,
"grad_norm": 0.5621113777160645,
"learning_rate": 0.00010069197523827833,
"loss": 0.0458,
"step": 924
},
{
"epoch": 1.9891067538126361,
"grad_norm": 0.5291484594345093,
"learning_rate": 0.00010051898324075177,
"loss": 0.0465,
"step": 925
},
{
"epoch": 1.9912854030501088,
"grad_norm": 0.6122869849205017,
"learning_rate": 0.00010034598969004705,
"loss": 0.0656,
"step": 926
},
{
"epoch": 1.9934640522875817,
"grad_norm": 0.38288819789886475,
"learning_rate": 0.00010017299510388759,
"loss": 0.0364,
"step": 927
},
{
"epoch": 1.9956427015250546,
"grad_norm": 0.5283601880073547,
"learning_rate": 0.0001,
"loss": 0.0607,
"step": 928
},
{
"epoch": 1.997821350762527,
"grad_norm": 0.5128113031387329,
"learning_rate": 9.982700489611244e-05,
"loss": 0.0651,
"step": 929
},
{
"epoch": 2.0,
"grad_norm": 0.5397202372550964,
"learning_rate": 9.965401030995301e-05,
"loss": 0.0639,
"step": 930
},
{
"epoch": 2.0021786492374725,
"grad_norm": 0.42536309361457825,
"learning_rate": 9.948101675924822e-05,
"loss": 0.0493,
"step": 931
},
{
"epoch": 2.004357298474946,
"grad_norm": 0.5398836135864258,
"learning_rate": 9.930802476172169e-05,
"loss": 0.0522,
"step": 932
},
{
"epoch": 2.0065359477124183,
"grad_norm": 0.4630374014377594,
"learning_rate": 9.913503483509216e-05,
"loss": 0.0492,
"step": 933
},
{
"epoch": 2.0087145969498907,
"grad_norm": 0.43915867805480957,
"learning_rate": 9.896204749707228e-05,
"loss": 0.0415,
"step": 934
},
{
"epoch": 2.010893246187364,
"grad_norm": 0.5192863941192627,
"learning_rate": 9.878906326536694e-05,
"loss": 0.06,
"step": 935
},
{
"epoch": 2.0130718954248366,
"grad_norm": 0.460500031709671,
"learning_rate": 9.861608265767167e-05,
"loss": 0.0487,
"step": 936
},
{
"epoch": 2.0152505446623095,
"grad_norm": 0.4268781244754791,
"learning_rate": 9.844310619167133e-05,
"loss": 0.0471,
"step": 937
},
{
"epoch": 2.0174291938997824,
"grad_norm": 0.5911117792129517,
"learning_rate": 9.827013438503822e-05,
"loss": 0.0713,
"step": 938
},
{
"epoch": 2.019607843137255,
"grad_norm": 0.49901121854782104,
"learning_rate": 9.809716775543073e-05,
"loss": 0.049,
"step": 939
},
{
"epoch": 2.0217864923747277,
"grad_norm": 0.5817520022392273,
"learning_rate": 9.792420682049174e-05,
"loss": 0.0622,
"step": 940
},
{
"epoch": 2.0239651416122006,
"grad_norm": 0.49429771304130554,
"learning_rate": 9.775125209784719e-05,
"loss": 0.0493,
"step": 941
},
{
"epoch": 2.026143790849673,
"grad_norm": 0.5530015230178833,
"learning_rate": 9.757830410510433e-05,
"loss": 0.0574,
"step": 942
},
{
"epoch": 2.028322440087146,
"grad_norm": 0.46509528160095215,
"learning_rate": 9.740536335985031e-05,
"loss": 0.0532,
"step": 943
},
{
"epoch": 2.002178649237473,
"grad_norm": 0.4386727809906006,
"learning_rate": 9.723243037965056e-05,
"loss": 0.0455,
"step": 944
},
{
"epoch": 2.0043572984749454,
"grad_norm": 0.3973652422428131,
"learning_rate": 9.705950568204723e-05,
"loss": 0.0321,
"step": 945
},
{
"epoch": 2.0065359477124183,
"grad_norm": 0.42888402938842773,
"learning_rate": 9.688658978455784e-05,
"loss": 0.0538,
"step": 946
},
{
"epoch": 2.008714596949891,
"grad_norm": 0.48195895552635193,
"learning_rate": 9.671368320467344e-05,
"loss": 0.0407,
"step": 947
},
{
"epoch": 2.0108932461873636,
"grad_norm": 0.45109862089157104,
"learning_rate": 9.654078645985722e-05,
"loss": 0.0439,
"step": 948
},
{
"epoch": 2.0130718954248366,
"grad_norm": 0.4311431646347046,
"learning_rate": 9.636790006754297e-05,
"loss": 0.0444,
"step": 949
},
{
"epoch": 2.0152505446623095,
"grad_norm": 0.5313979387283325,
"learning_rate": 9.619502454513338e-05,
"loss": 0.049,
"step": 950
},
{
"epoch": 2.017429193899782,
"grad_norm": 0.436300665140152,
"learning_rate": 9.602216040999882e-05,
"loss": 0.0437,
"step": 951
},
{
"epoch": 2.019607843137255,
"grad_norm": 0.38083919882774353,
"learning_rate": 9.584930817947544e-05,
"loss": 0.0359,
"step": 952
},
{
"epoch": 2.0217864923747277,
"grad_norm": 0.38039639592170715,
"learning_rate": 9.567646837086375e-05,
"loss": 0.038,
"step": 953
},
{
"epoch": 2.0239651416122006,
"grad_norm": 0.3970465362071991,
"learning_rate": 9.550364150142713e-05,
"loss": 0.0372,
"step": 954
},
{
"epoch": 2.026143790849673,
"grad_norm": 0.4727162718772888,
"learning_rate": 9.533082808839019e-05,
"loss": 0.0439,
"step": 955
},
{
"epoch": 2.028322440087146,
"grad_norm": 0.43574225902557373,
"learning_rate": 9.515802864893739e-05,
"loss": 0.0372,
"step": 956
},
{
"epoch": 2.030501089324619,
"grad_norm": 0.4407714605331421,
"learning_rate": 9.498524370021124e-05,
"loss": 0.0396,
"step": 957
},
{
"epoch": 2.0326797385620914,
"grad_norm": 0.3953377604484558,
"learning_rate": 9.481247375931094e-05,
"loss": 0.0405,
"step": 958
},
{
"epoch": 2.0348583877995643,
"grad_norm": 0.46343356370925903,
"learning_rate": 9.463971934329072e-05,
"loss": 0.0424,
"step": 959
},
{
"epoch": 2.037037037037037,
"grad_norm": 0.4700873792171478,
"learning_rate": 9.446698096915847e-05,
"loss": 0.0478,
"step": 960
},
{
"epoch": 2.0392156862745097,
"grad_norm": 0.35322168469429016,
"learning_rate": 9.429425915387395e-05,
"loss": 0.0298,
"step": 961
},
{
"epoch": 2.0413943355119826,
"grad_norm": 0.4052344560623169,
"learning_rate": 9.412155441434741e-05,
"loss": 0.0395,
"step": 962
},
{
"epoch": 2.0435729847494555,
"grad_norm": 0.39179301261901855,
"learning_rate": 9.394886726743802e-05,
"loss": 0.0326,
"step": 963
},
{
"epoch": 2.045751633986928,
"grad_norm": 0.39701372385025024,
"learning_rate": 9.377619822995219e-05,
"loss": 0.0407,
"step": 964
},
{
"epoch": 2.047930283224401,
"grad_norm": 0.36004355549812317,
"learning_rate": 9.360354781864233e-05,
"loss": 0.0387,
"step": 965
},
{
"epoch": 2.0501089324618738,
"grad_norm": 0.37780487537384033,
"learning_rate": 9.343091655020495e-05,
"loss": 0.0396,
"step": 966
},
{
"epoch": 2.052287581699346,
"grad_norm": 0.35388338565826416,
"learning_rate": 9.325830494127932e-05,
"loss": 0.0324,
"step": 967
},
{
"epoch": 2.054466230936819,
"grad_norm": 0.450967937707901,
"learning_rate": 9.308571350844584e-05,
"loss": 0.0387,
"step": 968
},
{
"epoch": 2.056644880174292,
"grad_norm": 0.36347848176956177,
"learning_rate": 9.291314276822455e-05,
"loss": 0.0365,
"step": 969
},
{
"epoch": 2.0588235294117645,
"grad_norm": 0.49571454524993896,
"learning_rate": 9.274059323707366e-05,
"loss": 0.0419,
"step": 970
},
{
"epoch": 2.0610021786492374,
"grad_norm": 0.3792153596878052,
"learning_rate": 9.256806543138775e-05,
"loss": 0.0321,
"step": 971
},
{
"epoch": 2.0631808278867103,
"grad_norm": 0.4003947377204895,
"learning_rate": 9.239555986749645e-05,
"loss": 0.0388,
"step": 972
},
{
"epoch": 2.065359477124183,
"grad_norm": 0.5354496240615845,
"learning_rate": 9.22230770616628e-05,
"loss": 0.0445,
"step": 973
},
{
"epoch": 2.0675381263616557,
"grad_norm": 0.3640938997268677,
"learning_rate": 9.205061753008183e-05,
"loss": 0.0383,
"step": 974
},
{
"epoch": 2.0697167755991286,
"grad_norm": 0.4125429689884186,
"learning_rate": 9.187818178887881e-05,
"loss": 0.0336,
"step": 975
},
{
"epoch": 2.0718954248366015,
"grad_norm": 0.3906521499156952,
"learning_rate": 9.170577035410783e-05,
"loss": 0.0405,
"step": 976
},
{
"epoch": 2.074074074074074,
"grad_norm": 0.4133945405483246,
"learning_rate": 9.153338374175027e-05,
"loss": 0.0358,
"step": 977
},
{
"epoch": 2.076252723311547,
"grad_norm": 0.46391046047210693,
"learning_rate": 9.136102246771314e-05,
"loss": 0.0544,
"step": 978
},
{
"epoch": 2.0784313725490198,
"grad_norm": 0.36051511764526367,
"learning_rate": 9.118868704782779e-05,
"loss": 0.0335,
"step": 979
},
{
"epoch": 2.0806100217864922,
"grad_norm": 0.3989335000514984,
"learning_rate": 9.101637799784804e-05,
"loss": 0.0389,
"step": 980
},
{
"epoch": 2.082788671023965,
"grad_norm": 0.3435482680797577,
"learning_rate": 9.084409583344886e-05,
"loss": 0.036,
"step": 981
},
{
"epoch": 2.084967320261438,
"grad_norm": 0.4233006536960602,
"learning_rate": 9.06718410702247e-05,
"loss": 0.0485,
"step": 982
},
{
"epoch": 2.0871459694989105,
"grad_norm": 0.38224849104881287,
"learning_rate": 9.049961422368803e-05,
"loss": 0.0359,
"step": 983
},
{
"epoch": 2.0893246187363834,
"grad_norm": 0.36905547976493835,
"learning_rate": 9.032741580926787e-05,
"loss": 0.0312,
"step": 984
},
{
"epoch": 2.0915032679738563,
"grad_norm": 0.41927042603492737,
"learning_rate": 9.015524634230804e-05,
"loss": 0.0435,
"step": 985
},
{
"epoch": 2.093681917211329,
"grad_norm": 0.503204882144928,
"learning_rate": 8.998310633806571e-05,
"loss": 0.0465,
"step": 986
},
{
"epoch": 2.0958605664488017,
"grad_norm": 0.5310385823249817,
"learning_rate": 8.981099631171e-05,
"loss": 0.0399,
"step": 987
},
{
"epoch": 2.0980392156862746,
"grad_norm": 0.35375770926475525,
"learning_rate": 8.963891677832011e-05,
"loss": 0.0302,
"step": 988
},
{
"epoch": 2.100217864923747,
"grad_norm": 0.3928884267807007,
"learning_rate": 8.946686825288426e-05,
"loss": 0.0388,
"step": 989
},
{
"epoch": 2.10239651416122,
"grad_norm": 0.41044366359710693,
"learning_rate": 8.929485125029766e-05,
"loss": 0.0334,
"step": 990
},
{
"epoch": 2.104575163398693,
"grad_norm": 0.38677412271499634,
"learning_rate": 8.912286628536123e-05,
"loss": 0.0347,
"step": 991
},
{
"epoch": 2.106753812636166,
"grad_norm": 0.3688180446624756,
"learning_rate": 8.895091387277999e-05,
"loss": 0.034,
"step": 992
},
{
"epoch": 2.1089324618736383,
"grad_norm": 0.43808403611183167,
"learning_rate": 8.877899452716166e-05,
"loss": 0.0448,
"step": 993
},
{
"epoch": 2.111111111111111,
"grad_norm": 0.3778168261051178,
"learning_rate": 8.860710876301484e-05,
"loss": 0.0331,
"step": 994
},
{
"epoch": 2.113289760348584,
"grad_norm": 0.28699907660484314,
"learning_rate": 8.843525709474774e-05,
"loss": 0.0302,
"step": 995
},
{
"epoch": 2.1154684095860565,
"grad_norm": 0.3738473951816559,
"learning_rate": 8.826344003666647e-05,
"loss": 0.0337,
"step": 996
},
{
"epoch": 2.1176470588235294,
"grad_norm": 0.375796914100647,
"learning_rate": 8.809165810297355e-05,
"loss": 0.0415,
"step": 997
},
{
"epoch": 2.1198257080610023,
"grad_norm": 0.5249943137168884,
"learning_rate": 8.791991180776648e-05,
"loss": 0.0506,
"step": 998
},
{
"epoch": 2.122004357298475,
"grad_norm": 0.33527278900146484,
"learning_rate": 8.7748201665036e-05,
"loss": 0.0325,
"step": 999
},
{
"epoch": 2.1241830065359477,
"grad_norm": 0.30959898233413696,
"learning_rate": 8.757652818866471e-05,
"loss": 0.0273,
"step": 1000
},
{
"epoch": 2.1263616557734206,
"grad_norm": 0.3552459180355072,
"learning_rate": 8.740489189242541e-05,
"loss": 0.0371,
"step": 1001
},
{
"epoch": 2.128540305010893,
"grad_norm": 0.351105660200119,
"learning_rate": 8.723329328997973e-05,
"loss": 0.0308,
"step": 1002
},
{
"epoch": 2.130718954248366,
"grad_norm": 0.4374692142009735,
"learning_rate": 8.706173289487645e-05,
"loss": 0.0434,
"step": 1003
},
{
"epoch": 2.132897603485839,
"grad_norm": 0.3640350103378296,
"learning_rate": 8.689021122054996e-05,
"loss": 0.0363,
"step": 1004
},
{
"epoch": 2.1350762527233114,
"grad_norm": 0.3713133633136749,
"learning_rate": 8.671872878031884e-05,
"loss": 0.0333,
"step": 1005
},
{
"epoch": 2.1372549019607843,
"grad_norm": 0.3855540454387665,
"learning_rate": 8.654728608738418e-05,
"loss": 0.033,
"step": 1006
},
{
"epoch": 2.139433551198257,
"grad_norm": 0.30965426564216614,
"learning_rate": 8.637588365482818e-05,
"loss": 0.0321,
"step": 1007
},
{
"epoch": 2.1416122004357296,
"grad_norm": 0.4266233742237091,
"learning_rate": 8.620452199561254e-05,
"loss": 0.045,
"step": 1008
},
{
"epoch": 2.1437908496732025,
"grad_norm": 0.40546220541000366,
"learning_rate": 8.603320162257692e-05,
"loss": 0.0335,
"step": 1009
},
{
"epoch": 2.1459694989106755,
"grad_norm": 0.41686323285102844,
"learning_rate": 8.58619230484374e-05,
"loss": 0.0408,
"step": 1010
},
{
"epoch": 2.148148148148148,
"grad_norm": 0.44075897336006165,
"learning_rate": 8.5690686785785e-05,
"loss": 0.038,
"step": 1011
},
{
"epoch": 2.150326797385621,
"grad_norm": 0.3187354505062103,
"learning_rate": 8.551949334708415e-05,
"loss": 0.0279,
"step": 1012
},
{
"epoch": 2.1525054466230937,
"grad_norm": 0.49098312854766846,
"learning_rate": 8.534834324467105e-05,
"loss": 0.0383,
"step": 1013
},
{
"epoch": 2.1546840958605666,
"grad_norm": 0.43957433104515076,
"learning_rate": 8.51772369907522e-05,
"loss": 0.0387,
"step": 1014
},
{
"epoch": 2.156862745098039,
"grad_norm": 0.3748759329319,
"learning_rate": 8.500617509740292e-05,
"loss": 0.0343,
"step": 1015
},
{
"epoch": 2.159041394335512,
"grad_norm": 0.3676223158836365,
"learning_rate": 8.483515807656576e-05,
"loss": 0.0362,
"step": 1016
},
{
"epoch": 2.161220043572985,
"grad_norm": 0.3659650981426239,
"learning_rate": 8.466418644004902e-05,
"loss": 0.0349,
"step": 1017
},
{
"epoch": 2.1633986928104574,
"grad_norm": 0.4233817160129547,
"learning_rate": 8.449326069952506e-05,
"loss": 0.0356,
"step": 1018
},
{
"epoch": 2.1655773420479303,
"grad_norm": 0.35764414072036743,
"learning_rate": 8.432238136652897e-05,
"loss": 0.0339,
"step": 1019
},
{
"epoch": 2.167755991285403,
"grad_norm": 0.41857850551605225,
"learning_rate": 8.415154895245697e-05,
"loss": 0.0482,
"step": 1020
},
{
"epoch": 2.1699346405228757,
"grad_norm": 0.3641180694103241,
"learning_rate": 8.398076396856481e-05,
"loss": 0.0365,
"step": 1021
},
{
"epoch": 2.1721132897603486,
"grad_norm": 0.3954843282699585,
"learning_rate": 8.381002692596635e-05,
"loss": 0.0319,
"step": 1022
},
{
"epoch": 2.1742919389978215,
"grad_norm": 0.3095298707485199,
"learning_rate": 8.363933833563192e-05,
"loss": 0.0253,
"step": 1023
},
{
"epoch": 2.176470588235294,
"grad_norm": 0.4632478654384613,
"learning_rate": 8.346869870838685e-05,
"loss": 0.0426,
"step": 1024
},
{
"epoch": 2.178649237472767,
"grad_norm": 0.3950076103210449,
"learning_rate": 8.329810855490998e-05,
"loss": 0.0355,
"step": 1025
},
{
"epoch": 2.1808278867102397,
"grad_norm": 0.34780746698379517,
"learning_rate": 8.312756838573208e-05,
"loss": 0.0374,
"step": 1026
},
{
"epoch": 2.183006535947712,
"grad_norm": 0.37381961941719055,
"learning_rate": 8.295707871123429e-05,
"loss": 0.0302,
"step": 1027
},
{
"epoch": 2.185185185185185,
"grad_norm": 0.38402146100997925,
"learning_rate": 8.278664004164665e-05,
"loss": 0.0346,
"step": 1028
},
{
"epoch": 2.187363834422658,
"grad_norm": 0.3673423230648041,
"learning_rate": 8.26162528870466e-05,
"loss": 0.0429,
"step": 1029
},
{
"epoch": 2.189542483660131,
"grad_norm": 0.4263920783996582,
"learning_rate": 8.244591775735732e-05,
"loss": 0.0409,
"step": 1030
},
{
"epoch": 2.1917211328976034,
"grad_norm": 0.33678480982780457,
"learning_rate": 8.22756351623464e-05,
"loss": 0.0268,
"step": 1031
},
{
"epoch": 2.1938997821350763,
"grad_norm": 0.29408445954322815,
"learning_rate": 8.210540561162412e-05,
"loss": 0.0254,
"step": 1032
},
{
"epoch": 2.196078431372549,
"grad_norm": 0.3326764404773712,
"learning_rate": 8.193522961464205e-05,
"loss": 0.0323,
"step": 1033
},
{
"epoch": 2.1982570806100217,
"grad_norm": 0.41492709517478943,
"learning_rate": 8.176510768069147e-05,
"loss": 0.0375,
"step": 1034
},
{
"epoch": 2.2004357298474946,
"grad_norm": 0.3688419759273529,
"learning_rate": 8.159504031890191e-05,
"loss": 0.0315,
"step": 1035
},
{
"epoch": 2.2004357298474946,
"eval_loss": 0.3887942135334015,
"eval_runtime": 0.9371,
"eval_samples_per_second": 182.48,
"eval_steps_per_second": 13.873,
"step": 1035
},
{
"epoch": 2.2026143790849675,
"grad_norm": 0.4513092041015625,
"learning_rate": 8.142502803823955e-05,
"loss": 0.044,
"step": 1036
},
{
"epoch": 2.20479302832244,
"grad_norm": 0.35365647077560425,
"learning_rate": 8.125507134750567e-05,
"loss": 0.0297,
"step": 1037
},
{
"epoch": 2.206971677559913,
"grad_norm": 0.3306067883968353,
"learning_rate": 8.108517075533531e-05,
"loss": 0.0325,
"step": 1038
},
{
"epoch": 2.2091503267973858,
"grad_norm": 0.331192284822464,
"learning_rate": 8.091532677019552e-05,
"loss": 0.0284,
"step": 1039
},
{
"epoch": 2.2113289760348582,
"grad_norm": 0.3133065402507782,
"learning_rate": 8.074553990038395e-05,
"loss": 0.0298,
"step": 1040
},
{
"epoch": 2.213507625272331,
"grad_norm": 0.376301646232605,
"learning_rate": 8.057581065402738e-05,
"loss": 0.0418,
"step": 1041
},
{
"epoch": 2.215686274509804,
"grad_norm": 0.4943493604660034,
"learning_rate": 8.040613953908005e-05,
"loss": 0.0475,
"step": 1042
},
{
"epoch": 2.2178649237472765,
"grad_norm": 0.34015706181526184,
"learning_rate": 8.023652706332232e-05,
"loss": 0.0309,
"step": 1043
},
{
"epoch": 2.2200435729847494,
"grad_norm": 0.3141465187072754,
"learning_rate": 8.0066973734359e-05,
"loss": 0.033,
"step": 1044
},
{
"epoch": 2.2222222222222223,
"grad_norm": 0.36095380783081055,
"learning_rate": 7.989748005961791e-05,
"loss": 0.032,
"step": 1045
},
{
"epoch": 2.224400871459695,
"grad_norm": 0.38740071654319763,
"learning_rate": 7.972804654634834e-05,
"loss": 0.0336,
"step": 1046
},
{
"epoch": 2.2265795206971677,
"grad_norm": 0.38274049758911133,
"learning_rate": 7.955867370161952e-05,
"loss": 0.0347,
"step": 1047
},
{
"epoch": 2.2287581699346406,
"grad_norm": 0.3969612717628479,
"learning_rate": 7.938936203231912e-05,
"loss": 0.0459,
"step": 1048
},
{
"epoch": 2.230936819172113,
"grad_norm": 0.47027838230133057,
"learning_rate": 7.922011204515171e-05,
"loss": 0.0355,
"step": 1049
},
{
"epoch": 2.233115468409586,
"grad_norm": 0.5254383087158203,
"learning_rate": 7.905092424663735e-05,
"loss": 0.0364,
"step": 1050
},
{
"epoch": 2.235294117647059,
"grad_norm": 0.3129919469356537,
"learning_rate": 7.888179914310986e-05,
"loss": 0.029,
"step": 1051
},
{
"epoch": 2.237472766884532,
"grad_norm": 0.4359627664089203,
"learning_rate": 7.871273724071553e-05,
"loss": 0.0425,
"step": 1052
},
{
"epoch": 2.2396514161220042,
"grad_norm": 0.377096563577652,
"learning_rate": 7.854373904541144e-05,
"loss": 0.0398,
"step": 1053
},
{
"epoch": 2.241830065359477,
"grad_norm": 0.37962231040000916,
"learning_rate": 7.837480506296404e-05,
"loss": 0.036,
"step": 1054
},
{
"epoch": 2.24400871459695,
"grad_norm": 0.41788601875305176,
"learning_rate": 7.820593579894762e-05,
"loss": 0.0379,
"step": 1055
},
{
"epoch": 2.2461873638344225,
"grad_norm": 0.35980916023254395,
"learning_rate": 7.803713175874275e-05,
"loss": 0.0355,
"step": 1056
},
{
"epoch": 2.2483660130718954,
"grad_norm": 0.3931993246078491,
"learning_rate": 7.786839344753487e-05,
"loss": 0.0423,
"step": 1057
},
{
"epoch": 2.2505446623093683,
"grad_norm": 0.3325110077857971,
"learning_rate": 7.769972137031262e-05,
"loss": 0.0341,
"step": 1058
},
{
"epoch": 2.252723311546841,
"grad_norm": 0.3445272147655487,
"learning_rate": 7.753111603186647e-05,
"loss": 0.0299,
"step": 1059
},
{
"epoch": 2.2549019607843137,
"grad_norm": 0.3202429711818695,
"learning_rate": 7.736257793678714e-05,
"loss": 0.0302,
"step": 1060
},
{
"epoch": 2.2570806100217866,
"grad_norm": 0.3890281319618225,
"learning_rate": 7.719410758946417e-05,
"loss": 0.0353,
"step": 1061
},
{
"epoch": 2.259259259259259,
"grad_norm": 0.3726596236228943,
"learning_rate": 7.702570549408428e-05,
"loss": 0.0354,
"step": 1062
},
{
"epoch": 2.261437908496732,
"grad_norm": 0.3483957052230835,
"learning_rate": 7.685737215462992e-05,
"loss": 0.0323,
"step": 1063
},
{
"epoch": 2.263616557734205,
"grad_norm": 0.474739670753479,
"learning_rate": 7.668910807487783e-05,
"loss": 0.0413,
"step": 1064
},
{
"epoch": 2.265795206971678,
"grad_norm": 0.35884329676628113,
"learning_rate": 7.652091375839744e-05,
"loss": 0.0321,
"step": 1065
},
{
"epoch": 2.2679738562091503,
"grad_norm": 0.431550532579422,
"learning_rate": 7.635278970854943e-05,
"loss": 0.0401,
"step": 1066
},
{
"epoch": 2.270152505446623,
"grad_norm": 0.3722434937953949,
"learning_rate": 7.618473642848411e-05,
"loss": 0.0338,
"step": 1067
},
{
"epoch": 2.272331154684096,
"grad_norm": 0.3986685276031494,
"learning_rate": 7.601675442114009e-05,
"loss": 0.0372,
"step": 1068
},
{
"epoch": 2.2745098039215685,
"grad_norm": 0.3737069368362427,
"learning_rate": 7.584884418924261e-05,
"loss": 0.0349,
"step": 1069
},
{
"epoch": 2.2766884531590414,
"grad_norm": 0.2568318843841553,
"learning_rate": 7.568100623530217e-05,
"loss": 0.0223,
"step": 1070
},
{
"epoch": 2.2788671023965144,
"grad_norm": 0.3369297385215759,
"learning_rate": 7.551324106161293e-05,
"loss": 0.0309,
"step": 1071
},
{
"epoch": 2.281045751633987,
"grad_norm": 0.3036141097545624,
"learning_rate": 7.534554917025119e-05,
"loss": 0.0262,
"step": 1072
},
{
"epoch": 2.2832244008714597,
"grad_norm": 0.419545978307724,
"learning_rate": 7.517793106307399e-05,
"loss": 0.0353,
"step": 1073
},
{
"epoch": 2.2854030501089326,
"grad_norm": 0.2924548387527466,
"learning_rate": 7.501038724171756e-05,
"loss": 0.0223,
"step": 1074
},
{
"epoch": 2.287581699346405,
"grad_norm": 0.3383108675479889,
"learning_rate": 7.484291820759581e-05,
"loss": 0.0273,
"step": 1075
},
{
"epoch": 2.289760348583878,
"grad_norm": 0.41079169511795044,
"learning_rate": 7.46755244618988e-05,
"loss": 0.033,
"step": 1076
},
{
"epoch": 2.291938997821351,
"grad_norm": 0.3486713767051697,
"learning_rate": 7.450820650559126e-05,
"loss": 0.0333,
"step": 1077
},
{
"epoch": 2.2941176470588234,
"grad_norm": 0.2914646863937378,
"learning_rate": 7.434096483941115e-05,
"loss": 0.026,
"step": 1078
},
{
"epoch": 2.2962962962962963,
"grad_norm": 0.31533393263816833,
"learning_rate": 7.417379996386815e-05,
"loss": 0.0291,
"step": 1079
},
{
"epoch": 2.298474945533769,
"grad_norm": 0.38414740562438965,
"learning_rate": 7.400671237924202e-05,
"loss": 0.03,
"step": 1080
},
{
"epoch": 2.3006535947712417,
"grad_norm": 0.3862841725349426,
"learning_rate": 7.383970258558126e-05,
"loss": 0.0301,
"step": 1081
},
{
"epoch": 2.3028322440087146,
"grad_norm": 0.33388271927833557,
"learning_rate": 7.367277108270156e-05,
"loss": 0.028,
"step": 1082
},
{
"epoch": 2.3050108932461875,
"grad_norm": 0.39220672845840454,
"learning_rate": 7.350591837018432e-05,
"loss": 0.0309,
"step": 1083
},
{
"epoch": 2.30718954248366,
"grad_norm": 0.3731820583343506,
"learning_rate": 7.333914494737514e-05,
"loss": 0.032,
"step": 1084
},
{
"epoch": 2.309368191721133,
"grad_norm": 0.3434200584888458,
"learning_rate": 7.317245131338235e-05,
"loss": 0.0304,
"step": 1085
},
{
"epoch": 2.3115468409586057,
"grad_norm": 0.43714702129364014,
"learning_rate": 7.300583796707539e-05,
"loss": 0.0333,
"step": 1086
},
{
"epoch": 2.313725490196078,
"grad_norm": 0.3225899934768677,
"learning_rate": 7.28393054070835e-05,
"loss": 0.0282,
"step": 1087
},
{
"epoch": 2.315904139433551,
"grad_norm": 0.37685656547546387,
"learning_rate": 7.267285413179421e-05,
"loss": 0.0365,
"step": 1088
},
{
"epoch": 2.318082788671024,
"grad_norm": 0.42235344648361206,
"learning_rate": 7.250648463935165e-05,
"loss": 0.0339,
"step": 1089
},
{
"epoch": 2.3202614379084965,
"grad_norm": 0.26639267802238464,
"learning_rate": 7.234019742765532e-05,
"loss": 0.0224,
"step": 1090
},
{
"epoch": 2.3224400871459694,
"grad_norm": 0.4348394572734833,
"learning_rate": 7.217399299435837e-05,
"loss": 0.0395,
"step": 1091
},
{
"epoch": 2.3246187363834423,
"grad_norm": 0.41142308712005615,
"learning_rate": 7.200787183686625e-05,
"loss": 0.0344,
"step": 1092
},
{
"epoch": 2.326797385620915,
"grad_norm": 0.3467880189418793,
"learning_rate": 7.184183445233527e-05,
"loss": 0.0323,
"step": 1093
},
{
"epoch": 2.3289760348583877,
"grad_norm": 0.3533194959163666,
"learning_rate": 7.167588133767091e-05,
"loss": 0.035,
"step": 1094
},
{
"epoch": 2.3311546840958606,
"grad_norm": 0.4386812746524811,
"learning_rate": 7.151001298952655e-05,
"loss": 0.0414,
"step": 1095
},
{
"epoch": 2.3333333333333335,
"grad_norm": 0.42185086011886597,
"learning_rate": 7.134422990430176e-05,
"loss": 0.0344,
"step": 1096
},
{
"epoch": 2.335511982570806,
"grad_norm": 0.38302865624427795,
"learning_rate": 7.117853257814111e-05,
"loss": 0.0368,
"step": 1097
},
{
"epoch": 2.337690631808279,
"grad_norm": 0.30734696984291077,
"learning_rate": 7.101292150693241e-05,
"loss": 0.0324,
"step": 1098
},
{
"epoch": 2.3398692810457518,
"grad_norm": 0.40257543325424194,
"learning_rate": 7.084739718630534e-05,
"loss": 0.0303,
"step": 1099
},
{
"epoch": 2.342047930283224,
"grad_norm": 0.34611865878105164,
"learning_rate": 7.068196011162994e-05,
"loss": 0.0288,
"step": 1100
},
{
"epoch": 2.344226579520697,
"grad_norm": 0.35646459460258484,
"learning_rate": 7.051661077801517e-05,
"loss": 0.0358,
"step": 1101
},
{
"epoch": 2.34640522875817,
"grad_norm": 0.33653634786605835,
"learning_rate": 7.03513496803075e-05,
"loss": 0.0309,
"step": 1102
},
{
"epoch": 2.348583877995643,
"grad_norm": 0.3293280601501465,
"learning_rate": 7.018617731308917e-05,
"loss": 0.0318,
"step": 1103
},
{
"epoch": 2.3507625272331154,
"grad_norm": 0.30165064334869385,
"learning_rate": 7.002109417067697e-05,
"loss": 0.0249,
"step": 1104
},
{
"epoch": 2.3529411764705883,
"grad_norm": 0.3535101115703583,
"learning_rate": 6.985610074712064e-05,
"loss": 0.0277,
"step": 1105
},
{
"epoch": 2.355119825708061,
"grad_norm": 0.3292611837387085,
"learning_rate": 6.969119753620135e-05,
"loss": 0.0277,
"step": 1106
},
{
"epoch": 2.3572984749455337,
"grad_norm": 0.3185204565525055,
"learning_rate": 6.952638503143047e-05,
"loss": 0.0298,
"step": 1107
},
{
"epoch": 2.3594771241830066,
"grad_norm": 0.36010631918907166,
"learning_rate": 6.936166372604773e-05,
"loss": 0.0346,
"step": 1108
},
{
"epoch": 2.3616557734204795,
"grad_norm": 0.30484408140182495,
"learning_rate": 6.919703411302e-05,
"loss": 0.0271,
"step": 1109
},
{
"epoch": 2.363834422657952,
"grad_norm": 0.368277907371521,
"learning_rate": 6.903249668503972e-05,
"loss": 0.0341,
"step": 1110
},
{
"epoch": 2.366013071895425,
"grad_norm": 0.3846035897731781,
"learning_rate": 6.886805193452343e-05,
"loss": 0.0376,
"step": 1111
},
{
"epoch": 2.3681917211328978,
"grad_norm": 0.2508929669857025,
"learning_rate": 6.87037003536104e-05,
"loss": 0.0223,
"step": 1112
},
{
"epoch": 2.3703703703703702,
"grad_norm": 0.3861332833766937,
"learning_rate": 6.853944243416097e-05,
"loss": 0.0322,
"step": 1113
},
{
"epoch": 2.372549019607843,
"grad_norm": 0.4652532935142517,
"learning_rate": 6.837527866775522e-05,
"loss": 0.0402,
"step": 1114
},
{
"epoch": 2.374727668845316,
"grad_norm": 0.44406765699386597,
"learning_rate": 6.821120954569136e-05,
"loss": 0.0262,
"step": 1115
},
{
"epoch": 2.3769063180827885,
"grad_norm": 0.270708292722702,
"learning_rate": 6.804723555898458e-05,
"loss": 0.0269,
"step": 1116
},
{
"epoch": 2.3790849673202614,
"grad_norm": 0.35717475414276123,
"learning_rate": 6.788335719836512e-05,
"loss": 0.0338,
"step": 1117
},
{
"epoch": 2.3812636165577343,
"grad_norm": 0.29959729313850403,
"learning_rate": 6.771957495427716e-05,
"loss": 0.0282,
"step": 1118
},
{
"epoch": 2.383442265795207,
"grad_norm": 0.2950606048107147,
"learning_rate": 6.755588931687722e-05,
"loss": 0.0304,
"step": 1119
},
{
"epoch": 2.3856209150326797,
"grad_norm": 0.2565721273422241,
"learning_rate": 6.739230077603259e-05,
"loss": 0.0252,
"step": 1120
},
{
"epoch": 2.3877995642701526,
"grad_norm": 0.31635963916778564,
"learning_rate": 6.722880982132018e-05,
"loss": 0.0279,
"step": 1121
},
{
"epoch": 2.389978213507625,
"grad_norm": 0.2946345806121826,
"learning_rate": 6.706541694202471e-05,
"loss": 0.0271,
"step": 1122
},
{
"epoch": 2.392156862745098,
"grad_norm": 0.29301801323890686,
"learning_rate": 6.690212262713737e-05,
"loss": 0.0233,
"step": 1123
},
{
"epoch": 2.394335511982571,
"grad_norm": 0.3404824435710907,
"learning_rate": 6.673892736535448e-05,
"loss": 0.0284,
"step": 1124
},
{
"epoch": 2.3965141612200433,
"grad_norm": 0.3590019643306732,
"learning_rate": 6.657583164507575e-05,
"loss": 0.0261,
"step": 1125
},
{
"epoch": 2.3986928104575163,
"grad_norm": 0.2633257210254669,
"learning_rate": 6.641283595440323e-05,
"loss": 0.0237,
"step": 1126
},
{
"epoch": 2.400871459694989,
"grad_norm": 0.3774015009403229,
"learning_rate": 6.624994078113942e-05,
"loss": 0.0329,
"step": 1127
},
{
"epoch": 2.4030501089324616,
"grad_norm": 0.3947656750679016,
"learning_rate": 6.608714661278606e-05,
"loss": 0.0318,
"step": 1128
},
{
"epoch": 2.4052287581699345,
"grad_norm": 0.28098928928375244,
"learning_rate": 6.592445393654253e-05,
"loss": 0.0284,
"step": 1129
},
{
"epoch": 2.4074074074074074,
"grad_norm": 0.3270324468612671,
"learning_rate": 6.576186323930466e-05,
"loss": 0.0244,
"step": 1130
},
{
"epoch": 2.4095860566448803,
"grad_norm": 0.3575744032859802,
"learning_rate": 6.559937500766294e-05,
"loss": 0.03,
"step": 1131
},
{
"epoch": 2.411764705882353,
"grad_norm": 0.29993295669555664,
"learning_rate": 6.543698972790117e-05,
"loss": 0.0238,
"step": 1132
},
{
"epoch": 2.4139433551198257,
"grad_norm": 0.3671198785305023,
"learning_rate": 6.52747078859952e-05,
"loss": 0.0314,
"step": 1133
},
{
"epoch": 2.4161220043572986,
"grad_norm": 0.34642457962036133,
"learning_rate": 6.51125299676111e-05,
"loss": 0.0301,
"step": 1134
},
{
"epoch": 2.418300653594771,
"grad_norm": 0.2813946306705475,
"learning_rate": 6.49504564581042e-05,
"loss": 0.0228,
"step": 1135
},
{
"epoch": 2.420479302832244,
"grad_norm": 0.3748490512371063,
"learning_rate": 6.478848784251713e-05,
"loss": 0.0344,
"step": 1136
},
{
"epoch": 2.422657952069717,
"grad_norm": 0.2645992040634155,
"learning_rate": 6.462662460557871e-05,
"loss": 0.0239,
"step": 1137
},
{
"epoch": 2.4248366013071894,
"grad_norm": 0.3389866054058075,
"learning_rate": 6.446486723170236e-05,
"loss": 0.0331,
"step": 1138
},
{
"epoch": 2.4270152505446623,
"grad_norm": 0.3613586723804474,
"learning_rate": 6.43032162049846e-05,
"loss": 0.0278,
"step": 1139
},
{
"epoch": 2.429193899782135,
"grad_norm": 0.28584545850753784,
"learning_rate": 6.414167200920391e-05,
"loss": 0.0234,
"step": 1140
},
{
"epoch": 2.431372549019608,
"grad_norm": 0.3366835117340088,
"learning_rate": 6.398023512781886e-05,
"loss": 0.0288,
"step": 1141
},
{
"epoch": 2.4335511982570806,
"grad_norm": 0.34187620878219604,
"learning_rate": 6.381890604396687e-05,
"loss": 0.0295,
"step": 1142
},
{
"epoch": 2.4357298474945535,
"grad_norm": 0.36750370264053345,
"learning_rate": 6.36576852404628e-05,
"loss": 0.0323,
"step": 1143
},
{
"epoch": 2.4379084967320264,
"grad_norm": 0.39871886372566223,
"learning_rate": 6.349657319979742e-05,
"loss": 0.0338,
"step": 1144
},
{
"epoch": 2.440087145969499,
"grad_norm": 0.3220905661582947,
"learning_rate": 6.333557040413608e-05,
"loss": 0.0282,
"step": 1145
},
{
"epoch": 2.4422657952069717,
"grad_norm": 0.3052578866481781,
"learning_rate": 6.317467733531712e-05,
"loss": 0.0274,
"step": 1146
},
{
"epoch": 2.4444444444444446,
"grad_norm": 0.33906757831573486,
"learning_rate": 6.301389447485049e-05,
"loss": 0.0305,
"step": 1147
},
{
"epoch": 2.446623093681917,
"grad_norm": 0.31006646156311035,
"learning_rate": 6.28532223039163e-05,
"loss": 0.0255,
"step": 1148
},
{
"epoch": 2.44880174291939,
"grad_norm": 0.38327571749687195,
"learning_rate": 6.269266130336351e-05,
"loss": 0.031,
"step": 1149
},
{
"epoch": 2.450980392156863,
"grad_norm": 0.3514636754989624,
"learning_rate": 6.253221195370826e-05,
"loss": 0.0331,
"step": 1150
},
{
"epoch": 2.450980392156863,
"eval_loss": 0.4003376364707947,
"eval_runtime": 0.9361,
"eval_samples_per_second": 182.669,
"eval_steps_per_second": 13.887,
"step": 1150
},
{
"epoch": 2.4531590413943354,
"grad_norm": 0.32665351033210754,
"learning_rate": 6.23718747351326e-05,
"loss": 0.0284,
"step": 1151
},
{
"epoch": 2.4553376906318083,
"grad_norm": 0.3169025182723999,
"learning_rate": 6.221165012748297e-05,
"loss": 0.0306,
"step": 1152
},
{
"epoch": 2.457516339869281,
"grad_norm": 0.30815449357032776,
"learning_rate": 6.205153861026878e-05,
"loss": 0.0253,
"step": 1153
},
{
"epoch": 2.4596949891067537,
"grad_norm": 0.4452240765094757,
"learning_rate": 6.189154066266112e-05,
"loss": 0.0355,
"step": 1154
},
{
"epoch": 2.4618736383442266,
"grad_norm": 0.3148786127567291,
"learning_rate": 6.173165676349103e-05,
"loss": 0.0272,
"step": 1155
},
{
"epoch": 2.4640522875816995,
"grad_norm": 0.32313981652259827,
"learning_rate": 6.157188739124834e-05,
"loss": 0.0261,
"step": 1156
},
{
"epoch": 2.466230936819172,
"grad_norm": 0.2976452112197876,
"learning_rate": 6.141223302408008e-05,
"loss": 0.0265,
"step": 1157
},
{
"epoch": 2.468409586056645,
"grad_norm": 0.28973153233528137,
"learning_rate": 6.125269413978907e-05,
"loss": 0.0256,
"step": 1158
},
{
"epoch": 2.4705882352941178,
"grad_norm": 0.2961597740650177,
"learning_rate": 6.109327121583266e-05,
"loss": 0.0275,
"step": 1159
},
{
"epoch": 2.47276688453159,
"grad_norm": 0.2728593647480011,
"learning_rate": 6.093396472932103e-05,
"loss": 0.0251,
"step": 1160
},
{
"epoch": 2.474945533769063,
"grad_norm": 0.29524633288383484,
"learning_rate": 6.0774775157015905e-05,
"loss": 0.0229,
"step": 1161
},
{
"epoch": 2.477124183006536,
"grad_norm": 0.3359307050704956,
"learning_rate": 6.0615702975329194e-05,
"loss": 0.0269,
"step": 1162
},
{
"epoch": 2.4793028322440085,
"grad_norm": 0.34090349078178406,
"learning_rate": 6.045674866032135e-05,
"loss": 0.027,
"step": 1163
},
{
"epoch": 2.4814814814814814,
"grad_norm": 0.3288646936416626,
"learning_rate": 6.029791268770029e-05,
"loss": 0.0265,
"step": 1164
},
{
"epoch": 2.4836601307189543,
"grad_norm": 0.35542285442352295,
"learning_rate": 6.013919553281958e-05,
"loss": 0.028,
"step": 1165
},
{
"epoch": 2.4858387799564268,
"grad_norm": 0.312814325094223,
"learning_rate": 5.998059767067728e-05,
"loss": 0.0245,
"step": 1166
},
{
"epoch": 2.4880174291938997,
"grad_norm": 0.2617066502571106,
"learning_rate": 5.9822119575914346e-05,
"loss": 0.0253,
"step": 1167
},
{
"epoch": 2.4901960784313726,
"grad_norm": 0.3561308979988098,
"learning_rate": 5.9663761722813495e-05,
"loss": 0.0315,
"step": 1168
},
{
"epoch": 2.4923747276688455,
"grad_norm": 0.40100762248039246,
"learning_rate": 5.950552458529742e-05,
"loss": 0.0316,
"step": 1169
},
{
"epoch": 2.494553376906318,
"grad_norm": 0.3167724013328552,
"learning_rate": 5.934740863692759e-05,
"loss": 0.0251,
"step": 1170
},
{
"epoch": 2.496732026143791,
"grad_norm": 0.2904264032840729,
"learning_rate": 5.91894143509028e-05,
"loss": 0.0248,
"step": 1171
},
{
"epoch": 2.4989106753812638,
"grad_norm": 0.3041043281555176,
"learning_rate": 5.903154220005771e-05,
"loss": 0.0242,
"step": 1172
},
{
"epoch": 2.5010893246187362,
"grad_norm": 0.3186579644680023,
"learning_rate": 5.8873792656861545e-05,
"loss": 0.0254,
"step": 1173
},
{
"epoch": 2.503267973856209,
"grad_norm": 0.31996387243270874,
"learning_rate": 5.871616619341653e-05,
"loss": 0.0291,
"step": 1174
},
{
"epoch": 2.505446623093682,
"grad_norm": 0.2908783555030823,
"learning_rate": 5.855866328145654e-05,
"loss": 0.029,
"step": 1175
},
{
"epoch": 2.507625272331155,
"grad_norm": 0.3205404579639435,
"learning_rate": 5.840128439234571e-05,
"loss": 0.0273,
"step": 1176
},
{
"epoch": 2.5098039215686274,
"grad_norm": 0.28629106283187866,
"learning_rate": 5.8244029997076986e-05,
"loss": 0.0206,
"step": 1177
},
{
"epoch": 2.5119825708061003,
"grad_norm": 0.22277586162090302,
"learning_rate": 5.80869005662708e-05,
"loss": 0.0174,
"step": 1178
},
{
"epoch": 2.5141612200435732,
"grad_norm": 0.3303561210632324,
"learning_rate": 5.792989657017359e-05,
"loss": 0.0288,
"step": 1179
},
{
"epoch": 2.5163398692810457,
"grad_norm": 0.3861919641494751,
"learning_rate": 5.777301847865629e-05,
"loss": 0.0285,
"step": 1180
},
{
"epoch": 2.5185185185185186,
"grad_norm": 0.3277258574962616,
"learning_rate": 5.76162667612132e-05,
"loss": 0.0312,
"step": 1181
},
{
"epoch": 2.5206971677559915,
"grad_norm": 0.31656593084335327,
"learning_rate": 5.7459641886960244e-05,
"loss": 0.0289,
"step": 1182
},
{
"epoch": 2.522875816993464,
"grad_norm": 0.3240686357021332,
"learning_rate": 5.7303144324633865e-05,
"loss": 0.0253,
"step": 1183
},
{
"epoch": 2.525054466230937,
"grad_norm": 0.3360879123210907,
"learning_rate": 5.714677454258947e-05,
"loss": 0.0382,
"step": 1184
},
{
"epoch": 2.52723311546841,
"grad_norm": 0.3368277847766876,
"learning_rate": 5.699053300880006e-05,
"loss": 0.0274,
"step": 1185
},
{
"epoch": 2.5294117647058822,
"grad_norm": 0.2984015941619873,
"learning_rate": 5.6834420190854745e-05,
"loss": 0.0267,
"step": 1186
},
{
"epoch": 2.531590413943355,
"grad_norm": 0.3210262060165405,
"learning_rate": 5.66784365559575e-05,
"loss": 0.0251,
"step": 1187
},
{
"epoch": 2.533769063180828,
"grad_norm": 0.35521429777145386,
"learning_rate": 5.652258257092569e-05,
"loss": 0.0304,
"step": 1188
},
{
"epoch": 2.5359477124183005,
"grad_norm": 0.3135312497615814,
"learning_rate": 5.6366858702188676e-05,
"loss": 0.024,
"step": 1189
},
{
"epoch": 2.5381263616557734,
"grad_norm": 0.32191547751426697,
"learning_rate": 5.621126541578632e-05,
"loss": 0.0274,
"step": 1190
},
{
"epoch": 2.5403050108932463,
"grad_norm": 0.37003350257873535,
"learning_rate": 5.605580317736782e-05,
"loss": 0.0279,
"step": 1191
},
{
"epoch": 2.542483660130719,
"grad_norm": 0.299571692943573,
"learning_rate": 5.590047245219009e-05,
"loss": 0.0244,
"step": 1192
},
{
"epoch": 2.5446623093681917,
"grad_norm": 0.27935001254081726,
"learning_rate": 5.574527370511655e-05,
"loss": 0.0283,
"step": 1193
},
{
"epoch": 2.5468409586056646,
"grad_norm": 0.27740544080734253,
"learning_rate": 5.559020740061549e-05,
"loss": 0.0225,
"step": 1194
},
{
"epoch": 2.549019607843137,
"grad_norm": 0.42784780263900757,
"learning_rate": 5.543527400275904e-05,
"loss": 0.034,
"step": 1195
},
{
"epoch": 2.55119825708061,
"grad_norm": 0.3304250240325928,
"learning_rate": 5.528047397522133e-05,
"loss": 0.0283,
"step": 1196
},
{
"epoch": 2.553376906318083,
"grad_norm": 0.2996106445789337,
"learning_rate": 5.512580778127763e-05,
"loss": 0.0231,
"step": 1197
},
{
"epoch": 2.5555555555555554,
"grad_norm": 0.2966853976249695,
"learning_rate": 5.497127588380244e-05,
"loss": 0.0235,
"step": 1198
},
{
"epoch": 2.5577342047930283,
"grad_norm": 0.37098780274391174,
"learning_rate": 5.481687874526853e-05,
"loss": 0.0289,
"step": 1199
},
{
"epoch": 2.559912854030501,
"grad_norm": 0.34960904717445374,
"learning_rate": 5.4662616827745185e-05,
"loss": 0.0378,
"step": 1200
},
{
"epoch": 2.5620915032679736,
"grad_norm": 0.34042587876319885,
"learning_rate": 5.4508490592897145e-05,
"loss": 0.03,
"step": 1201
},
{
"epoch": 2.5642701525054465,
"grad_norm": 0.3108261823654175,
"learning_rate": 5.4354500501983074e-05,
"loss": 0.0285,
"step": 1202
},
{
"epoch": 2.5664488017429194,
"grad_norm": 0.2840803861618042,
"learning_rate": 5.420064701585421e-05,
"loss": 0.0236,
"step": 1203
},
{
"epoch": 2.568627450980392,
"grad_norm": 0.3302132785320282,
"learning_rate": 5.404693059495285e-05,
"loss": 0.0273,
"step": 1204
},
{
"epoch": 2.570806100217865,
"grad_norm": 0.2904808819293976,
"learning_rate": 5.389335169931122e-05,
"loss": 0.0245,
"step": 1205
},
{
"epoch": 2.5729847494553377,
"grad_norm": 0.3794270157814026,
"learning_rate": 5.373991078854992e-05,
"loss": 0.0314,
"step": 1206
},
{
"epoch": 2.57516339869281,
"grad_norm": 0.26546499133110046,
"learning_rate": 5.358660832187663e-05,
"loss": 0.024,
"step": 1207
},
{
"epoch": 2.577342047930283,
"grad_norm": 0.3150874376296997,
"learning_rate": 5.3433444758084604e-05,
"loss": 0.0284,
"step": 1208
},
{
"epoch": 2.579520697167756,
"grad_norm": 0.2919199466705322,
"learning_rate": 5.3280420555551556e-05,
"loss": 0.0291,
"step": 1209
},
{
"epoch": 2.581699346405229,
"grad_norm": 0.25668779015541077,
"learning_rate": 5.312753617223794e-05,
"loss": 0.0211,
"step": 1210
},
{
"epoch": 2.5838779956427014,
"grad_norm": 0.2591281831264496,
"learning_rate": 5.297479206568597e-05,
"loss": 0.018,
"step": 1211
},
{
"epoch": 2.5860566448801743,
"grad_norm": 0.34996140003204346,
"learning_rate": 5.282218869301788e-05,
"loss": 0.0334,
"step": 1212
},
{
"epoch": 2.588235294117647,
"grad_norm": 0.28990140557289124,
"learning_rate": 5.266972651093487e-05,
"loss": 0.0226,
"step": 1213
},
{
"epoch": 2.59041394335512,
"grad_norm": 0.3653867244720459,
"learning_rate": 5.251740597571542e-05,
"loss": 0.0258,
"step": 1214
},
{
"epoch": 2.5925925925925926,
"grad_norm": 0.32424435019493103,
"learning_rate": 5.2365227543214235e-05,
"loss": 0.0272,
"step": 1215
},
{
"epoch": 2.5947712418300655,
"grad_norm": 0.3388940095901489,
"learning_rate": 5.221319166886073e-05,
"loss": 0.0245,
"step": 1216
},
{
"epoch": 2.5969498910675384,
"grad_norm": 0.3433808982372284,
"learning_rate": 5.206129880765769e-05,
"loss": 0.0308,
"step": 1217
},
{
"epoch": 2.599128540305011,
"grad_norm": 0.30091357231140137,
"learning_rate": 5.190954941417977e-05,
"loss": 0.0258,
"step": 1218
},
{
"epoch": 2.6013071895424837,
"grad_norm": 0.30587130784988403,
"learning_rate": 5.175794394257243e-05,
"loss": 0.0271,
"step": 1219
},
{
"epoch": 2.6034858387799567,
"grad_norm": 0.2603703737258911,
"learning_rate": 5.160648284655032e-05,
"loss": 0.0238,
"step": 1220
},
{
"epoch": 2.605664488017429,
"grad_norm": 0.24872830510139465,
"learning_rate": 5.1455166579396084e-05,
"loss": 0.0248,
"step": 1221
},
{
"epoch": 2.607843137254902,
"grad_norm": 0.33000561594963074,
"learning_rate": 5.1303995593958824e-05,
"loss": 0.0234,
"step": 1222
},
{
"epoch": 2.610021786492375,
"grad_norm": 0.325867623090744,
"learning_rate": 5.115297034265295e-05,
"loss": 0.0266,
"step": 1223
},
{
"epoch": 2.6122004357298474,
"grad_norm": 0.29010075330734253,
"learning_rate": 5.100209127745661e-05,
"loss": 0.0214,
"step": 1224
},
{
"epoch": 2.6143790849673203,
"grad_norm": 0.2876839339733124,
"learning_rate": 5.085135884991067e-05,
"loss": 0.0243,
"step": 1225
},
{
"epoch": 2.616557734204793,
"grad_norm": 0.3246232569217682,
"learning_rate": 5.0700773511116906e-05,
"loss": 0.0262,
"step": 1226
},
{
"epoch": 2.6187363834422657,
"grad_norm": 0.35674700140953064,
"learning_rate": 5.05503357117371e-05,
"loss": 0.0247,
"step": 1227
},
{
"epoch": 2.6209150326797386,
"grad_norm": 0.27732157707214355,
"learning_rate": 5.040004590199128e-05,
"loss": 0.0183,
"step": 1228
},
{
"epoch": 2.6230936819172115,
"grad_norm": 0.3178655803203583,
"learning_rate": 5.024990453165677e-05,
"loss": 0.027,
"step": 1229
},
{
"epoch": 2.625272331154684,
"grad_norm": 0.36049678921699524,
"learning_rate": 5.0099912050066556e-05,
"loss": 0.0245,
"step": 1230
},
{
"epoch": 2.627450980392157,
"grad_norm": 0.38455018401145935,
"learning_rate": 4.99500689061081e-05,
"loss": 0.027,
"step": 1231
},
{
"epoch": 2.6296296296296298,
"grad_norm": 0.28991568088531494,
"learning_rate": 4.9800375548221845e-05,
"loss": 0.0215,
"step": 1232
},
{
"epoch": 2.6318082788671022,
"grad_norm": 0.28535112738609314,
"learning_rate": 4.965083242440008e-05,
"loss": 0.027,
"step": 1233
},
{
"epoch": 2.633986928104575,
"grad_norm": 0.36667653918266296,
"learning_rate": 4.950143998218531e-05,
"loss": 0.027,
"step": 1234
},
{
"epoch": 2.636165577342048,
"grad_norm": 0.2421015053987503,
"learning_rate": 4.935219866866935e-05,
"loss": 0.0233,
"step": 1235
},
{
"epoch": 2.6383442265795205,
"grad_norm": 0.3010506331920624,
"learning_rate": 4.920310893049146e-05,
"loss": 0.0243,
"step": 1236
},
{
"epoch": 2.6405228758169934,
"grad_norm": 0.2629561722278595,
"learning_rate": 4.905417121383749e-05,
"loss": 0.0234,
"step": 1237
},
{
"epoch": 2.6427015250544663,
"grad_norm": 0.2663710117340088,
"learning_rate": 4.89053859644381e-05,
"loss": 0.0207,
"step": 1238
},
{
"epoch": 2.644880174291939,
"grad_norm": 0.28878337144851685,
"learning_rate": 4.875675362756796e-05,
"loss": 0.0251,
"step": 1239
},
{
"epoch": 2.6470588235294117,
"grad_norm": 0.27310487627983093,
"learning_rate": 4.860827464804383e-05,
"loss": 0.0206,
"step": 1240
},
{
"epoch": 2.6492374727668846,
"grad_norm": 0.28431037068367004,
"learning_rate": 4.84599494702237e-05,
"loss": 0.0223,
"step": 1241
},
{
"epoch": 2.651416122004357,
"grad_norm": 0.3771039545536041,
"learning_rate": 4.831177853800511e-05,
"loss": 0.0338,
"step": 1242
},
{
"epoch": 2.65359477124183,
"grad_norm": 0.2601141035556793,
"learning_rate": 4.816376229482413e-05,
"loss": 0.0221,
"step": 1243
},
{
"epoch": 2.655773420479303,
"grad_norm": 0.36854109168052673,
"learning_rate": 4.801590118365383e-05,
"loss": 0.0275,
"step": 1244
},
{
"epoch": 2.6579520697167753,
"grad_norm": 0.26293808221817017,
"learning_rate": 4.7868195647003044e-05,
"loss": 0.0249,
"step": 1245
},
{
"epoch": 2.6601307189542482,
"grad_norm": 0.2990938723087311,
"learning_rate": 4.77206461269149e-05,
"loss": 0.0251,
"step": 1246
},
{
"epoch": 2.662309368191721,
"grad_norm": 0.2481040358543396,
"learning_rate": 4.757325306496579e-05,
"loss": 0.0214,
"step": 1247
},
{
"epoch": 2.664488017429194,
"grad_norm": 0.27054113149642944,
"learning_rate": 4.7426016902263636e-05,
"loss": 0.0247,
"step": 1248
},
{
"epoch": 2.6666666666666665,
"grad_norm": 0.31726422905921936,
"learning_rate": 4.72789380794471e-05,
"loss": 0.0256,
"step": 1249
},
{
"epoch": 2.6688453159041394,
"grad_norm": 0.3075810372829437,
"learning_rate": 4.713201703668367e-05,
"loss": 0.0229,
"step": 1250
},
{
"epoch": 2.6710239651416123,
"grad_norm": 0.2827533781528473,
"learning_rate": 4.6985254213668885e-05,
"loss": 0.0294,
"step": 1251
},
{
"epoch": 2.6732026143790852,
"grad_norm": 0.23534324765205383,
"learning_rate": 4.683865004962452e-05,
"loss": 0.0199,
"step": 1252
},
{
"epoch": 2.6753812636165577,
"grad_norm": 0.37100929021835327,
"learning_rate": 4.669220498329784e-05,
"loss": 0.027,
"step": 1253
},
{
"epoch": 2.6775599128540306,
"grad_norm": 0.3112419545650482,
"learning_rate": 4.654591945295969e-05,
"loss": 0.0288,
"step": 1254
},
{
"epoch": 2.6797385620915035,
"grad_norm": 0.2796306610107422,
"learning_rate": 4.639979389640364e-05,
"loss": 0.0225,
"step": 1255
},
{
"epoch": 2.681917211328976,
"grad_norm": 0.2607952654361725,
"learning_rate": 4.6253828750944375e-05,
"loss": 0.0209,
"step": 1256
},
{
"epoch": 2.684095860566449,
"grad_norm": 0.2551371157169342,
"learning_rate": 4.610802445341662e-05,
"loss": 0.0231,
"step": 1257
},
{
"epoch": 2.686274509803922,
"grad_norm": 0.25890782475471497,
"learning_rate": 4.596238144017369e-05,
"loss": 0.0212,
"step": 1258
},
{
"epoch": 2.6884531590413943,
"grad_norm": 0.2630198001861572,
"learning_rate": 4.5816900147086226e-05,
"loss": 0.0187,
"step": 1259
},
{
"epoch": 2.690631808278867,
"grad_norm": 0.25908374786376953,
"learning_rate": 4.567158100954083e-05,
"loss": 0.0217,
"step": 1260
},
{
"epoch": 2.69281045751634,
"grad_norm": 0.2860732972621918,
"learning_rate": 4.55264244624389e-05,
"loss": 0.0244,
"step": 1261
},
{
"epoch": 2.6949891067538125,
"grad_norm": 0.24638508260250092,
"learning_rate": 4.53814309401951e-05,
"loss": 0.0211,
"step": 1262
},
{
"epoch": 2.6971677559912854,
"grad_norm": 0.24620142579078674,
"learning_rate": 4.523660087673647e-05,
"loss": 0.0188,
"step": 1263
},
{
"epoch": 2.6993464052287583,
"grad_norm": 0.321836918592453,
"learning_rate": 4.509193470550056e-05,
"loss": 0.0234,
"step": 1264
},
{
"epoch": 2.701525054466231,
"grad_norm": 0.3046250343322754,
"learning_rate": 4.494743285943466e-05,
"loss": 0.0222,
"step": 1265
},
{
"epoch": 2.701525054466231,
"eval_loss": 0.41452252864837646,
"eval_runtime": 0.9371,
"eval_samples_per_second": 182.487,
"eval_steps_per_second": 13.873,
"step": 1265
},
{
"epoch": 2.7037037037037037,
"grad_norm": 0.2803155779838562,
"learning_rate": 4.4803095770994106e-05,
"loss": 0.0283,
"step": 1266
},
{
"epoch": 2.7058823529411766,
"grad_norm": 0.29396378993988037,
"learning_rate": 4.465892387214129e-05,
"loss": 0.0246,
"step": 1267
},
{
"epoch": 2.708061002178649,
"grad_norm": 0.2330588549375534,
"learning_rate": 4.4514917594344184e-05,
"loss": 0.0189,
"step": 1268
},
{
"epoch": 2.710239651416122,
"grad_norm": 0.26576220989227295,
"learning_rate": 4.437107736857514e-05,
"loss": 0.0208,
"step": 1269
},
{
"epoch": 2.712418300653595,
"grad_norm": 0.27354955673217773,
"learning_rate": 4.422740362530945e-05,
"loss": 0.0256,
"step": 1270
},
{
"epoch": 2.7145969498910674,
"grad_norm": 0.28396570682525635,
"learning_rate": 4.408389679452428e-05,
"loss": 0.0234,
"step": 1271
},
{
"epoch": 2.7167755991285403,
"grad_norm": 0.2573300898075104,
"learning_rate": 4.3940557305697226e-05,
"loss": 0.0221,
"step": 1272
},
{
"epoch": 2.718954248366013,
"grad_norm": 0.3373596668243408,
"learning_rate": 4.379738558780514e-05,
"loss": 0.0286,
"step": 1273
},
{
"epoch": 2.7211328976034856,
"grad_norm": 0.2618468999862671,
"learning_rate": 4.3654382069322644e-05,
"loss": 0.0253,
"step": 1274
},
{
"epoch": 2.7233115468409586,
"grad_norm": 0.2613149881362915,
"learning_rate": 4.351154717822111e-05,
"loss": 0.0233,
"step": 1275
},
{
"epoch": 2.7254901960784315,
"grad_norm": 0.2562900483608246,
"learning_rate": 4.3368881341967135e-05,
"loss": 0.023,
"step": 1276
},
{
"epoch": 2.727668845315904,
"grad_norm": 0.2618182301521301,
"learning_rate": 4.322638498752159e-05,
"loss": 0.0198,
"step": 1277
},
{
"epoch": 2.729847494553377,
"grad_norm": 0.31674104928970337,
"learning_rate": 4.308405854133786e-05,
"loss": 0.0222,
"step": 1278
},
{
"epoch": 2.7320261437908497,
"grad_norm": 0.29369550943374634,
"learning_rate": 4.294190242936107e-05,
"loss": 0.028,
"step": 1279
},
{
"epoch": 2.734204793028322,
"grad_norm": 0.26428335905075073,
"learning_rate": 4.2799917077026394e-05,
"loss": 0.0228,
"step": 1280
},
{
"epoch": 2.736383442265795,
"grad_norm": 0.36297959089279175,
"learning_rate": 4.265810290925811e-05,
"loss": 0.0298,
"step": 1281
},
{
"epoch": 2.738562091503268,
"grad_norm": 0.31978175044059753,
"learning_rate": 4.251646035046814e-05,
"loss": 0.024,
"step": 1282
},
{
"epoch": 2.7407407407407405,
"grad_norm": 0.2728327512741089,
"learning_rate": 4.237498982455483e-05,
"loss": 0.0215,
"step": 1283
},
{
"epoch": 2.7429193899782134,
"grad_norm": 0.36189430952072144,
"learning_rate": 4.223369175490162e-05,
"loss": 0.0275,
"step": 1284
},
{
"epoch": 2.7450980392156863,
"grad_norm": 0.262774258852005,
"learning_rate": 4.209256656437594e-05,
"loss": 0.0217,
"step": 1285
},
{
"epoch": 2.747276688453159,
"grad_norm": 0.2569471299648285,
"learning_rate": 4.195161467532769e-05,
"loss": 0.0215,
"step": 1286
},
{
"epoch": 2.7494553376906317,
"grad_norm": 0.3299883306026459,
"learning_rate": 4.1810836509588345e-05,
"loss": 0.0287,
"step": 1287
},
{
"epoch": 2.7516339869281046,
"grad_norm": 0.2865789830684662,
"learning_rate": 4.167023248846925e-05,
"loss": 0.0249,
"step": 1288
},
{
"epoch": 2.7538126361655775,
"grad_norm": 0.22566261887550354,
"learning_rate": 4.152980303276074e-05,
"loss": 0.021,
"step": 1289
},
{
"epoch": 2.7559912854030504,
"grad_norm": 0.3007291257381439,
"learning_rate": 4.138954856273054e-05,
"loss": 0.024,
"step": 1290
},
{
"epoch": 2.758169934640523,
"grad_norm": 0.281529039144516,
"learning_rate": 4.1249469498122974e-05,
"loss": 0.023,
"step": 1291
},
{
"epoch": 2.7603485838779958,
"grad_norm": 0.3368334472179413,
"learning_rate": 4.110956625815713e-05,
"loss": 0.0288,
"step": 1292
},
{
"epoch": 2.7625272331154687,
"grad_norm": 0.28486526012420654,
"learning_rate": 4.0969839261526114e-05,
"loss": 0.0207,
"step": 1293
},
{
"epoch": 2.764705882352941,
"grad_norm": 0.3560362160205841,
"learning_rate": 4.083028892639541e-05,
"loss": 0.0265,
"step": 1294
},
{
"epoch": 2.766884531590414,
"grad_norm": 0.2939550280570984,
"learning_rate": 4.069091567040194e-05,
"loss": 0.0239,
"step": 1295
},
{
"epoch": 2.769063180827887,
"grad_norm": 0.27641457319259644,
"learning_rate": 4.055171991065262e-05,
"loss": 0.0243,
"step": 1296
},
{
"epoch": 2.7712418300653594,
"grad_norm": 0.2875332534313202,
"learning_rate": 4.0412702063723196e-05,
"loss": 0.0233,
"step": 1297
},
{
"epoch": 2.7734204793028323,
"grad_norm": 0.3004039227962494,
"learning_rate": 4.027386254565688e-05,
"loss": 0.0232,
"step": 1298
},
{
"epoch": 2.775599128540305,
"grad_norm": 0.3261055648326874,
"learning_rate": 4.0135201771963335e-05,
"loss": 0.0249,
"step": 1299
},
{
"epoch": 2.7777777777777777,
"grad_norm": 0.2898596227169037,
"learning_rate": 3.9996720157617094e-05,
"loss": 0.0267,
"step": 1300
},
{
"epoch": 2.7799564270152506,
"grad_norm": 0.2966534495353699,
"learning_rate": 3.985841811705678e-05,
"loss": 0.0206,
"step": 1301
},
{
"epoch": 2.7821350762527235,
"grad_norm": 0.23390159010887146,
"learning_rate": 3.972029606418335e-05,
"loss": 0.0204,
"step": 1302
},
{
"epoch": 2.784313725490196,
"grad_norm": 0.2751654088497162,
"learning_rate": 3.958235441235926e-05,
"loss": 0.0251,
"step": 1303
},
{
"epoch": 2.786492374727669,
"grad_norm": 0.2754038870334625,
"learning_rate": 3.9444593574406915e-05,
"loss": 0.0206,
"step": 1304
},
{
"epoch": 2.7886710239651418,
"grad_norm": 0.23519861698150635,
"learning_rate": 3.9307013962607866e-05,
"loss": 0.0204,
"step": 1305
},
{
"epoch": 2.7908496732026142,
"grad_norm": 0.2893458306789398,
"learning_rate": 3.9169615988701e-05,
"loss": 0.0275,
"step": 1306
},
{
"epoch": 2.793028322440087,
"grad_norm": 0.2331874668598175,
"learning_rate": 3.903240006388184e-05,
"loss": 0.0233,
"step": 1307
},
{
"epoch": 2.79520697167756,
"grad_norm": 0.299674391746521,
"learning_rate": 3.8895366598800896e-05,
"loss": 0.0221,
"step": 1308
},
{
"epoch": 2.7973856209150325,
"grad_norm": 0.3127885162830353,
"learning_rate": 3.875851600356277e-05,
"loss": 0.0269,
"step": 1309
},
{
"epoch": 2.7995642701525054,
"grad_norm": 0.2942737638950348,
"learning_rate": 3.862184868772473e-05,
"loss": 0.0243,
"step": 1310
},
{
"epoch": 2.8017429193899783,
"grad_norm": 0.2755415141582489,
"learning_rate": 3.8485365060295566e-05,
"loss": 0.0218,
"step": 1311
},
{
"epoch": 2.803921568627451,
"grad_norm": 0.23431426286697388,
"learning_rate": 3.834906552973424e-05,
"loss": 0.0202,
"step": 1312
},
{
"epoch": 2.8061002178649237,
"grad_norm": 0.2319639027118683,
"learning_rate": 3.821295050394892e-05,
"loss": 0.0186,
"step": 1313
},
{
"epoch": 2.8082788671023966,
"grad_norm": 0.24743026494979858,
"learning_rate": 3.807702039029539e-05,
"loss": 0.0224,
"step": 1314
},
{
"epoch": 2.810457516339869,
"grad_norm": 0.25894078612327576,
"learning_rate": 3.794127559557632e-05,
"loss": 0.0233,
"step": 1315
},
{
"epoch": 2.812636165577342,
"grad_norm": 0.26663923263549805,
"learning_rate": 3.780571652603949e-05,
"loss": 0.0221,
"step": 1316
},
{
"epoch": 2.814814814814815,
"grad_norm": 0.2803906500339508,
"learning_rate": 3.767034358737708e-05,
"loss": 0.0203,
"step": 1317
},
{
"epoch": 2.8169934640522873,
"grad_norm": 0.3296569287776947,
"learning_rate": 3.753515718472402e-05,
"loss": 0.0276,
"step": 1318
},
{
"epoch": 2.8191721132897603,
"grad_norm": 0.2767476439476013,
"learning_rate": 3.740015772265716e-05,
"loss": 0.0237,
"step": 1319
},
{
"epoch": 2.821350762527233,
"grad_norm": 0.2812361717224121,
"learning_rate": 3.726534560519381e-05,
"loss": 0.0215,
"step": 1320
},
{
"epoch": 2.8235294117647056,
"grad_norm": 0.22307142615318298,
"learning_rate": 3.713072123579065e-05,
"loss": 0.0214,
"step": 1321
},
{
"epoch": 2.8257080610021785,
"grad_norm": 0.2116055190563202,
"learning_rate": 3.6996285017342406e-05,
"loss": 0.0158,
"step": 1322
},
{
"epoch": 2.8278867102396514,
"grad_norm": 0.30326512455940247,
"learning_rate": 3.686203735218078e-05,
"loss": 0.0223,
"step": 1323
},
{
"epoch": 2.8300653594771243,
"grad_norm": 0.2670634388923645,
"learning_rate": 3.672797864207316e-05,
"loss": 0.0228,
"step": 1324
},
{
"epoch": 2.832244008714597,
"grad_norm": 0.263776034116745,
"learning_rate": 3.65941092882215e-05,
"loss": 0.0213,
"step": 1325
},
{
"epoch": 2.8344226579520697,
"grad_norm": 0.28281012177467346,
"learning_rate": 3.646042969126093e-05,
"loss": 0.0208,
"step": 1326
},
{
"epoch": 2.8366013071895426,
"grad_norm": 0.252146452665329,
"learning_rate": 3.632694025125885e-05,
"loss": 0.019,
"step": 1327
},
{
"epoch": 2.8387799564270155,
"grad_norm": 0.25043606758117676,
"learning_rate": 3.619364136771337e-05,
"loss": 0.0209,
"step": 1328
},
{
"epoch": 2.840958605664488,
"grad_norm": 0.2979956865310669,
"learning_rate": 3.6060533439552605e-05,
"loss": 0.0236,
"step": 1329
},
{
"epoch": 2.843137254901961,
"grad_norm": 0.22742527723312378,
"learning_rate": 3.5927616865132884e-05,
"loss": 0.0166,
"step": 1330
},
{
"epoch": 2.845315904139434,
"grad_norm": 0.24627971649169922,
"learning_rate": 3.579489204223808e-05,
"loss": 0.0211,
"step": 1331
},
{
"epoch": 2.8474945533769063,
"grad_norm": 0.2819809019565582,
"learning_rate": 3.566235936807808e-05,
"loss": 0.0195,
"step": 1332
},
{
"epoch": 2.849673202614379,
"grad_norm": 0.24710224568843842,
"learning_rate": 3.5530019239287796e-05,
"loss": 0.0224,
"step": 1333
},
{
"epoch": 2.851851851851852,
"grad_norm": 0.28278109431266785,
"learning_rate": 3.539787205192586e-05,
"loss": 0.0232,
"step": 1334
},
{
"epoch": 2.8540305010893245,
"grad_norm": 0.24668852984905243,
"learning_rate": 3.526591820147355e-05,
"loss": 0.0209,
"step": 1335
},
{
"epoch": 2.8562091503267975,
"grad_norm": 0.3158888816833496,
"learning_rate": 3.513415808283341e-05,
"loss": 0.0236,
"step": 1336
},
{
"epoch": 2.8583877995642704,
"grad_norm": 0.2513194680213928,
"learning_rate": 3.500259209032831e-05,
"loss": 0.0248,
"step": 1337
},
{
"epoch": 2.860566448801743,
"grad_norm": 0.2679371237754822,
"learning_rate": 3.4871220617700126e-05,
"loss": 0.0255,
"step": 1338
},
{
"epoch": 2.8627450980392157,
"grad_norm": 0.27874305844306946,
"learning_rate": 3.47400440581086e-05,
"loss": 0.0235,
"step": 1339
},
{
"epoch": 2.8649237472766886,
"grad_norm": 0.28599897027015686,
"learning_rate": 3.460906280413007e-05,
"loss": 0.0218,
"step": 1340
},
{
"epoch": 2.867102396514161,
"grad_norm": 0.2900089621543884,
"learning_rate": 3.447827724775649e-05,
"loss": 0.0226,
"step": 1341
},
{
"epoch": 2.869281045751634,
"grad_norm": 0.3410820960998535,
"learning_rate": 3.4347687780394e-05,
"loss": 0.0292,
"step": 1342
},
{
"epoch": 2.871459694989107,
"grad_norm": 0.25538358092308044,
"learning_rate": 3.4217294792862106e-05,
"loss": 0.0191,
"step": 1343
},
{
"epoch": 2.8736383442265794,
"grad_norm": 0.2789970636367798,
"learning_rate": 3.4087098675392104e-05,
"loss": 0.0218,
"step": 1344
},
{
"epoch": 2.8758169934640523,
"grad_norm": 0.319162517786026,
"learning_rate": 3.3957099817626245e-05,
"loss": 0.0228,
"step": 1345
},
{
"epoch": 2.877995642701525,
"grad_norm": 0.32539108395576477,
"learning_rate": 3.382729860861632e-05,
"loss": 0.026,
"step": 1346
},
{
"epoch": 2.8801742919389977,
"grad_norm": 0.33139485120773315,
"learning_rate": 3.36976954368227e-05,
"loss": 0.0226,
"step": 1347
},
{
"epoch": 2.8823529411764706,
"grad_norm": 0.2609541714191437,
"learning_rate": 3.3568290690113034e-05,
"loss": 0.0218,
"step": 1348
},
{
"epoch": 2.8845315904139435,
"grad_norm": 0.2965582311153412,
"learning_rate": 3.3439084755761216e-05,
"loss": 0.0212,
"step": 1349
},
{
"epoch": 2.886710239651416,
"grad_norm": 0.325886070728302,
"learning_rate": 3.331007802044601e-05,
"loss": 0.0243,
"step": 1350
},
{
"epoch": 2.888888888888889,
"grad_norm": 0.3131248652935028,
"learning_rate": 3.3181270870250146e-05,
"loss": 0.0221,
"step": 1351
},
{
"epoch": 2.8910675381263617,
"grad_norm": 0.2677004039287567,
"learning_rate": 3.305266369065901e-05,
"loss": 0.0177,
"step": 1352
},
{
"epoch": 2.893246187363834,
"grad_norm": 0.23544171452522278,
"learning_rate": 3.292425686655957e-05,
"loss": 0.0191,
"step": 1353
},
{
"epoch": 2.895424836601307,
"grad_norm": 0.24077573418617249,
"learning_rate": 3.279605078223906e-05,
"loss": 0.0206,
"step": 1354
},
{
"epoch": 2.89760348583878,
"grad_norm": 0.23531267046928406,
"learning_rate": 3.266804582138409e-05,
"loss": 0.02,
"step": 1355
},
{
"epoch": 2.8997821350762525,
"grad_norm": 0.3007153570652008,
"learning_rate": 3.25402423670793e-05,
"loss": 0.0252,
"step": 1356
},
{
"epoch": 2.9019607843137254,
"grad_norm": 0.2383011132478714,
"learning_rate": 3.2412640801806326e-05,
"loss": 0.0194,
"step": 1357
},
{
"epoch": 2.9041394335511983,
"grad_norm": 0.2859378159046173,
"learning_rate": 3.228524150744249e-05,
"loss": 0.02,
"step": 1358
},
{
"epoch": 2.9063180827886708,
"grad_norm": 0.23625333607196808,
"learning_rate": 3.2158044865259903e-05,
"loss": 0.0197,
"step": 1359
},
{
"epoch": 2.9084967320261437,
"grad_norm": 0.2601231634616852,
"learning_rate": 3.2031051255924085e-05,
"loss": 0.0198,
"step": 1360
},
{
"epoch": 2.9106753812636166,
"grad_norm": 0.23496107757091522,
"learning_rate": 3.1904261059493004e-05,
"loss": 0.0198,
"step": 1361
},
{
"epoch": 2.9128540305010895,
"grad_norm": 0.24590462446212769,
"learning_rate": 3.1777674655415834e-05,
"loss": 0.0197,
"step": 1362
},
{
"epoch": 2.915032679738562,
"grad_norm": 0.2683832347393036,
"learning_rate": 3.165129242253191e-05,
"loss": 0.0211,
"step": 1363
},
{
"epoch": 2.917211328976035,
"grad_norm": 0.22841046750545502,
"learning_rate": 3.1525114739069415e-05,
"loss": 0.0191,
"step": 1364
},
{
"epoch": 2.9193899782135078,
"grad_norm": 0.22903601825237274,
"learning_rate": 3.1399141982644495e-05,
"loss": 0.0196,
"step": 1365
},
{
"epoch": 2.9215686274509802,
"grad_norm": 0.24044066667556763,
"learning_rate": 3.127337453025994e-05,
"loss": 0.0198,
"step": 1366
},
{
"epoch": 2.923747276688453,
"grad_norm": 0.2484930455684662,
"learning_rate": 3.1147812758304165e-05,
"loss": 0.0196,
"step": 1367
},
{
"epoch": 2.925925925925926,
"grad_norm": 0.25770458579063416,
"learning_rate": 3.102245704254995e-05,
"loss": 0.0217,
"step": 1368
},
{
"epoch": 2.928104575163399,
"grad_norm": 0.32166433334350586,
"learning_rate": 3.089730775815348e-05,
"loss": 0.0274,
"step": 1369
},
{
"epoch": 2.9302832244008714,
"grad_norm": 0.24577151238918304,
"learning_rate": 3.077236527965318e-05,
"loss": 0.0221,
"step": 1370
},
{
"epoch": 2.9324618736383443,
"grad_norm": 0.2804732024669647,
"learning_rate": 3.064762998096844e-05,
"loss": 0.0232,
"step": 1371
},
{
"epoch": 2.9346405228758172,
"grad_norm": 0.2377091497182846,
"learning_rate": 3.0523102235398714e-05,
"loss": 0.0213,
"step": 1372
},
{
"epoch": 2.9368191721132897,
"grad_norm": 0.2534312605857849,
"learning_rate": 3.0398782415622264e-05,
"loss": 0.0211,
"step": 1373
},
{
"epoch": 2.9389978213507626,
"grad_norm": 0.2798876464366913,
"learning_rate": 3.0274670893695147e-05,
"loss": 0.022,
"step": 1374
},
{
"epoch": 2.9411764705882355,
"grad_norm": 0.21441318094730377,
"learning_rate": 3.0150768041049905e-05,
"loss": 0.0188,
"step": 1375
},
{
"epoch": 2.943355119825708,
"grad_norm": 0.2599216401576996,
"learning_rate": 3.002707422849472e-05,
"loss": 0.0196,
"step": 1376
},
{
"epoch": 2.945533769063181,
"grad_norm": 0.24009159207344055,
"learning_rate": 2.9903589826212165e-05,
"loss": 0.0206,
"step": 1377
}
],
"logging_steps": 1,
"max_steps": 1836,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 459,
"total_flos": 3.329359091859456e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}