{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 115,
"global_step": 459,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.002178649237472767,
"grad_norm": 0.4049851596355438,
"learning_rate": 1e-05,
"loss": 1.9628,
"step": 1
},
{
"epoch": 0.002178649237472767,
"eval_loss": 1.915004849433899,
"eval_runtime": 0.9396,
"eval_samples_per_second": 181.996,
"eval_steps_per_second": 13.836,
"step": 1
},
{
"epoch": 0.004357298474945534,
"grad_norm": 0.38593971729278564,
"learning_rate": 2e-05,
"loss": 2.0388,
"step": 2
},
{
"epoch": 0.006535947712418301,
"grad_norm": 0.38570573925971985,
"learning_rate": 3e-05,
"loss": 1.932,
"step": 3
},
{
"epoch": 0.008714596949891068,
"grad_norm": 0.35365748405456543,
"learning_rate": 4e-05,
"loss": 1.866,
"step": 4
},
{
"epoch": 0.010893246187363835,
"grad_norm": 0.4407881796360016,
"learning_rate": 5e-05,
"loss": 1.9959,
"step": 5
},
{
"epoch": 0.013071895424836602,
"grad_norm": 0.3517284095287323,
"learning_rate": 6e-05,
"loss": 1.8822,
"step": 6
},
{
"epoch": 0.015250544662309368,
"grad_norm": 0.45338165760040283,
"learning_rate": 7e-05,
"loss": 1.8933,
"step": 7
},
{
"epoch": 0.017429193899782137,
"grad_norm": 0.3475826680660248,
"learning_rate": 8e-05,
"loss": 1.8337,
"step": 8
},
{
"epoch": 0.0196078431372549,
"grad_norm": 0.4348187744617462,
"learning_rate": 9e-05,
"loss": 1.9261,
"step": 9
},
{
"epoch": 0.02178649237472767,
"grad_norm": 0.3879510164260864,
"learning_rate": 0.0001,
"loss": 1.7648,
"step": 10
},
{
"epoch": 0.023965141612200435,
"grad_norm": 0.3244905471801758,
"learning_rate": 0.00011000000000000002,
"loss": 1.7833,
"step": 11
},
{
"epoch": 0.026143790849673203,
"grad_norm": 0.3566845953464508,
"learning_rate": 0.00012,
"loss": 1.786,
"step": 12
},
{
"epoch": 0.02832244008714597,
"grad_norm": 0.35482919216156006,
"learning_rate": 0.00013000000000000002,
"loss": 1.6593,
"step": 13
},
{
"epoch": 0.030501089324618737,
"grad_norm": 0.3647037744522095,
"learning_rate": 0.00014,
"loss": 1.696,
"step": 14
},
{
"epoch": 0.032679738562091505,
"grad_norm": 0.41642501950263977,
"learning_rate": 0.00015000000000000001,
"loss": 1.5964,
"step": 15
},
{
"epoch": 0.034858387799564274,
"grad_norm": 0.3616989254951477,
"learning_rate": 0.00016,
"loss": 1.7121,
"step": 16
},
{
"epoch": 0.037037037037037035,
"grad_norm": 0.39412033557891846,
"learning_rate": 0.00017,
"loss": 1.7392,
"step": 17
},
{
"epoch": 0.0392156862745098,
"grad_norm": 0.3788229823112488,
"learning_rate": 0.00018,
"loss": 1.793,
"step": 18
},
{
"epoch": 0.04139433551198257,
"grad_norm": 0.3376384377479553,
"learning_rate": 0.00019,
"loss": 1.6145,
"step": 19
},
{
"epoch": 0.04357298474945534,
"grad_norm": 0.3674631714820862,
"learning_rate": 0.0002,
"loss": 1.6802,
"step": 20
},
{
"epoch": 0.0457516339869281,
"grad_norm": 0.406034916639328,
"learning_rate": 0.00019999985036335823,
"loss": 1.7272,
"step": 21
},
{
"epoch": 0.04793028322440087,
"grad_norm": 0.34544336795806885,
"learning_rate": 0.00019999940145388063,
"loss": 1.592,
"step": 22
},
{
"epoch": 0.05010893246187364,
"grad_norm": 0.4000962972640991,
"learning_rate": 0.00019999865327291073,
"loss": 1.6177,
"step": 23
},
{
"epoch": 0.05228758169934641,
"grad_norm": 0.36951783299446106,
"learning_rate": 0.00019999760582268763,
"loss": 1.6261,
"step": 24
},
{
"epoch": 0.054466230936819175,
"grad_norm": 0.3870888948440552,
"learning_rate": 0.00019999625910634605,
"loss": 1.544,
"step": 25
},
{
"epoch": 0.05664488017429194,
"grad_norm": 0.4127906560897827,
"learning_rate": 0.00019999461312791638,
"loss": 1.5375,
"step": 26
},
{
"epoch": 0.058823529411764705,
"grad_norm": 0.43752938508987427,
"learning_rate": 0.00019999266789232455,
"loss": 1.4055,
"step": 27
},
{
"epoch": 0.06100217864923747,
"grad_norm": 0.44983282685279846,
"learning_rate": 0.0001999904234053922,
"loss": 1.5742,
"step": 28
},
{
"epoch": 0.06318082788671024,
"grad_norm": 0.4332844614982605,
"learning_rate": 0.0001999878796738364,
"loss": 1.5264,
"step": 29
},
{
"epoch": 0.06535947712418301,
"grad_norm": 0.4228737950325012,
"learning_rate": 0.00019998503670526994,
"loss": 1.4985,
"step": 30
},
{
"epoch": 0.06753812636165578,
"grad_norm": 0.4225306808948517,
"learning_rate": 0.000199981894508201,
"loss": 1.447,
"step": 31
},
{
"epoch": 0.06971677559912855,
"grad_norm": 0.5055080056190491,
"learning_rate": 0.00019997845309203334,
"loss": 1.4575,
"step": 32
},
{
"epoch": 0.0718954248366013,
"grad_norm": 0.4757756292819977,
"learning_rate": 0.0001999747124670662,
"loss": 1.3472,
"step": 33
},
{
"epoch": 0.07407407407407407,
"grad_norm": 0.4340977966785431,
"learning_rate": 0.00019997067264449433,
"loss": 1.3273,
"step": 34
},
{
"epoch": 0.07625272331154684,
"grad_norm": 0.4556865692138672,
"learning_rate": 0.0001999663336364078,
"loss": 1.43,
"step": 35
},
{
"epoch": 0.0784313725490196,
"grad_norm": 0.5178071856498718,
"learning_rate": 0.00019996169545579207,
"loss": 1.3286,
"step": 36
},
{
"epoch": 0.08061002178649238,
"grad_norm": 0.5154844522476196,
"learning_rate": 0.00019995675811652802,
"loss": 1.2845,
"step": 37
},
{
"epoch": 0.08278867102396514,
"grad_norm": 0.5944285988807678,
"learning_rate": 0.00019995152163339178,
"loss": 1.4411,
"step": 38
},
{
"epoch": 0.08496732026143791,
"grad_norm": 0.5691947340965271,
"learning_rate": 0.00019994598602205473,
"loss": 1.3807,
"step": 39
},
{
"epoch": 0.08714596949891068,
"grad_norm": 0.575366199016571,
"learning_rate": 0.00019994015129908346,
"loss": 1.3347,
"step": 40
},
{
"epoch": 0.08932461873638345,
"grad_norm": 0.5233891010284424,
"learning_rate": 0.00019993401748193978,
"loss": 1.303,
"step": 41
},
{
"epoch": 0.0915032679738562,
"grad_norm": 0.5643051862716675,
"learning_rate": 0.00019992758458898055,
"loss": 1.2618,
"step": 42
},
{
"epoch": 0.09368191721132897,
"grad_norm": 0.6836549043655396,
"learning_rate": 0.0001999208526394577,
"loss": 1.3218,
"step": 43
},
{
"epoch": 0.09586056644880174,
"grad_norm": 0.6471132040023804,
"learning_rate": 0.00019991382165351814,
"loss": 1.1933,
"step": 44
},
{
"epoch": 0.09803921568627451,
"grad_norm": 0.5644765496253967,
"learning_rate": 0.00019990649165220375,
"loss": 1.1135,
"step": 45
},
{
"epoch": 0.10021786492374728,
"grad_norm": 0.7101904153823853,
"learning_rate": 0.00019989886265745128,
"loss": 1.1919,
"step": 46
},
{
"epoch": 0.10239651416122005,
"grad_norm": 0.706234872341156,
"learning_rate": 0.00019989093469209224,
"loss": 1.1607,
"step": 47
},
{
"epoch": 0.10457516339869281,
"grad_norm": 0.6854044795036316,
"learning_rate": 0.00019988270777985292,
"loss": 1.1441,
"step": 48
},
{
"epoch": 0.10675381263616558,
"grad_norm": 0.6608173251152039,
"learning_rate": 0.00019987418194535427,
"loss": 1.0626,
"step": 49
},
{
"epoch": 0.10893246187363835,
"grad_norm": 0.7540091276168823,
"learning_rate": 0.00019986535721411186,
"loss": 1.1346,
"step": 50
},
{
"epoch": 0.1111111111111111,
"grad_norm": 0.783423125743866,
"learning_rate": 0.00019985623361253572,
"loss": 1.2105,
"step": 51
},
{
"epoch": 0.11328976034858387,
"grad_norm": 0.7029076814651489,
"learning_rate": 0.00019984681116793038,
"loss": 0.9689,
"step": 52
},
{
"epoch": 0.11546840958605664,
"grad_norm": 0.8416129350662231,
"learning_rate": 0.00019983708990849468,
"loss": 1.1176,
"step": 53
},
{
"epoch": 0.11764705882352941,
"grad_norm": 0.7312731146812439,
"learning_rate": 0.00019982706986332175,
"loss": 1.1695,
"step": 54
},
{
"epoch": 0.11982570806100218,
"grad_norm": 0.8590166568756104,
"learning_rate": 0.00019981675106239895,
"loss": 1.016,
"step": 55
},
{
"epoch": 0.12200435729847495,
"grad_norm": 0.8634907603263855,
"learning_rate": 0.00019980613353660763,
"loss": 1.0777,
"step": 56
},
{
"epoch": 0.12418300653594772,
"grad_norm": 0.6608163714408875,
"learning_rate": 0.00019979521731772323,
"loss": 0.9661,
"step": 57
},
{
"epoch": 0.12636165577342048,
"grad_norm": 0.9486388564109802,
"learning_rate": 0.00019978400243841508,
"loss": 0.8715,
"step": 58
},
{
"epoch": 0.12854030501089325,
"grad_norm": 0.8431762456893921,
"learning_rate": 0.00019977248893224636,
"loss": 1.0458,
"step": 59
},
{
"epoch": 0.13071895424836602,
"grad_norm": 1.00847589969635,
"learning_rate": 0.00019976067683367385,
"loss": 0.9081,
"step": 60
},
{
"epoch": 0.1328976034858388,
"grad_norm": 1.3647116422653198,
"learning_rate": 0.00019974856617804807,
"loss": 1.0181,
"step": 61
},
{
"epoch": 0.13507625272331156,
"grad_norm": 1.2597001791000366,
"learning_rate": 0.0001997361570016129,
"loss": 0.9373,
"step": 62
},
{
"epoch": 0.13725490196078433,
"grad_norm": 1.238145351409912,
"learning_rate": 0.00019972344934150577,
"loss": 0.9464,
"step": 63
},
{
"epoch": 0.1394335511982571,
"grad_norm": 1.114610195159912,
"learning_rate": 0.00019971044323575728,
"loss": 0.9163,
"step": 64
},
{
"epoch": 0.14161220043572983,
"grad_norm": 0.9760491847991943,
"learning_rate": 0.0001996971387232912,
"loss": 0.8424,
"step": 65
},
{
"epoch": 0.1437908496732026,
"grad_norm": 0.999609649181366,
"learning_rate": 0.0001996835358439244,
"loss": 0.8027,
"step": 66
},
{
"epoch": 0.14596949891067537,
"grad_norm": 0.9615645408630371,
"learning_rate": 0.00019966963463836668,
"loss": 0.9491,
"step": 67
},
{
"epoch": 0.14814814814814814,
"grad_norm": 0.9067331552505493,
"learning_rate": 0.00019965543514822062,
"loss": 0.9756,
"step": 68
},
{
"epoch": 0.1503267973856209,
"grad_norm": 1.0316940546035767,
"learning_rate": 0.00019964093741598152,
"loss": 0.7276,
"step": 69
},
{
"epoch": 0.15250544662309368,
"grad_norm": 0.7774396538734436,
"learning_rate": 0.00019962614148503718,
"loss": 0.8904,
"step": 70
},
{
"epoch": 0.15468409586056645,
"grad_norm": 1.0500309467315674,
"learning_rate": 0.0001996110473996679,
"loss": 0.8801,
"step": 71
},
{
"epoch": 0.1568627450980392,
"grad_norm": 0.8712791800498962,
"learning_rate": 0.00019959565520504623,
"loss": 0.992,
"step": 72
},
{
"epoch": 0.15904139433551198,
"grad_norm": 1.006437063217163,
"learning_rate": 0.0001995799649472369,
"loss": 0.6761,
"step": 73
},
{
"epoch": 0.16122004357298475,
"grad_norm": 1.0199809074401855,
"learning_rate": 0.00019956397667319668,
"loss": 0.7066,
"step": 74
},
{
"epoch": 0.16339869281045752,
"grad_norm": 1.2605611085891724,
"learning_rate": 0.0001995476904307742,
"loss": 0.6546,
"step": 75
},
{
"epoch": 0.1655773420479303,
"grad_norm": 0.9553707242012024,
"learning_rate": 0.00019953110626870979,
"loss": 0.9392,
"step": 76
},
{
"epoch": 0.16775599128540306,
"grad_norm": 0.909253716468811,
"learning_rate": 0.00019951422423663547,
"loss": 0.8757,
"step": 77
},
{
"epoch": 0.16993464052287582,
"grad_norm": 1.007814645767212,
"learning_rate": 0.00019949704438507459,
"loss": 0.877,
"step": 78
},
{
"epoch": 0.1721132897603486,
"grad_norm": 1.341426968574524,
"learning_rate": 0.00019947956676544192,
"loss": 0.8002,
"step": 79
},
{
"epoch": 0.17429193899782136,
"grad_norm": 1.153745174407959,
"learning_rate": 0.00019946179143004325,
"loss": 0.714,
"step": 80
},
{
"epoch": 0.17647058823529413,
"grad_norm": 1.0699673891067505,
"learning_rate": 0.00019944371843207546,
"loss": 0.9575,
"step": 81
},
{
"epoch": 0.1786492374727669,
"grad_norm": 0.9054269194602966,
"learning_rate": 0.0001994253478256262,
"loss": 0.8967,
"step": 82
},
{
"epoch": 0.18082788671023964,
"grad_norm": 1.3790533542633057,
"learning_rate": 0.0001994066796656737,
"loss": 1.0535,
"step": 83
},
{
"epoch": 0.1830065359477124,
"grad_norm": 1.1256961822509766,
"learning_rate": 0.0001993877140080869,
"loss": 0.7872,
"step": 84
},
{
"epoch": 0.18518518518518517,
"grad_norm": 0.8870573043823242,
"learning_rate": 0.0001993684509096249,
"loss": 0.9137,
"step": 85
},
{
"epoch": 0.18736383442265794,
"grad_norm": 1.1747201681137085,
"learning_rate": 0.000199348890427937,
"loss": 0.779,
"step": 86
},
{
"epoch": 0.1895424836601307,
"grad_norm": 0.8280813694000244,
"learning_rate": 0.00019932903262156245,
"loss": 0.8289,
"step": 87
},
{
"epoch": 0.19172113289760348,
"grad_norm": 0.984609842300415,
"learning_rate": 0.00019930887754993044,
"loss": 0.8238,
"step": 88
},
{
"epoch": 0.19389978213507625,
"grad_norm": 1.030261516571045,
"learning_rate": 0.00019928842527335968,
"loss": 0.7061,
"step": 89
},
{
"epoch": 0.19607843137254902,
"grad_norm": 0.8822032809257507,
"learning_rate": 0.00019926767585305835,
"loss": 0.8622,
"step": 90
},
{
"epoch": 0.19825708061002179,
"grad_norm": 0.996427059173584,
"learning_rate": 0.00019924662935112393,
"loss": 0.5348,
"step": 91
},
{
"epoch": 0.20043572984749455,
"grad_norm": 1.0308480262756348,
"learning_rate": 0.000199225285830543,
"loss": 0.8325,
"step": 92
},
{
"epoch": 0.20261437908496732,
"grad_norm": 0.8959431648254395,
"learning_rate": 0.000199203645355191,
"loss": 0.6136,
"step": 93
},
{
"epoch": 0.2047930283224401,
"grad_norm": 0.8773916363716125,
"learning_rate": 0.00019918170798983211,
"loss": 0.577,
"step": 94
},
{
"epoch": 0.20697167755991286,
"grad_norm": 1.091194748878479,
"learning_rate": 0.00019915947380011898,
"loss": 0.7751,
"step": 95
},
{
"epoch": 0.20915032679738563,
"grad_norm": 0.8473864197731018,
"learning_rate": 0.00019913694285259256,
"loss": 0.5831,
"step": 96
},
{
"epoch": 0.2113289760348584,
"grad_norm": 0.801262378692627,
"learning_rate": 0.00019911411521468205,
"loss": 0.6089,
"step": 97
},
{
"epoch": 0.21350762527233116,
"grad_norm": 0.9437965154647827,
"learning_rate": 0.00019909099095470444,
"loss": 0.7343,
"step": 98
},
{
"epoch": 0.21568627450980393,
"grad_norm": 1.1255544424057007,
"learning_rate": 0.00019906757014186442,
"loss": 0.6728,
"step": 99
},
{
"epoch": 0.2178649237472767,
"grad_norm": 0.930216372013092,
"learning_rate": 0.00019904385284625424,
"loss": 0.5675,
"step": 100
},
{
"epoch": 0.22004357298474944,
"grad_norm": 0.8021939396858215,
"learning_rate": 0.00019901983913885344,
"loss": 0.4423,
"step": 101
},
{
"epoch": 0.2222222222222222,
"grad_norm": 1.5028183460235596,
"learning_rate": 0.00019899552909152866,
"loss": 0.9797,
"step": 102
},
{
"epoch": 0.22440087145969498,
"grad_norm": 0.7115923762321472,
"learning_rate": 0.00019897092277703333,
"loss": 0.4128,
"step": 103
},
{
"epoch": 0.22657952069716775,
"grad_norm": 0.9592722058296204,
"learning_rate": 0.00019894602026900758,
"loss": 0.8714,
"step": 104
},
{
"epoch": 0.22875816993464052,
"grad_norm": 0.8745520114898682,
"learning_rate": 0.000198920821641978,
"loss": 0.5991,
"step": 105
},
{
"epoch": 0.23093681917211328,
"grad_norm": 0.7649117708206177,
"learning_rate": 0.00019889532697135734,
"loss": 0.5501,
"step": 106
},
{
"epoch": 0.23311546840958605,
"grad_norm": 1.1097913980484009,
"learning_rate": 0.0001988695363334443,
"loss": 0.5863,
"step": 107
},
{
"epoch": 0.23529411764705882,
"grad_norm": 0.9224969148635864,
"learning_rate": 0.00019884344980542338,
"loss": 0.5883,
"step": 108
},
{
"epoch": 0.2374727668845316,
"grad_norm": 0.7770025134086609,
"learning_rate": 0.00019881706746536462,
"loss": 0.8375,
"step": 109
},
{
"epoch": 0.23965141612200436,
"grad_norm": 0.8830885887145996,
"learning_rate": 0.00019879038939222329,
"loss": 0.6464,
"step": 110
},
{
"epoch": 0.24183006535947713,
"grad_norm": 0.8932918310165405,
"learning_rate": 0.00019876341566583977,
"loss": 0.4851,
"step": 111
},
{
"epoch": 0.2440087145969499,
"grad_norm": 0.8250621557235718,
"learning_rate": 0.0001987361463669392,
"loss": 0.5658,
"step": 112
},
{
"epoch": 0.24618736383442266,
"grad_norm": 0.9288647174835205,
"learning_rate": 0.00019870858157713123,
"loss": 0.5441,
"step": 113
},
{
"epoch": 0.24836601307189543,
"grad_norm": 0.8258922100067139,
"learning_rate": 0.00019868072137891002,
"loss": 0.764,
"step": 114
},
{
"epoch": 0.25054466230936817,
"grad_norm": 0.8087350726127625,
"learning_rate": 0.00019865256585565363,
"loss": 0.5816,
"step": 115
},
{
"epoch": 0.25054466230936817,
"eval_loss": 0.6156808733940125,
"eval_runtime": 0.937,
"eval_samples_per_second": 182.501,
"eval_steps_per_second": 13.874,
"step": 115
},
{
"epoch": 0.25272331154684097,
"grad_norm": 1.0198040008544922,
"learning_rate": 0.00019862411509162406,
"loss": 0.9471,
"step": 116
},
{
"epoch": 0.2549019607843137,
"grad_norm": 0.8376523852348328,
"learning_rate": 0.00019859536917196687,
"loss": 0.6166,
"step": 117
},
{
"epoch": 0.2570806100217865,
"grad_norm": 0.8766109347343445,
"learning_rate": 0.0001985663281827108,
"loss": 0.6737,
"step": 118
},
{
"epoch": 0.25925925925925924,
"grad_norm": 1.072192668914795,
"learning_rate": 0.00019853699221076792,
"loss": 0.6403,
"step": 119
},
{
"epoch": 0.26143790849673204,
"grad_norm": 0.8205565214157104,
"learning_rate": 0.00019850736134393286,
"loss": 0.4247,
"step": 120
},
{
"epoch": 0.2636165577342048,
"grad_norm": 1.0622146129608154,
"learning_rate": 0.00019847743567088293,
"loss": 0.6497,
"step": 121
},
{
"epoch": 0.2657952069716776,
"grad_norm": 0.8463292717933655,
"learning_rate": 0.00019844721528117766,
"loss": 0.6587,
"step": 122
},
{
"epoch": 0.2679738562091503,
"grad_norm": 0.9597845673561096,
"learning_rate": 0.0001984167002652586,
"loss": 0.4752,
"step": 123
},
{
"epoch": 0.2701525054466231,
"grad_norm": 0.8975586295127869,
"learning_rate": 0.00019838589071444903,
"loss": 0.7978,
"step": 124
},
{
"epoch": 0.27233115468409586,
"grad_norm": 0.8363540768623352,
"learning_rate": 0.00019835478672095374,
"loss": 0.7359,
"step": 125
},
{
"epoch": 0.27450980392156865,
"grad_norm": 1.0208615064620972,
"learning_rate": 0.00019832338837785863,
"loss": 0.518,
"step": 126
},
{
"epoch": 0.2766884531590414,
"grad_norm": 1.14145028591156,
"learning_rate": 0.0001982916957791306,
"loss": 0.7642,
"step": 127
},
{
"epoch": 0.2788671023965142,
"grad_norm": 0.9274200797080994,
"learning_rate": 0.00019825970901961705,
"loss": 0.5288,
"step": 128
},
{
"epoch": 0.28104575163398693,
"grad_norm": 0.8783562779426575,
"learning_rate": 0.0001982274281950459,
"loss": 0.5958,
"step": 129
},
{
"epoch": 0.28322440087145967,
"grad_norm": 0.9028067588806152,
"learning_rate": 0.000198194853402025,
"loss": 0.6075,
"step": 130
},
{
"epoch": 0.28540305010893247,
"grad_norm": 0.9846379160881042,
"learning_rate": 0.00019816198473804198,
"loss": 0.8254,
"step": 131
},
{
"epoch": 0.2875816993464052,
"grad_norm": 0.9409753680229187,
"learning_rate": 0.00019812882230146398,
"loss": 0.704,
"step": 132
},
{
"epoch": 0.289760348583878,
"grad_norm": 0.8969582915306091,
"learning_rate": 0.00019809536619153732,
"loss": 0.6107,
"step": 133
},
{
"epoch": 0.29193899782135074,
"grad_norm": 0.7812852263450623,
"learning_rate": 0.00019806161650838723,
"loss": 0.5671,
"step": 134
},
{
"epoch": 0.29411764705882354,
"grad_norm": 0.8860548734664917,
"learning_rate": 0.00019802757335301741,
"loss": 0.5248,
"step": 135
},
{
"epoch": 0.2962962962962963,
"grad_norm": 0.8217918276786804,
"learning_rate": 0.00019799323682731,
"loss": 0.4935,
"step": 136
},
{
"epoch": 0.2984749455337691,
"grad_norm": 0.7621735334396362,
"learning_rate": 0.00019795860703402505,
"loss": 0.5984,
"step": 137
},
{
"epoch": 0.3006535947712418,
"grad_norm": 0.9418565630912781,
"learning_rate": 0.00019792368407680025,
"loss": 0.558,
"step": 138
},
{
"epoch": 0.3028322440087146,
"grad_norm": 0.819114625453949,
"learning_rate": 0.00019788846806015066,
"loss": 0.3791,
"step": 139
},
{
"epoch": 0.30501089324618735,
"grad_norm": 0.9072156548500061,
"learning_rate": 0.00019785295908946848,
"loss": 0.4462,
"step": 140
},
{
"epoch": 0.30718954248366015,
"grad_norm": 0.8303220868110657,
"learning_rate": 0.00019781715727102252,
"loss": 0.4959,
"step": 141
},
{
"epoch": 0.3093681917211329,
"grad_norm": 0.8586477041244507,
"learning_rate": 0.00019778106271195806,
"loss": 0.4701,
"step": 142
},
{
"epoch": 0.3115468409586057,
"grad_norm": 0.7374873757362366,
"learning_rate": 0.00019774467552029646,
"loss": 0.407,
"step": 143
},
{
"epoch": 0.3137254901960784,
"grad_norm": 1.1180788278579712,
"learning_rate": 0.00019770799580493494,
"loss": 0.6304,
"step": 144
},
{
"epoch": 0.3159041394335512,
"grad_norm": 0.9823700189590454,
"learning_rate": 0.000197671023675646,
"loss": 0.5079,
"step": 145
},
{
"epoch": 0.31808278867102396,
"grad_norm": 0.8474340438842773,
"learning_rate": 0.00019763375924307735,
"loss": 0.5708,
"step": 146
},
{
"epoch": 0.3202614379084967,
"grad_norm": 0.9172300100326538,
"learning_rate": 0.00019759620261875155,
"loss": 0.418,
"step": 147
},
{
"epoch": 0.3224400871459695,
"grad_norm": 0.7413074374198914,
"learning_rate": 0.0001975583539150655,
"loss": 0.4531,
"step": 148
},
{
"epoch": 0.32461873638344224,
"grad_norm": 0.7417133450508118,
"learning_rate": 0.00019752021324529023,
"loss": 0.5158,
"step": 149
},
{
"epoch": 0.32679738562091504,
"grad_norm": 0.774067223072052,
"learning_rate": 0.00019748178072357065,
"loss": 0.5995,
"step": 150
},
{
"epoch": 0.3289760348583878,
"grad_norm": 0.9123216867446899,
"learning_rate": 0.00019744305646492497,
"loss": 0.6477,
"step": 151
},
{
"epoch": 0.3311546840958606,
"grad_norm": 0.8347046375274658,
"learning_rate": 0.00019740404058524457,
"loss": 0.6527,
"step": 152
},
{
"epoch": 0.3333333333333333,
"grad_norm": 0.9968402981758118,
"learning_rate": 0.00019736473320129352,
"loss": 0.5282,
"step": 153
},
{
"epoch": 0.3355119825708061,
"grad_norm": 0.7431806921958923,
"learning_rate": 0.00019732513443070836,
"loss": 0.5553,
"step": 154
},
{
"epoch": 0.33769063180827885,
"grad_norm": 0.9280023574829102,
"learning_rate": 0.0001972852443919976,
"loss": 0.7504,
"step": 155
},
{
"epoch": 0.33986928104575165,
"grad_norm": 0.9563156366348267,
"learning_rate": 0.00019724506320454153,
"loss": 0.4566,
"step": 156
},
{
"epoch": 0.3420479302832244,
"grad_norm": 0.744659960269928,
"learning_rate": 0.00019720459098859165,
"loss": 0.5929,
"step": 157
},
{
"epoch": 0.3442265795206972,
"grad_norm": 0.8504654765129089,
"learning_rate": 0.0001971638278652705,
"loss": 0.5152,
"step": 158
},
{
"epoch": 0.3464052287581699,
"grad_norm": 0.9335165023803711,
"learning_rate": 0.0001971227739565712,
"loss": 0.5115,
"step": 159
},
{
"epoch": 0.3485838779956427,
"grad_norm": 1.0398534536361694,
"learning_rate": 0.0001970814293853572,
"loss": 0.4626,
"step": 160
},
{
"epoch": 0.35076252723311546,
"grad_norm": 0.7855281233787537,
"learning_rate": 0.0001970397942753617,
"loss": 0.4245,
"step": 161
},
{
"epoch": 0.35294117647058826,
"grad_norm": 0.7897714972496033,
"learning_rate": 0.00019699786875118747,
"loss": 0.5555,
"step": 162
},
{
"epoch": 0.355119825708061,
"grad_norm": 0.8648003935813904,
"learning_rate": 0.0001969556529383064,
"loss": 0.5558,
"step": 163
},
{
"epoch": 0.3572984749455338,
"grad_norm": 1.0440359115600586,
"learning_rate": 0.00019691314696305913,
"loss": 0.4879,
"step": 164
},
{
"epoch": 0.35947712418300654,
"grad_norm": 0.8991299867630005,
"learning_rate": 0.00019687035095265475,
"loss": 0.4131,
"step": 165
},
{
"epoch": 0.3616557734204793,
"grad_norm": 1.070555329322815,
"learning_rate": 0.00019682726503517017,
"loss": 0.4959,
"step": 166
},
{
"epoch": 0.3638344226579521,
"grad_norm": 0.751699686050415,
"learning_rate": 0.00019678388933955015,
"loss": 0.4098,
"step": 167
},
{
"epoch": 0.3660130718954248,
"grad_norm": 0.7820857763290405,
"learning_rate": 0.00019674022399560648,
"loss": 0.4611,
"step": 168
},
{
"epoch": 0.3681917211328976,
"grad_norm": 0.7827489376068115,
"learning_rate": 0.00019669626913401792,
"loss": 0.3593,
"step": 169
},
{
"epoch": 0.37037037037037035,
"grad_norm": 0.8705342411994934,
"learning_rate": 0.00019665202488632956,
"loss": 0.4037,
"step": 170
},
{
"epoch": 0.37254901960784315,
"grad_norm": 0.9181383848190308,
"learning_rate": 0.00019660749138495268,
"loss": 0.5621,
"step": 171
},
{
"epoch": 0.3747276688453159,
"grad_norm": 0.7258014678955078,
"learning_rate": 0.0001965626687631641,
"loss": 0.3909,
"step": 172
},
{
"epoch": 0.3769063180827887,
"grad_norm": 0.7386276721954346,
"learning_rate": 0.00019651755715510602,
"loss": 0.5974,
"step": 173
},
{
"epoch": 0.3790849673202614,
"grad_norm": 0.7849751710891724,
"learning_rate": 0.00019647215669578536,
"loss": 0.4947,
"step": 174
},
{
"epoch": 0.3812636165577342,
"grad_norm": 0.7632936239242554,
"learning_rate": 0.00019642646752107362,
"loss": 0.4886,
"step": 175
},
{
"epoch": 0.38344226579520696,
"grad_norm": 0.8370786309242249,
"learning_rate": 0.00019638048976770628,
"loss": 0.4741,
"step": 176
},
{
"epoch": 0.38562091503267976,
"grad_norm": 0.8441713452339172,
"learning_rate": 0.00019633422357328239,
"loss": 0.4939,
"step": 177
},
{
"epoch": 0.3877995642701525,
"grad_norm": 0.7680661082267761,
"learning_rate": 0.00019628766907626446,
"loss": 0.5976,
"step": 178
},
{
"epoch": 0.3899782135076253,
"grad_norm": 0.8030869364738464,
"learning_rate": 0.00019624082641597754,
"loss": 0.4914,
"step": 179
},
{
"epoch": 0.39215686274509803,
"grad_norm": 0.8066624402999878,
"learning_rate": 0.00019619369573260924,
"loss": 0.4982,
"step": 180
},
{
"epoch": 0.39433551198257083,
"grad_norm": 0.7550255060195923,
"learning_rate": 0.00019614627716720912,
"loss": 0.3796,
"step": 181
},
{
"epoch": 0.39651416122004357,
"grad_norm": 0.761080265045166,
"learning_rate": 0.00019609857086168823,
"loss": 0.4118,
"step": 182
},
{
"epoch": 0.39869281045751637,
"grad_norm": 1.061673641204834,
"learning_rate": 0.00019605057695881885,
"loss": 0.4461,
"step": 183
},
{
"epoch": 0.4008714596949891,
"grad_norm": 0.8266555070877075,
"learning_rate": 0.00019600229560223388,
"loss": 0.4915,
"step": 184
},
{
"epoch": 0.40305010893246185,
"grad_norm": 0.769981861114502,
"learning_rate": 0.00019595372693642654,
"loss": 0.3993,
"step": 185
},
{
"epoch": 0.40522875816993464,
"grad_norm": 0.8316985368728638,
"learning_rate": 0.00019590487110674983,
"loss": 0.598,
"step": 186
},
{
"epoch": 0.4074074074074074,
"grad_norm": 0.7869564890861511,
"learning_rate": 0.00019585572825941627,
"loss": 0.5088,
"step": 187
},
{
"epoch": 0.4095860566448802,
"grad_norm": 0.7254141569137573,
"learning_rate": 0.0001958062985414972,
"loss": 0.3948,
"step": 188
},
{
"epoch": 0.4117647058823529,
"grad_norm": 0.7505261898040771,
"learning_rate": 0.00019575658210092259,
"loss": 0.3883,
"step": 189
},
{
"epoch": 0.4139433551198257,
"grad_norm": 0.7498146891593933,
"learning_rate": 0.00019570657908648048,
"loss": 0.4072,
"step": 190
},
{
"epoch": 0.41612200435729846,
"grad_norm": 0.9777516722679138,
"learning_rate": 0.00019565628964781647,
"loss": 0.4711,
"step": 191
},
{
"epoch": 0.41830065359477125,
"grad_norm": 0.719313383102417,
"learning_rate": 0.0001956057139354335,
"loss": 0.2645,
"step": 192
},
{
"epoch": 0.420479302832244,
"grad_norm": 0.826934814453125,
"learning_rate": 0.0001955548521006911,
"loss": 0.4608,
"step": 193
},
{
"epoch": 0.4226579520697168,
"grad_norm": 0.7773908376693726,
"learning_rate": 0.0001955037042958052,
"loss": 0.6364,
"step": 194
},
{
"epoch": 0.42483660130718953,
"grad_norm": 0.8829189538955688,
"learning_rate": 0.00019545227067384747,
"loss": 0.5166,
"step": 195
},
{
"epoch": 0.42701525054466233,
"grad_norm": 0.7444214820861816,
"learning_rate": 0.00019540055138874505,
"loss": 0.4784,
"step": 196
},
{
"epoch": 0.42919389978213507,
"grad_norm": 0.7535512447357178,
"learning_rate": 0.0001953485465952799,
"loss": 0.328,
"step": 197
},
{
"epoch": 0.43137254901960786,
"grad_norm": 0.867964506149292,
"learning_rate": 0.00019529625644908847,
"loss": 0.4954,
"step": 198
},
{
"epoch": 0.4335511982570806,
"grad_norm": 0.8096396923065186,
"learning_rate": 0.00019524368110666122,
"loss": 0.409,
"step": 199
},
{
"epoch": 0.4357298474945534,
"grad_norm": 0.6851803064346313,
"learning_rate": 0.0001951908207253421,
"loss": 0.3642,
"step": 200
},
{
"epoch": 0.43790849673202614,
"grad_norm": 1.0261396169662476,
"learning_rate": 0.00019513767546332813,
"loss": 0.5437,
"step": 201
},
{
"epoch": 0.4400871459694989,
"grad_norm": 0.6751096248626709,
"learning_rate": 0.00019508424547966884,
"loss": 0.3054,
"step": 202
},
{
"epoch": 0.4422657952069717,
"grad_norm": 0.9070213437080383,
"learning_rate": 0.00019503053093426593,
"loss": 0.5467,
"step": 203
},
{
"epoch": 0.4444444444444444,
"grad_norm": 0.7909525632858276,
"learning_rate": 0.00019497653198787264,
"loss": 0.4506,
"step": 204
},
{
"epoch": 0.4466230936819172,
"grad_norm": 0.8636406064033508,
"learning_rate": 0.00019492224880209344,
"loss": 0.4917,
"step": 205
},
{
"epoch": 0.44880174291938996,
"grad_norm": 0.8827958106994629,
"learning_rate": 0.00019486768153938338,
"loss": 0.6783,
"step": 206
},
{
"epoch": 0.45098039215686275,
"grad_norm": 0.8108576536178589,
"learning_rate": 0.00019481283036304768,
"loss": 0.5415,
"step": 207
},
{
"epoch": 0.4531590413943355,
"grad_norm": 0.700524091720581,
"learning_rate": 0.0001947576954372413,
"loss": 0.2578,
"step": 208
},
{
"epoch": 0.4553376906318083,
"grad_norm": 0.6894106268882751,
"learning_rate": 0.00019470227692696833,
"loss": 0.4008,
"step": 209
},
{
"epoch": 0.45751633986928103,
"grad_norm": 0.762399435043335,
"learning_rate": 0.00019464657499808152,
"loss": 0.448,
"step": 210
},
{
"epoch": 0.4596949891067538,
"grad_norm": 0.7530399560928345,
"learning_rate": 0.00019459058981728192,
"loss": 0.4793,
"step": 211
},
{
"epoch": 0.46187363834422657,
"grad_norm": 0.8698046803474426,
"learning_rate": 0.0001945343215521182,
"loss": 0.5627,
"step": 212
},
{
"epoch": 0.46405228758169936,
"grad_norm": 0.9562462568283081,
"learning_rate": 0.00019447777037098622,
"loss": 0.6461,
"step": 213
},
{
"epoch": 0.4662309368191721,
"grad_norm": 0.7317548990249634,
"learning_rate": 0.0001944209364431286,
"loss": 0.2913,
"step": 214
},
{
"epoch": 0.4684095860566449,
"grad_norm": 0.8016018271446228,
"learning_rate": 0.00019436381993863405,
"loss": 0.4218,
"step": 215
},
{
"epoch": 0.47058823529411764,
"grad_norm": 0.7906899452209473,
"learning_rate": 0.00019430642102843707,
"loss": 0.3951,
"step": 216
},
{
"epoch": 0.47276688453159044,
"grad_norm": 0.7488080859184265,
"learning_rate": 0.0001942487398843172,
"loss": 0.2522,
"step": 217
},
{
"epoch": 0.4749455337690632,
"grad_norm": 0.7537955641746521,
"learning_rate": 0.00019419077667889872,
"loss": 0.4182,
"step": 218
},
{
"epoch": 0.477124183006536,
"grad_norm": 1.090034008026123,
"learning_rate": 0.00019413253158565006,
"loss": 0.4396,
"step": 219
},
{
"epoch": 0.4793028322440087,
"grad_norm": 0.8850147724151611,
"learning_rate": 0.00019407400477888315,
"loss": 0.4225,
"step": 220
},
{
"epoch": 0.48148148148148145,
"grad_norm": 0.6652522683143616,
"learning_rate": 0.00019401519643375315,
"loss": 0.3252,
"step": 221
},
{
"epoch": 0.48366013071895425,
"grad_norm": 0.9398255348205566,
"learning_rate": 0.00019395610672625767,
"loss": 0.3825,
"step": 222
},
{
"epoch": 0.485838779956427,
"grad_norm": 0.7577664852142334,
"learning_rate": 0.00019389673583323645,
"loss": 0.4972,
"step": 223
},
{
"epoch": 0.4880174291938998,
"grad_norm": 0.7722988724708557,
"learning_rate": 0.00019383708393237075,
"loss": 0.3655,
"step": 224
},
{
"epoch": 0.49019607843137253,
"grad_norm": 0.9242033958435059,
"learning_rate": 0.0001937771512021827,
"loss": 0.5322,
"step": 225
},
{
"epoch": 0.4923747276688453,
"grad_norm": 0.7697513699531555,
"learning_rate": 0.00019371693782203498,
"loss": 0.3516,
"step": 226
},
{
"epoch": 0.49455337690631807,
"grad_norm": 1.0141658782958984,
"learning_rate": 0.00019365644397213014,
"loss": 0.4072,
"step": 227
},
{
"epoch": 0.49673202614379086,
"grad_norm": 0.8579118847846985,
"learning_rate": 0.00019359566983351013,
"loss": 0.5341,
"step": 228
},
{
"epoch": 0.4989106753812636,
"grad_norm": 0.9969218969345093,
"learning_rate": 0.0001935346155880557,
"loss": 0.5544,
"step": 229
},
{
"epoch": 0.5010893246187363,
"grad_norm": 0.7654063105583191,
"learning_rate": 0.0001934732814184859,
"loss": 0.3604,
"step": 230
},
{
"epoch": 0.5010893246187363,
"eval_loss": 0.430705189704895,
"eval_runtime": 0.9369,
"eval_samples_per_second": 182.508,
"eval_steps_per_second": 13.875,
"step": 230
},
{
"epoch": 0.5032679738562091,
"grad_norm": 0.8155052661895752,
"learning_rate": 0.00019341166750835748,
"loss": 0.3841,
"step": 231
},
{
"epoch": 0.5054466230936819,
"grad_norm": 1.005814790725708,
"learning_rate": 0.00019334977404206443,
"loss": 0.4976,
"step": 232
},
{
"epoch": 0.5076252723311547,
"grad_norm": 0.795301079750061,
"learning_rate": 0.00019328760120483743,
"loss": 0.347,
"step": 233
},
{
"epoch": 0.5098039215686274,
"grad_norm": 0.8294497728347778,
"learning_rate": 0.00019322514918274308,
"loss": 0.3366,
"step": 234
},
{
"epoch": 0.5119825708061002,
"grad_norm": 1.0151102542877197,
"learning_rate": 0.0001931624181626836,
"loss": 0.3217,
"step": 235
},
{
"epoch": 0.514161220043573,
"grad_norm": 0.8590940833091736,
"learning_rate": 0.00019309940833239626,
"loss": 0.4399,
"step": 236
},
{
"epoch": 0.5163398692810458,
"grad_norm": 0.8852725028991699,
"learning_rate": 0.00019303611988045257,
"loss": 0.3737,
"step": 237
},
{
"epoch": 0.5185185185185185,
"grad_norm": 0.8230773210525513,
"learning_rate": 0.00019297255299625797,
"loss": 0.4262,
"step": 238
},
{
"epoch": 0.5206971677559913,
"grad_norm": 0.6675543785095215,
"learning_rate": 0.00019290870787005114,
"loss": 0.291,
"step": 239
},
{
"epoch": 0.5228758169934641,
"grad_norm": 0.867770791053772,
"learning_rate": 0.00019284458469290354,
"loss": 0.4791,
"step": 240
},
{
"epoch": 0.5250544662309368,
"grad_norm": 0.6569726467132568,
"learning_rate": 0.0001927801836567187,
"loss": 0.3331,
"step": 241
},
{
"epoch": 0.5272331154684096,
"grad_norm": 0.9207850098609924,
"learning_rate": 0.00019271550495423168,
"loss": 0.3867,
"step": 242
},
{
"epoch": 0.5294117647058824,
"grad_norm": 0.9937344789505005,
"learning_rate": 0.00019265054877900858,
"loss": 0.4788,
"step": 243
},
{
"epoch": 0.5315904139433552,
"grad_norm": 0.6370213627815247,
"learning_rate": 0.00019258531532544585,
"loss": 0.2628,
"step": 244
},
{
"epoch": 0.5337690631808278,
"grad_norm": 0.7503904104232788,
"learning_rate": 0.00019251980478876985,
"loss": 0.3214,
"step": 245
},
{
"epoch": 0.5359477124183006,
"grad_norm": 0.8075821399688721,
"learning_rate": 0.00019245401736503608,
"loss": 0.4626,
"step": 246
},
{
"epoch": 0.5381263616557734,
"grad_norm": 0.7386597990989685,
"learning_rate": 0.0001923879532511287,
"loss": 0.366,
"step": 247
},
{
"epoch": 0.5403050108932462,
"grad_norm": 0.7394566535949707,
"learning_rate": 0.00019232161264475997,
"loss": 0.387,
"step": 248
},
{
"epoch": 0.5424836601307189,
"grad_norm": 0.7640907168388367,
"learning_rate": 0.0001922549957444696,
"loss": 0.58,
"step": 249
},
{
"epoch": 0.5446623093681917,
"grad_norm": 0.6614570021629333,
"learning_rate": 0.00019218810274962417,
"loss": 0.3073,
"step": 250
},
{
"epoch": 0.5468409586056645,
"grad_norm": 0.966230034828186,
"learning_rate": 0.0001921209338604166,
"loss": 0.5173,
"step": 251
},
{
"epoch": 0.5490196078431373,
"grad_norm": 0.822700560092926,
"learning_rate": 0.00019205348927786532,
"loss": 0.4205,
"step": 252
},
{
"epoch": 0.55119825708061,
"grad_norm": 0.7053878307342529,
"learning_rate": 0.00019198576920381405,
"loss": 0.3872,
"step": 253
},
{
"epoch": 0.5533769063180828,
"grad_norm": 0.7365344166755676,
"learning_rate": 0.00019191777384093081,
"loss": 0.2189,
"step": 254
},
{
"epoch": 0.5555555555555556,
"grad_norm": 0.738994300365448,
"learning_rate": 0.0001918495033927076,
"loss": 0.4104,
"step": 255
},
{
"epoch": 0.5577342047930284,
"grad_norm": 0.6301658153533936,
"learning_rate": 0.0001917809580634596,
"loss": 0.217,
"step": 256
},
{
"epoch": 0.5599128540305011,
"grad_norm": 0.6912908554077148,
"learning_rate": 0.0001917121380583247,
"loss": 0.2477,
"step": 257
},
{
"epoch": 0.5620915032679739,
"grad_norm": 0.7470822930335999,
"learning_rate": 0.00019164304358326275,
"loss": 0.2822,
"step": 258
},
{
"epoch": 0.5642701525054467,
"grad_norm": 0.8313736915588379,
"learning_rate": 0.0001915736748450551,
"loss": 0.4136,
"step": 259
},
{
"epoch": 0.5664488017429193,
"grad_norm": 0.9498727321624756,
"learning_rate": 0.00019150403205130383,
"loss": 0.4656,
"step": 260
},
{
"epoch": 0.5686274509803921,
"grad_norm": 0.7486637830734253,
"learning_rate": 0.0001914341154104312,
"loss": 0.3612,
"step": 261
},
{
"epoch": 0.5708061002178649,
"grad_norm": 0.7852402925491333,
"learning_rate": 0.00019136392513167903,
"loss": 0.3849,
"step": 262
},
{
"epoch": 0.5729847494553377,
"grad_norm": 0.7787222862243652,
"learning_rate": 0.00019129346142510812,
"loss": 0.3202,
"step": 263
},
{
"epoch": 0.5751633986928104,
"grad_norm": 0.7108213305473328,
"learning_rate": 0.00019122272450159745,
"loss": 0.2964,
"step": 264
},
{
"epoch": 0.5773420479302832,
"grad_norm": 0.7679167985916138,
"learning_rate": 0.00019115171457284382,
"loss": 0.3068,
"step": 265
},
{
"epoch": 0.579520697167756,
"grad_norm": 0.9409091472625732,
"learning_rate": 0.0001910804318513609,
"loss": 0.3865,
"step": 266
},
{
"epoch": 0.5816993464052288,
"grad_norm": 0.7475356459617615,
"learning_rate": 0.00019100887655047885,
"loss": 0.3727,
"step": 267
},
{
"epoch": 0.5838779956427015,
"grad_norm": 0.8223069310188293,
"learning_rate": 0.0001909370488843436,
"loss": 0.3594,
"step": 268
},
{
"epoch": 0.5860566448801743,
"grad_norm": 0.8433902263641357,
"learning_rate": 0.00019086494906791614,
"loss": 0.3627,
"step": 269
},
{
"epoch": 0.5882352941176471,
"grad_norm": 0.7213712334632874,
"learning_rate": 0.00019079257731697196,
"loss": 0.3216,
"step": 270
},
{
"epoch": 0.5904139433551199,
"grad_norm": 0.9422861337661743,
"learning_rate": 0.00019071993384810036,
"loss": 0.4973,
"step": 271
},
{
"epoch": 0.5925925925925926,
"grad_norm": 0.7964383363723755,
"learning_rate": 0.0001906470188787039,
"loss": 0.3558,
"step": 272
},
{
"epoch": 0.5947712418300654,
"grad_norm": 0.8275824785232544,
"learning_rate": 0.0001905738326269975,
"loss": 0.2985,
"step": 273
},
{
"epoch": 0.5969498910675382,
"grad_norm": 0.9769518971443176,
"learning_rate": 0.00019050037531200814,
"loss": 0.5299,
"step": 274
},
{
"epoch": 0.599128540305011,
"grad_norm": 0.6739898920059204,
"learning_rate": 0.0001904266471535739,
"loss": 0.2324,
"step": 275
},
{
"epoch": 0.6013071895424836,
"grad_norm": 0.78001868724823,
"learning_rate": 0.00019035264837234347,
"loss": 0.3311,
"step": 276
},
{
"epoch": 0.6034858387799564,
"grad_norm": 0.6510067582130432,
"learning_rate": 0.00019027837918977544,
"loss": 0.2284,
"step": 277
},
{
"epoch": 0.6056644880174292,
"grad_norm": 0.7791504859924316,
"learning_rate": 0.00019020383982813765,
"loss": 0.3247,
"step": 278
},
{
"epoch": 0.6078431372549019,
"grad_norm": 0.7968712449073792,
"learning_rate": 0.00019012903051050643,
"loss": 0.3379,
"step": 279
},
{
"epoch": 0.6100217864923747,
"grad_norm": 0.7802785038948059,
"learning_rate": 0.00019005395146076616,
"loss": 0.3113,
"step": 280
},
{
"epoch": 0.6122004357298475,
"grad_norm": 0.8003398180007935,
"learning_rate": 0.00018997860290360832,
"loss": 0.3467,
"step": 281
},
{
"epoch": 0.6143790849673203,
"grad_norm": 0.9283706545829773,
"learning_rate": 0.00018990298506453104,
"loss": 0.3301,
"step": 282
},
{
"epoch": 0.616557734204793,
"grad_norm": 0.95260089635849,
"learning_rate": 0.00018982709816983828,
"loss": 0.4446,
"step": 283
},
{
"epoch": 0.6187363834422658,
"grad_norm": 0.8746076226234436,
"learning_rate": 0.0001897509424466393,
"loss": 0.3264,
"step": 284
},
{
"epoch": 0.6209150326797386,
"grad_norm": 0.9784271717071533,
"learning_rate": 0.00018967451812284777,
"loss": 0.3103,
"step": 285
},
{
"epoch": 0.6230936819172114,
"grad_norm": 0.7417327165603638,
"learning_rate": 0.00018959782542718128,
"loss": 0.3489,
"step": 286
},
{
"epoch": 0.6252723311546841,
"grad_norm": 0.835861325263977,
"learning_rate": 0.00018952086458916064,
"loss": 0.4059,
"step": 287
},
{
"epoch": 0.6274509803921569,
"grad_norm": 0.7815294861793518,
"learning_rate": 0.000189443635839109,
"loss": 0.3252,
"step": 288
},
{
"epoch": 0.6296296296296297,
"grad_norm": 0.7762477993965149,
"learning_rate": 0.00018936613940815145,
"loss": 0.3905,
"step": 289
},
{
"epoch": 0.6318082788671024,
"grad_norm": 0.7671297192573547,
"learning_rate": 0.00018928837552821404,
"loss": 0.3945,
"step": 290
},
{
"epoch": 0.6339869281045751,
"grad_norm": 0.7193905711174011,
"learning_rate": 0.00018921034443202333,
"loss": 0.2897,
"step": 291
},
{
"epoch": 0.6361655773420479,
"grad_norm": 0.7806922793388367,
"learning_rate": 0.0001891320463531055,
"loss": 0.3675,
"step": 292
},
{
"epoch": 0.6383442265795207,
"grad_norm": 0.9675951600074768,
"learning_rate": 0.0001890534815257858,
"loss": 0.2949,
"step": 293
},
{
"epoch": 0.6405228758169934,
"grad_norm": 0.7414330840110779,
"learning_rate": 0.00018897465018518782,
"loss": 0.2468,
"step": 294
},
{
"epoch": 0.6427015250544662,
"grad_norm": 0.9123559594154358,
"learning_rate": 0.00018889555256723262,
"loss": 0.3618,
"step": 295
},
{
"epoch": 0.644880174291939,
"grad_norm": 0.8628408312797546,
"learning_rate": 0.0001888161889086383,
"loss": 0.4728,
"step": 296
},
{
"epoch": 0.6470588235294118,
"grad_norm": 0.9308063983917236,
"learning_rate": 0.00018873655944691902,
"loss": 0.3469,
"step": 297
},
{
"epoch": 0.6492374727668845,
"grad_norm": 0.7682824730873108,
"learning_rate": 0.00018865666442038456,
"loss": 0.4138,
"step": 298
},
{
"epoch": 0.6514161220043573,
"grad_norm": 0.8124529123306274,
"learning_rate": 0.00018857650406813937,
"loss": 0.3172,
"step": 299
},
{
"epoch": 0.6535947712418301,
"grad_norm": 1.0015543699264526,
"learning_rate": 0.00018849607863008193,
"loss": 0.3274,
"step": 300
},
{
"epoch": 0.6557734204793029,
"grad_norm": 0.8008652329444885,
"learning_rate": 0.0001884153883469041,
"loss": 0.2765,
"step": 301
},
{
"epoch": 0.6579520697167756,
"grad_norm": 0.8472908139228821,
"learning_rate": 0.0001883344334600904,
"loss": 0.362,
"step": 302
},
{
"epoch": 0.6601307189542484,
"grad_norm": 0.953235387802124,
"learning_rate": 0.0001882532142119171,
"loss": 0.383,
"step": 303
},
{
"epoch": 0.6623093681917211,
"grad_norm": 0.9220999479293823,
"learning_rate": 0.00018817173084545176,
"loss": 0.4209,
"step": 304
},
{
"epoch": 0.664488017429194,
"grad_norm": 0.7949219942092896,
"learning_rate": 0.00018808998360455233,
"loss": 0.3869,
"step": 305
},
{
"epoch": 0.6666666666666666,
"grad_norm": 0.851177990436554,
"learning_rate": 0.0001880079727338664,
"loss": 0.2691,
"step": 306
},
{
"epoch": 0.6688453159041394,
"grad_norm": 0.8438466787338257,
"learning_rate": 0.00018792569847883068,
"loss": 0.3314,
"step": 307
},
{
"epoch": 0.6710239651416122,
"grad_norm": 0.842267632484436,
"learning_rate": 0.00018784316108566996,
"loss": 0.3046,
"step": 308
},
{
"epoch": 0.673202614379085,
"grad_norm": 0.6835848689079285,
"learning_rate": 0.00018776036080139666,
"loss": 0.2026,
"step": 309
},
{
"epoch": 0.6753812636165577,
"grad_norm": 0.8685810565948486,
"learning_rate": 0.00018767729787380985,
"loss": 0.2611,
"step": 310
},
{
"epoch": 0.6775599128540305,
"grad_norm": 0.8736341595649719,
"learning_rate": 0.00018759397255149475,
"loss": 0.3135,
"step": 311
},
{
"epoch": 0.6797385620915033,
"grad_norm": 0.6718863844871521,
"learning_rate": 0.00018751038508382176,
"loss": 0.2202,
"step": 312
},
{
"epoch": 0.681917211328976,
"grad_norm": 0.686114490032196,
"learning_rate": 0.00018742653572094583,
"loss": 0.2861,
"step": 313
},
{
"epoch": 0.6840958605664488,
"grad_norm": 0.7779257297515869,
"learning_rate": 0.00018734242471380572,
"loss": 0.2375,
"step": 314
},
{
"epoch": 0.6862745098039216,
"grad_norm": 0.7334826588630676,
"learning_rate": 0.00018725805231412318,
"loss": 0.2855,
"step": 315
},
{
"epoch": 0.6884531590413944,
"grad_norm": 0.8215782046318054,
"learning_rate": 0.00018717341877440226,
"loss": 0.2827,
"step": 316
},
{
"epoch": 0.690631808278867,
"grad_norm": 0.7793420553207397,
"learning_rate": 0.00018708852434792857,
"loss": 0.3593,
"step": 317
},
{
"epoch": 0.6928104575163399,
"grad_norm": 0.6807507872581482,
"learning_rate": 0.0001870033692887684,
"loss": 0.2489,
"step": 318
},
{
"epoch": 0.6949891067538126,
"grad_norm": 0.8848825097084045,
"learning_rate": 0.00018691795385176815,
"loss": 0.3622,
"step": 319
},
{
"epoch": 0.6971677559912854,
"grad_norm": 0.7745735049247742,
"learning_rate": 0.00018683227829255334,
"loss": 0.2808,
"step": 320
},
{
"epoch": 0.6993464052287581,
"grad_norm": 0.8624871969223022,
"learning_rate": 0.00018674634286752805,
"loss": 0.326,
"step": 321
},
{
"epoch": 0.7015250544662309,
"grad_norm": 0.7500248551368713,
"learning_rate": 0.00018666014783387408,
"loss": 0.3413,
"step": 322
},
{
"epoch": 0.7037037037037037,
"grad_norm": 0.7700647115707397,
"learning_rate": 0.0001865736934495501,
"loss": 0.2878,
"step": 323
},
{
"epoch": 0.7058823529411765,
"grad_norm": 0.7682783603668213,
"learning_rate": 0.000186486979973291,
"loss": 0.3626,
"step": 324
},
{
"epoch": 0.7080610021786492,
"grad_norm": 0.8913851976394653,
"learning_rate": 0.00018640000766460704,
"loss": 0.3684,
"step": 325
},
{
"epoch": 0.710239651416122,
"grad_norm": 0.6541398763656616,
"learning_rate": 0.0001863127767837831,
"loss": 0.3203,
"step": 326
},
{
"epoch": 0.7124183006535948,
"grad_norm": 0.9459714889526367,
"learning_rate": 0.00018622528759187795,
"loss": 0.3794,
"step": 327
},
{
"epoch": 0.7145969498910676,
"grad_norm": 0.8284517526626587,
"learning_rate": 0.0001861375403507233,
"loss": 0.2859,
"step": 328
},
{
"epoch": 0.7167755991285403,
"grad_norm": 0.7966834306716919,
"learning_rate": 0.00018604953532292323,
"loss": 0.3299,
"step": 329
},
{
"epoch": 0.7189542483660131,
"grad_norm": 0.9105852842330933,
"learning_rate": 0.00018596127277185329,
"loss": 0.3489,
"step": 330
},
{
"epoch": 0.7211328976034859,
"grad_norm": 0.9086281657218933,
"learning_rate": 0.0001858727529616597,
"loss": 0.4104,
"step": 331
},
{
"epoch": 0.7233115468409586,
"grad_norm": 0.8128787875175476,
"learning_rate": 0.0001857839761572586,
"loss": 0.3057,
"step": 332
},
{
"epoch": 0.7254901960784313,
"grad_norm": 0.7139798402786255,
"learning_rate": 0.0001856949426243352,
"loss": 0.2226,
"step": 333
},
{
"epoch": 0.7276688453159041,
"grad_norm": 0.8051466941833496,
"learning_rate": 0.00018560565262934318,
"loss": 0.2778,
"step": 334
},
{
"epoch": 0.7298474945533769,
"grad_norm": 0.7806089520454407,
"learning_rate": 0.00018551610643950358,
"loss": 0.3074,
"step": 335
},
{
"epoch": 0.7320261437908496,
"grad_norm": 0.8499903678894043,
"learning_rate": 0.00018542630432280422,
"loss": 0.2625,
"step": 336
},
{
"epoch": 0.7342047930283224,
"grad_norm": 0.9724310040473938,
"learning_rate": 0.00018533624654799887,
"loss": 0.3267,
"step": 337
},
{
"epoch": 0.7363834422657952,
"grad_norm": 0.845237135887146,
"learning_rate": 0.00018524593338460635,
"loss": 0.3624,
"step": 338
},
{
"epoch": 0.738562091503268,
"grad_norm": 0.8381814360618591,
"learning_rate": 0.00018515536510290987,
"loss": 0.2973,
"step": 339
},
{
"epoch": 0.7407407407407407,
"grad_norm": 0.7939983606338501,
"learning_rate": 0.00018506454197395606,
"loss": 0.2893,
"step": 340
},
{
"epoch": 0.7429193899782135,
"grad_norm": 1.1554635763168335,
"learning_rate": 0.00018497346426955434,
"loss": 0.4355,
"step": 341
},
{
"epoch": 0.7450980392156863,
"grad_norm": 0.7291870713233948,
"learning_rate": 0.00018488213226227588,
"loss": 0.3172,
"step": 342
},
{
"epoch": 0.7472766884531591,
"grad_norm": 0.6162805557250977,
"learning_rate": 0.00018479054622545302,
"loss": 0.1914,
"step": 343
},
{
"epoch": 0.7494553376906318,
"grad_norm": 0.800554096698761,
"learning_rate": 0.0001846987064331783,
"loss": 0.3092,
"step": 344
},
{
"epoch": 0.7516339869281046,
"grad_norm": 0.8024484515190125,
"learning_rate": 0.00018460661316030365,
"loss": 0.2598,
"step": 345
},
{
"epoch": 0.7516339869281046,
"eval_loss": 0.35579976439476013,
"eval_runtime": 0.9369,
"eval_samples_per_second": 182.512,
"eval_steps_per_second": 13.875,
"step": 345
},
{
"epoch": 0.7538126361655774,
"grad_norm": 0.6010091304779053,
"learning_rate": 0.00018451426668243963,
"loss": 0.1268,
"step": 346
},
{
"epoch": 0.7559912854030502,
"grad_norm": 0.909592866897583,
"learning_rate": 0.0001844216672759546,
"loss": 0.2808,
"step": 347
},
{
"epoch": 0.7581699346405228,
"grad_norm": 0.8788052201271057,
"learning_rate": 0.0001843288152179739,
"loss": 0.3516,
"step": 348
},
{
"epoch": 0.7603485838779956,
"grad_norm": 0.987259030342102,
"learning_rate": 0.00018423571078637885,
"loss": 0.4607,
"step": 349
},
{
"epoch": 0.7625272331154684,
"grad_norm": 1.0777586698532104,
"learning_rate": 0.00018414235425980616,
"loss": 0.3367,
"step": 350
},
{
"epoch": 0.7647058823529411,
"grad_norm": 0.8233873844146729,
"learning_rate": 0.00018404874591764696,
"loss": 0.2616,
"step": 351
},
{
"epoch": 0.7668845315904139,
"grad_norm": 0.6623446345329285,
"learning_rate": 0.00018395488604004603,
"loss": 0.154,
"step": 352
},
{
"epoch": 0.7690631808278867,
"grad_norm": 0.9828007817268372,
"learning_rate": 0.0001838607749079009,
"loss": 0.3084,
"step": 353
},
{
"epoch": 0.7712418300653595,
"grad_norm": 0.7043982148170471,
"learning_rate": 0.00018376641280286107,
"loss": 0.2568,
"step": 354
},
{
"epoch": 0.7734204793028322,
"grad_norm": 0.7597530484199524,
"learning_rate": 0.00018367180000732706,
"loss": 0.306,
"step": 355
},
{
"epoch": 0.775599128540305,
"grad_norm": 0.8099778890609741,
"learning_rate": 0.00018357693680444976,
"loss": 0.2389,
"step": 356
},
{
"epoch": 0.7777777777777778,
"grad_norm": 0.7014642953872681,
"learning_rate": 0.00018348182347812931,
"loss": 0.2601,
"step": 357
},
{
"epoch": 0.7799564270152506,
"grad_norm": 0.8545548915863037,
"learning_rate": 0.00018338646031301458,
"loss": 0.3604,
"step": 358
},
{
"epoch": 0.7821350762527233,
"grad_norm": 0.7516512870788574,
"learning_rate": 0.00018329084759450192,
"loss": 0.2489,
"step": 359
},
{
"epoch": 0.7843137254901961,
"grad_norm": 0.8581446409225464,
"learning_rate": 0.00018319498560873476,
"loss": 0.28,
"step": 360
},
{
"epoch": 0.7864923747276689,
"grad_norm": 0.8704679608345032,
"learning_rate": 0.00018309887464260238,
"loss": 0.3917,
"step": 361
},
{
"epoch": 0.7886710239651417,
"grad_norm": 0.832797110080719,
"learning_rate": 0.00018300251498373923,
"loss": 0.3356,
"step": 362
},
{
"epoch": 0.7908496732026143,
"grad_norm": 0.787756085395813,
"learning_rate": 0.00018290590692052398,
"loss": 0.3156,
"step": 363
},
{
"epoch": 0.7930283224400871,
"grad_norm": 1.014853596687317,
"learning_rate": 0.00018280905074207884,
"loss": 0.3131,
"step": 364
},
{
"epoch": 0.7952069716775599,
"grad_norm": 0.7135484218597412,
"learning_rate": 0.00018271194673826838,
"loss": 0.1935,
"step": 365
},
{
"epoch": 0.7973856209150327,
"grad_norm": 0.6172918677330017,
"learning_rate": 0.000182614595199699,
"loss": 0.1916,
"step": 366
},
{
"epoch": 0.7995642701525054,
"grad_norm": 0.689791738986969,
"learning_rate": 0.00018251699641771784,
"loss": 0.1738,
"step": 367
},
{
"epoch": 0.8017429193899782,
"grad_norm": 0.8571555614471436,
"learning_rate": 0.00018241915068441196,
"loss": 0.2738,
"step": 368
},
{
"epoch": 0.803921568627451,
"grad_norm": 0.7348290681838989,
"learning_rate": 0.00018232105829260752,
"loss": 0.1617,
"step": 369
},
{
"epoch": 0.8061002178649237,
"grad_norm": 0.72556471824646,
"learning_rate": 0.00018222271953586883,
"loss": 0.2201,
"step": 370
},
{
"epoch": 0.8082788671023965,
"grad_norm": 0.915581226348877,
"learning_rate": 0.0001821241347084975,
"loss": 0.3508,
"step": 371
},
{
"epoch": 0.8104575163398693,
"grad_norm": 0.9371058344841003,
"learning_rate": 0.00018202530410553163,
"loss": 0.3495,
"step": 372
},
{
"epoch": 0.8126361655773421,
"grad_norm": 0.696180522441864,
"learning_rate": 0.00018192622802274476,
"loss": 0.2297,
"step": 373
},
{
"epoch": 0.8148148148148148,
"grad_norm": 1.2849663496017456,
"learning_rate": 0.00018182690675664514,
"loss": 0.3872,
"step": 374
},
{
"epoch": 0.8169934640522876,
"grad_norm": 0.7671440243721008,
"learning_rate": 0.00018172734060447482,
"loss": 0.2522,
"step": 375
},
{
"epoch": 0.8191721132897604,
"grad_norm": 0.7630666494369507,
"learning_rate": 0.00018162752986420868,
"loss": 0.1857,
"step": 376
},
{
"epoch": 0.8213507625272332,
"grad_norm": 0.8664875626564026,
"learning_rate": 0.00018152747483455358,
"loss": 0.3085,
"step": 377
},
{
"epoch": 0.8235294117647058,
"grad_norm": 0.7418748140335083,
"learning_rate": 0.0001814271758149475,
"loss": 0.2537,
"step": 378
},
{
"epoch": 0.8257080610021786,
"grad_norm": 0.9873255491256714,
"learning_rate": 0.0001813266331055586,
"loss": 0.335,
"step": 379
},
{
"epoch": 0.8278867102396514,
"grad_norm": 0.8407981991767883,
"learning_rate": 0.00018122584700728443,
"loss": 0.2625,
"step": 380
},
{
"epoch": 0.8300653594771242,
"grad_norm": 0.8493285179138184,
"learning_rate": 0.0001811248178217507,
"loss": 0.2167,
"step": 381
},
{
"epoch": 0.8322440087145969,
"grad_norm": 0.7906709909439087,
"learning_rate": 0.00018102354585131092,
"loss": 0.2919,
"step": 382
},
{
"epoch": 0.8344226579520697,
"grad_norm": 0.8033335208892822,
"learning_rate": 0.00018092203139904496,
"loss": 0.2571,
"step": 383
},
{
"epoch": 0.8366013071895425,
"grad_norm": 0.616438627243042,
"learning_rate": 0.00018082027476875847,
"loss": 0.2102,
"step": 384
},
{
"epoch": 0.8387799564270153,
"grad_norm": 0.6517376899719238,
"learning_rate": 0.00018071827626498185,
"loss": 0.2242,
"step": 385
},
{
"epoch": 0.840958605664488,
"grad_norm": 0.7315549254417419,
"learning_rate": 0.00018061603619296942,
"loss": 0.2594,
"step": 386
},
{
"epoch": 0.8431372549019608,
"grad_norm": 0.6937726736068726,
"learning_rate": 0.00018051355485869833,
"loss": 0.1991,
"step": 387
},
{
"epoch": 0.8453159041394336,
"grad_norm": 0.7155842185020447,
"learning_rate": 0.0001804108325688679,
"loss": 0.2051,
"step": 388
},
{
"epoch": 0.8474945533769063,
"grad_norm": 1.1797689199447632,
"learning_rate": 0.00018030786963089845,
"loss": 0.3421,
"step": 389
},
{
"epoch": 0.8496732026143791,
"grad_norm": 0.7603274583816528,
"learning_rate": 0.00018020466635293057,
"loss": 0.1985,
"step": 390
},
{
"epoch": 0.8518518518518519,
"grad_norm": 0.7939961552619934,
"learning_rate": 0.0001801012230438241,
"loss": 0.2753,
"step": 391
},
{
"epoch": 0.8540305010893247,
"grad_norm": 0.8357366323471069,
"learning_rate": 0.0001799975400131572,
"loss": 0.3453,
"step": 392
},
{
"epoch": 0.8562091503267973,
"grad_norm": 0.7716991305351257,
"learning_rate": 0.00017989361757122553,
"loss": 0.2441,
"step": 393
},
{
"epoch": 0.8583877995642701,
"grad_norm": 0.7844340205192566,
"learning_rate": 0.00017978945602904116,
"loss": 0.3635,
"step": 394
},
{
"epoch": 0.8605664488017429,
"grad_norm": 0.6692304611206055,
"learning_rate": 0.00017968505569833173,
"loss": 0.1895,
"step": 395
},
{
"epoch": 0.8627450980392157,
"grad_norm": 0.7669034004211426,
"learning_rate": 0.0001795804168915396,
"loss": 0.1833,
"step": 396
},
{
"epoch": 0.8649237472766884,
"grad_norm": 0.9231857657432556,
"learning_rate": 0.00017947553992182075,
"loss": 0.3172,
"step": 397
},
{
"epoch": 0.8671023965141612,
"grad_norm": 0.9847748875617981,
"learning_rate": 0.00017937042510304392,
"loss": 0.3479,
"step": 398
},
{
"epoch": 0.869281045751634,
"grad_norm": 0.719962477684021,
"learning_rate": 0.00017926507274978963,
"loss": 0.2653,
"step": 399
},
{
"epoch": 0.8714596949891068,
"grad_norm": 0.7490435242652893,
"learning_rate": 0.00017915948317734942,
"loss": 0.2387,
"step": 400
},
{
"epoch": 0.8736383442265795,
"grad_norm": 0.744804322719574,
"learning_rate": 0.00017905365670172458,
"loss": 0.2368,
"step": 401
},
{
"epoch": 0.8758169934640523,
"grad_norm": 0.6886241436004639,
"learning_rate": 0.00017894759363962554,
"loss": 0.2125,
"step": 402
},
{
"epoch": 0.8779956427015251,
"grad_norm": 0.7730022072792053,
"learning_rate": 0.0001788412943084707,
"loss": 0.2253,
"step": 403
},
{
"epoch": 0.8801742919389978,
"grad_norm": 0.8058857321739197,
"learning_rate": 0.00017873475902638553,
"loss": 0.2845,
"step": 404
},
{
"epoch": 0.8823529411764706,
"grad_norm": 0.6579281091690063,
"learning_rate": 0.0001786279881122017,
"loss": 0.1922,
"step": 405
},
{
"epoch": 0.8845315904139434,
"grad_norm": 0.6769328713417053,
"learning_rate": 0.00017852098188545602,
"loss": 0.1609,
"step": 406
},
{
"epoch": 0.8867102396514162,
"grad_norm": 0.7010467648506165,
"learning_rate": 0.0001784137406663895,
"loss": 0.2387,
"step": 407
},
{
"epoch": 0.8888888888888888,
"grad_norm": 0.7626838088035583,
"learning_rate": 0.00017830626477594654,
"loss": 0.237,
"step": 408
},
{
"epoch": 0.8910675381263616,
"grad_norm": 0.8782392144203186,
"learning_rate": 0.0001781985545357737,
"loss": 0.3278,
"step": 409
},
{
"epoch": 0.8932461873638344,
"grad_norm": 0.6178898215293884,
"learning_rate": 0.00017809061026821896,
"loss": 0.1584,
"step": 410
},
{
"epoch": 0.8954248366013072,
"grad_norm": 0.7101506590843201,
"learning_rate": 0.00017798243229633068,
"loss": 0.2384,
"step": 411
},
{
"epoch": 0.8976034858387799,
"grad_norm": 0.5807170867919922,
"learning_rate": 0.00017787402094385666,
"loss": 0.1256,
"step": 412
},
{
"epoch": 0.8997821350762527,
"grad_norm": 0.6202595829963684,
"learning_rate": 0.00017776537653524307,
"loss": 0.168,
"step": 413
},
{
"epoch": 0.9019607843137255,
"grad_norm": 0.5887298583984375,
"learning_rate": 0.00017765649939563365,
"loss": 0.1411,
"step": 414
},
{
"epoch": 0.9041394335511983,
"grad_norm": 0.745570957660675,
"learning_rate": 0.0001775473898508685,
"loss": 0.2842,
"step": 415
},
{
"epoch": 0.906318082788671,
"grad_norm": 0.8904745578765869,
"learning_rate": 0.00017743804822748345,
"loss": 0.3217,
"step": 416
},
{
"epoch": 0.9084967320261438,
"grad_norm": 0.8730976581573486,
"learning_rate": 0.0001773284748527087,
"loss": 0.3403,
"step": 417
},
{
"epoch": 0.9106753812636166,
"grad_norm": 0.7911598086357117,
"learning_rate": 0.00017721867005446806,
"loss": 0.2731,
"step": 418
},
{
"epoch": 0.9128540305010894,
"grad_norm": 0.6702750325202942,
"learning_rate": 0.00017710863416137805,
"loss": 0.2073,
"step": 419
},
{
"epoch": 0.9150326797385621,
"grad_norm": 0.5198416113853455,
"learning_rate": 0.00017699836750274662,
"loss": 0.129,
"step": 420
},
{
"epoch": 0.9172113289760349,
"grad_norm": 0.6400766372680664,
"learning_rate": 0.00017688787040857245,
"loss": 0.1682,
"step": 421
},
{
"epoch": 0.9193899782135077,
"grad_norm": 0.7851924896240234,
"learning_rate": 0.00017677714320954378,
"loss": 0.221,
"step": 422
},
{
"epoch": 0.9215686274509803,
"grad_norm": 0.7246972918510437,
"learning_rate": 0.0001766661862370376,
"loss": 0.2438,
"step": 423
},
{
"epoch": 0.9237472766884531,
"grad_norm": 0.7575098872184753,
"learning_rate": 0.00017655499982311847,
"loss": 0.2633,
"step": 424
},
{
"epoch": 0.9259259259259259,
"grad_norm": 0.902695894241333,
"learning_rate": 0.0001764435843005376,
"loss": 0.3567,
"step": 425
},
{
"epoch": 0.9281045751633987,
"grad_norm": 0.7910832762718201,
"learning_rate": 0.00017633194000273188,
"loss": 0.2578,
"step": 426
},
{
"epoch": 0.9302832244008714,
"grad_norm": 0.7221683263778687,
"learning_rate": 0.00017622006726382287,
"loss": 0.2075,
"step": 427
},
{
"epoch": 0.9324618736383442,
"grad_norm": 0.7584638595581055,
"learning_rate": 0.00017610796641861581,
"loss": 0.2313,
"step": 428
},
{
"epoch": 0.934640522875817,
"grad_norm": 0.6272245645523071,
"learning_rate": 0.00017599563780259858,
"loss": 0.1876,
"step": 429
},
{
"epoch": 0.9368191721132898,
"grad_norm": 0.8224185705184937,
"learning_rate": 0.0001758830817519407,
"loss": 0.2558,
"step": 430
},
{
"epoch": 0.9389978213507625,
"grad_norm": 0.7095481157302856,
"learning_rate": 0.00017577029860349233,
"loss": 0.2256,
"step": 431
},
{
"epoch": 0.9411764705882353,
"grad_norm": 0.7715668678283691,
"learning_rate": 0.00017565728869478337,
"loss": 0.2483,
"step": 432
},
{
"epoch": 0.9433551198257081,
"grad_norm": 0.905522882938385,
"learning_rate": 0.00017554405236402222,
"loss": 0.2525,
"step": 433
},
{
"epoch": 0.9455337690631809,
"grad_norm": 0.6268091797828674,
"learning_rate": 0.00017543058995009503,
"loss": 0.1703,
"step": 434
},
{
"epoch": 0.9477124183006536,
"grad_norm": 0.730185329914093,
"learning_rate": 0.0001753169017925644,
"loss": 0.2187,
"step": 435
},
{
"epoch": 0.9498910675381264,
"grad_norm": 0.7279319763183594,
"learning_rate": 0.00017520298823166873,
"loss": 0.2584,
"step": 436
},
{
"epoch": 0.9520697167755992,
"grad_norm": 0.7050124406814575,
"learning_rate": 0.00017508884960832076,
"loss": 0.1723,
"step": 437
},
{
"epoch": 0.954248366013072,
"grad_norm": 0.7544010281562805,
"learning_rate": 0.000174974486264107,
"loss": 0.1899,
"step": 438
},
{
"epoch": 0.9564270152505446,
"grad_norm": 0.8714267015457153,
"learning_rate": 0.00017485989854128627,
"loss": 0.2518,
"step": 439
},
{
"epoch": 0.9586056644880174,
"grad_norm": 0.7128406167030334,
"learning_rate": 0.00017474508678278915,
"loss": 0.1832,
"step": 440
},
{
"epoch": 0.9607843137254902,
"grad_norm": 0.7446244955062866,
"learning_rate": 0.00017463005133221645,
"loss": 0.2089,
"step": 441
},
{
"epoch": 0.9629629629629629,
"grad_norm": 0.7556454539299011,
"learning_rate": 0.00017451479253383857,
"loss": 0.2327,
"step": 442
},
{
"epoch": 0.9651416122004357,
"grad_norm": 0.5932292938232422,
"learning_rate": 0.00017439931073259427,
"loss": 0.1094,
"step": 443
},
{
"epoch": 0.9673202614379085,
"grad_norm": 0.909900426864624,
"learning_rate": 0.00017428360627408978,
"loss": 0.2175,
"step": 444
},
{
"epoch": 0.9694989106753813,
"grad_norm": 0.7743591666221619,
"learning_rate": 0.00017416767950459766,
"loss": 0.1818,
"step": 445
},
{
"epoch": 0.971677559912854,
"grad_norm": 0.7306420207023621,
"learning_rate": 0.0001740515307710557,
"loss": 0.1957,
"step": 446
},
{
"epoch": 0.9738562091503268,
"grad_norm": 0.5480270981788635,
"learning_rate": 0.00017393516042106603,
"loss": 0.1477,
"step": 447
},
{
"epoch": 0.9760348583877996,
"grad_norm": 0.7661142945289612,
"learning_rate": 0.000173818568802894,
"loss": 0.2177,
"step": 448
},
{
"epoch": 0.9782135076252724,
"grad_norm": 0.8411223292350769,
"learning_rate": 0.00017370175626546728,
"loss": 0.2104,
"step": 449
},
{
"epoch": 0.9803921568627451,
"grad_norm": 0.816074013710022,
"learning_rate": 0.00017358472315837447,
"loss": 0.2349,
"step": 450
},
{
"epoch": 0.9825708061002179,
"grad_norm": 1.0009855031967163,
"learning_rate": 0.00017346746983186442,
"loss": 0.2917,
"step": 451
},
{
"epoch": 0.9847494553376906,
"grad_norm": 0.7690507769584656,
"learning_rate": 0.00017334999663684504,
"loss": 0.2216,
"step": 452
},
{
"epoch": 0.9869281045751634,
"grad_norm": 0.7864702939987183,
"learning_rate": 0.00017323230392488222,
"loss": 0.2601,
"step": 453
},
{
"epoch": 0.9891067538126361,
"grad_norm": 0.8309422731399536,
"learning_rate": 0.00017311439204819874,
"loss": 0.2257,
"step": 454
},
{
"epoch": 0.9912854030501089,
"grad_norm": 0.7395288944244385,
"learning_rate": 0.00017299626135967343,
"loss": 0.1859,
"step": 455
},
{
"epoch": 0.9934640522875817,
"grad_norm": 0.6524999737739563,
"learning_rate": 0.00017287791221283984,
"loss": 0.1523,
"step": 456
},
{
"epoch": 0.9956427015250545,
"grad_norm": 0.6195773482322693,
"learning_rate": 0.00017275934496188534,
"loss": 0.1307,
"step": 457
},
{
"epoch": 0.9978213507625272,
"grad_norm": 0.8236491084098816,
"learning_rate": 0.00017264055996165007,
"loss": 0.2214,
"step": 458
},
{
"epoch": 1.0,
"grad_norm": 0.5916262865066528,
"learning_rate": 0.00017252155756762575,
"loss": 0.1633,
"step": 459
}
],
"logging_steps": 1,
"max_steps": 1836,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 459,
"total_flos": 1.1104996358828851e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}