finetuned_llama3-8b-instruct / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 6.0,
"eval_steps": 96,
"global_step": 480,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0125,
"grad_norm": 2.395050048828125,
"learning_rate": 2e-05,
"loss": 1.4995,
"step": 1
},
{
"epoch": 0.025,
"grad_norm": 2.28926944732666,
"learning_rate": 4e-05,
"loss": 1.4965,
"step": 2
},
{
"epoch": 0.0375,
"grad_norm": 2.798283815383911,
"learning_rate": 6e-05,
"loss": 1.4668,
"step": 3
},
{
"epoch": 0.05,
"grad_norm": 2.141174554824829,
"learning_rate": 8e-05,
"loss": 1.3202,
"step": 4
},
{
"epoch": 0.0625,
"grad_norm": 1.8937232494354248,
"learning_rate": 0.0001,
"loss": 1.1309,
"step": 5
},
{
"epoch": 0.075,
"grad_norm": 0.9302598834037781,
"learning_rate": 0.00012,
"loss": 1.0411,
"step": 6
},
{
"epoch": 0.0875,
"grad_norm": 1.0292965173721313,
"learning_rate": 0.00014,
"loss": 0.9394,
"step": 7
},
{
"epoch": 0.1,
"grad_norm": 1.2454479932785034,
"learning_rate": 0.00016,
"loss": 0.9301,
"step": 8
},
{
"epoch": 0.1125,
"grad_norm": 1.3369941711425781,
"learning_rate": 0.00018,
"loss": 0.8461,
"step": 9
},
{
"epoch": 0.125,
"grad_norm": 2.1479320526123047,
"learning_rate": 0.0002,
"loss": 0.7955,
"step": 10
},
{
"epoch": 0.1375,
"grad_norm": 0.8992676734924316,
"learning_rate": 0.0001995744680851064,
"loss": 0.8119,
"step": 11
},
{
"epoch": 0.15,
"grad_norm": 0.9127306938171387,
"learning_rate": 0.0001991489361702128,
"loss": 0.7874,
"step": 12
},
{
"epoch": 0.1625,
"grad_norm": 0.8198272585868835,
"learning_rate": 0.00019872340425531918,
"loss": 0.7407,
"step": 13
},
{
"epoch": 0.175,
"grad_norm": 1.111543893814087,
"learning_rate": 0.00019829787234042554,
"loss": 0.8275,
"step": 14
},
{
"epoch": 0.1875,
"grad_norm": 2.1975886821746826,
"learning_rate": 0.00019787234042553193,
"loss": 0.7143,
"step": 15
},
{
"epoch": 0.2,
"grad_norm": 0.7623311281204224,
"learning_rate": 0.00019744680851063832,
"loss": 0.7568,
"step": 16
},
{
"epoch": 0.2125,
"grad_norm": 0.8410439491271973,
"learning_rate": 0.0001970212765957447,
"loss": 0.6292,
"step": 17
},
{
"epoch": 0.225,
"grad_norm": 0.8105931878089905,
"learning_rate": 0.00019659574468085107,
"loss": 0.7748,
"step": 18
},
{
"epoch": 0.2375,
"grad_norm": 0.8514361381530762,
"learning_rate": 0.00019617021276595745,
"loss": 0.7081,
"step": 19
},
{
"epoch": 0.25,
"grad_norm": 1.0301614999771118,
"learning_rate": 0.00019574468085106384,
"loss": 0.6643,
"step": 20
},
{
"epoch": 0.2625,
"grad_norm": 0.7055068612098694,
"learning_rate": 0.00019531914893617023,
"loss": 0.7002,
"step": 21
},
{
"epoch": 0.275,
"grad_norm": 0.6796251535415649,
"learning_rate": 0.0001948936170212766,
"loss": 0.7437,
"step": 22
},
{
"epoch": 0.2875,
"grad_norm": 0.6966366767883301,
"learning_rate": 0.00019446808510638298,
"loss": 0.674,
"step": 23
},
{
"epoch": 0.3,
"grad_norm": 0.6405335068702698,
"learning_rate": 0.00019404255319148937,
"loss": 0.6369,
"step": 24
},
{
"epoch": 0.3125,
"grad_norm": 0.6977805495262146,
"learning_rate": 0.00019361702127659576,
"loss": 0.6369,
"step": 25
},
{
"epoch": 0.325,
"grad_norm": 0.6844583749771118,
"learning_rate": 0.00019319148936170212,
"loss": 0.6886,
"step": 26
},
{
"epoch": 0.3375,
"grad_norm": 0.6735361814498901,
"learning_rate": 0.0001927659574468085,
"loss": 0.7131,
"step": 27
},
{
"epoch": 0.35,
"grad_norm": 0.6853126883506775,
"learning_rate": 0.0001923404255319149,
"loss": 0.6361,
"step": 28
},
{
"epoch": 0.3625,
"grad_norm": 0.693722665309906,
"learning_rate": 0.0001919148936170213,
"loss": 0.6772,
"step": 29
},
{
"epoch": 0.375,
"grad_norm": 0.6884544491767883,
"learning_rate": 0.00019148936170212768,
"loss": 0.6403,
"step": 30
},
{
"epoch": 0.3875,
"grad_norm": 0.6849629878997803,
"learning_rate": 0.00019106382978723404,
"loss": 0.6859,
"step": 31
},
{
"epoch": 0.4,
"grad_norm": 0.7242761850357056,
"learning_rate": 0.00019063829787234045,
"loss": 0.6867,
"step": 32
},
{
"epoch": 0.4125,
"grad_norm": 0.9043200016021729,
"learning_rate": 0.00019021276595744682,
"loss": 0.6624,
"step": 33
},
{
"epoch": 0.425,
"grad_norm": 0.9550056457519531,
"learning_rate": 0.0001897872340425532,
"loss": 0.6395,
"step": 34
},
{
"epoch": 0.4375,
"grad_norm": 0.6916117668151855,
"learning_rate": 0.00018936170212765957,
"loss": 0.7046,
"step": 35
},
{
"epoch": 0.45,
"grad_norm": 0.6232885718345642,
"learning_rate": 0.00018893617021276598,
"loss": 0.6716,
"step": 36
},
{
"epoch": 0.4625,
"grad_norm": 0.6697956323623657,
"learning_rate": 0.00018851063829787234,
"loss": 0.7149,
"step": 37
},
{
"epoch": 0.475,
"grad_norm": 0.6431601643562317,
"learning_rate": 0.00018808510638297873,
"loss": 0.6996,
"step": 38
},
{
"epoch": 0.4875,
"grad_norm": 0.614162802696228,
"learning_rate": 0.00018765957446808512,
"loss": 0.6178,
"step": 39
},
{
"epoch": 0.5,
"grad_norm": 0.8608362078666687,
"learning_rate": 0.0001872340425531915,
"loss": 0.7055,
"step": 40
},
{
"epoch": 0.5125,
"grad_norm": 0.6234711408615112,
"learning_rate": 0.00018680851063829787,
"loss": 0.682,
"step": 41
},
{
"epoch": 0.525,
"grad_norm": 0.6244091987609863,
"learning_rate": 0.00018638297872340426,
"loss": 0.6767,
"step": 42
},
{
"epoch": 0.5375,
"grad_norm": 0.6198769807815552,
"learning_rate": 0.00018595744680851065,
"loss": 0.6993,
"step": 43
},
{
"epoch": 0.55,
"grad_norm": 0.6559383273124695,
"learning_rate": 0.00018553191489361704,
"loss": 0.6648,
"step": 44
},
{
"epoch": 0.5625,
"grad_norm": 0.6858519315719604,
"learning_rate": 0.0001851063829787234,
"loss": 0.6413,
"step": 45
},
{
"epoch": 0.575,
"grad_norm": 0.5903568863868713,
"learning_rate": 0.0001846808510638298,
"loss": 0.6755,
"step": 46
},
{
"epoch": 0.5875,
"grad_norm": 0.6439600586891174,
"learning_rate": 0.00018425531914893618,
"loss": 0.6677,
"step": 47
},
{
"epoch": 0.6,
"grad_norm": 0.6454241275787354,
"learning_rate": 0.00018382978723404257,
"loss": 0.6904,
"step": 48
},
{
"epoch": 0.6125,
"grad_norm": 0.5940038561820984,
"learning_rate": 0.00018340425531914896,
"loss": 0.6551,
"step": 49
},
{
"epoch": 0.625,
"grad_norm": 0.606603741645813,
"learning_rate": 0.00018297872340425532,
"loss": 0.5867,
"step": 50
},
{
"epoch": 0.6375,
"grad_norm": 0.616865873336792,
"learning_rate": 0.0001825531914893617,
"loss": 0.6865,
"step": 51
},
{
"epoch": 0.65,
"grad_norm": 0.6501201391220093,
"learning_rate": 0.0001821276595744681,
"loss": 0.5942,
"step": 52
},
{
"epoch": 0.6625,
"grad_norm": 0.6296024918556213,
"learning_rate": 0.00018170212765957448,
"loss": 0.6762,
"step": 53
},
{
"epoch": 0.675,
"grad_norm": 0.656863808631897,
"learning_rate": 0.00018127659574468084,
"loss": 0.7108,
"step": 54
},
{
"epoch": 0.6875,
"grad_norm": 0.5761714577674866,
"learning_rate": 0.00018085106382978726,
"loss": 0.6133,
"step": 55
},
{
"epoch": 0.7,
"grad_norm": 0.6318382620811462,
"learning_rate": 0.00018042553191489362,
"loss": 0.6654,
"step": 56
},
{
"epoch": 0.7125,
"grad_norm": 0.6335327625274658,
"learning_rate": 0.00018,
"loss": 0.6497,
"step": 57
},
{
"epoch": 0.725,
"grad_norm": 0.5412627458572388,
"learning_rate": 0.00017957446808510637,
"loss": 0.6578,
"step": 58
},
{
"epoch": 0.7375,
"grad_norm": 0.5853710770606995,
"learning_rate": 0.0001791489361702128,
"loss": 0.689,
"step": 59
},
{
"epoch": 0.75,
"grad_norm": 0.6023885607719421,
"learning_rate": 0.00017872340425531915,
"loss": 0.6605,
"step": 60
},
{
"epoch": 0.7625,
"grad_norm": 0.6343605518341064,
"learning_rate": 0.00017829787234042554,
"loss": 0.702,
"step": 61
},
{
"epoch": 0.775,
"grad_norm": 0.5401332378387451,
"learning_rate": 0.0001778723404255319,
"loss": 0.5743,
"step": 62
},
{
"epoch": 0.7875,
"grad_norm": 0.5706886649131775,
"learning_rate": 0.00017744680851063832,
"loss": 0.6362,
"step": 63
},
{
"epoch": 0.8,
"grad_norm": 0.5953827500343323,
"learning_rate": 0.00017702127659574468,
"loss": 0.6257,
"step": 64
},
{
"epoch": 0.8125,
"grad_norm": 0.5751234889030457,
"learning_rate": 0.00017659574468085107,
"loss": 0.5577,
"step": 65
},
{
"epoch": 0.825,
"grad_norm": 0.6519968509674072,
"learning_rate": 0.00017617021276595746,
"loss": 0.6904,
"step": 66
},
{
"epoch": 0.8375,
"grad_norm": 0.5914306640625,
"learning_rate": 0.00017574468085106384,
"loss": 0.6893,
"step": 67
},
{
"epoch": 0.85,
"grad_norm": 0.5872571468353271,
"learning_rate": 0.00017531914893617023,
"loss": 0.583,
"step": 68
},
{
"epoch": 0.8625,
"grad_norm": 0.6391447186470032,
"learning_rate": 0.0001748936170212766,
"loss": 0.6216,
"step": 69
},
{
"epoch": 0.875,
"grad_norm": 0.6009213924407959,
"learning_rate": 0.00017446808510638298,
"loss": 0.589,
"step": 70
},
{
"epoch": 0.8875,
"grad_norm": 0.5932193398475647,
"learning_rate": 0.00017404255319148937,
"loss": 0.6251,
"step": 71
},
{
"epoch": 0.9,
"grad_norm": 0.6414621472358704,
"learning_rate": 0.00017361702127659576,
"loss": 0.6258,
"step": 72
},
{
"epoch": 0.9125,
"grad_norm": 0.6453771591186523,
"learning_rate": 0.00017319148936170212,
"loss": 0.5774,
"step": 73
},
{
"epoch": 0.925,
"grad_norm": 0.6160643696784973,
"learning_rate": 0.0001727659574468085,
"loss": 0.6119,
"step": 74
},
{
"epoch": 0.9375,
"grad_norm": 0.6067684292793274,
"learning_rate": 0.0001723404255319149,
"loss": 0.652,
"step": 75
},
{
"epoch": 0.95,
"grad_norm": 0.623906135559082,
"learning_rate": 0.0001719148936170213,
"loss": 0.6086,
"step": 76
},
{
"epoch": 0.9625,
"grad_norm": 0.6175603270530701,
"learning_rate": 0.00017148936170212765,
"loss": 0.6485,
"step": 77
},
{
"epoch": 0.975,
"grad_norm": 0.6073062419891357,
"learning_rate": 0.00017106382978723404,
"loss": 0.5925,
"step": 78
},
{
"epoch": 0.9875,
"grad_norm": 0.5946528911590576,
"learning_rate": 0.00017063829787234043,
"loss": 0.58,
"step": 79
},
{
"epoch": 1.0,
"grad_norm": 0.9330272078514099,
"learning_rate": 0.00017021276595744682,
"loss": 0.7328,
"step": 80
},
{
"epoch": 1.0125,
"grad_norm": 0.5311029553413391,
"learning_rate": 0.00016978723404255318,
"loss": 0.4562,
"step": 81
},
{
"epoch": 1.025,
"grad_norm": 0.5370614528656006,
"learning_rate": 0.0001693617021276596,
"loss": 0.4304,
"step": 82
},
{
"epoch": 1.0375,
"grad_norm": 0.5361345410346985,
"learning_rate": 0.00016893617021276598,
"loss": 0.4604,
"step": 83
},
{
"epoch": 1.05,
"grad_norm": 0.6242905259132385,
"learning_rate": 0.00016851063829787235,
"loss": 0.4903,
"step": 84
},
{
"epoch": 1.0625,
"grad_norm": 0.6181880235671997,
"learning_rate": 0.00016808510638297873,
"loss": 0.4524,
"step": 85
},
{
"epoch": 1.075,
"grad_norm": 0.6893963813781738,
"learning_rate": 0.00016765957446808512,
"loss": 0.4964,
"step": 86
},
{
"epoch": 1.0875,
"grad_norm": 0.5981054902076721,
"learning_rate": 0.0001672340425531915,
"loss": 0.4067,
"step": 87
},
{
"epoch": 1.1,
"grad_norm": 0.6721552610397339,
"learning_rate": 0.00016680851063829787,
"loss": 0.5138,
"step": 88
},
{
"epoch": 1.1125,
"grad_norm": 0.7648325562477112,
"learning_rate": 0.00016638297872340426,
"loss": 0.4934,
"step": 89
},
{
"epoch": 1.125,
"grad_norm": 0.724721372127533,
"learning_rate": 0.00016595744680851065,
"loss": 0.5145,
"step": 90
},
{
"epoch": 1.1375,
"grad_norm": 0.6328797340393066,
"learning_rate": 0.00016553191489361704,
"loss": 0.4414,
"step": 91
},
{
"epoch": 1.15,
"grad_norm": 0.6504305005073547,
"learning_rate": 0.0001651063829787234,
"loss": 0.4773,
"step": 92
},
{
"epoch": 1.1625,
"grad_norm": 0.6754821538925171,
"learning_rate": 0.0001646808510638298,
"loss": 0.4611,
"step": 93
},
{
"epoch": 1.175,
"grad_norm": 0.75724858045578,
"learning_rate": 0.00016425531914893618,
"loss": 0.5343,
"step": 94
},
{
"epoch": 1.1875,
"grad_norm": 0.7232646942138672,
"learning_rate": 0.00016382978723404257,
"loss": 0.4531,
"step": 95
},
{
"epoch": 1.2,
"grad_norm": 0.7688515782356262,
"learning_rate": 0.00016340425531914893,
"loss": 0.5669,
"step": 96
},
{
"epoch": 1.2,
"eval_loss": 0.6230465769767761,
"eval_runtime": 9.9417,
"eval_samples_per_second": 3.621,
"eval_steps_per_second": 1.811,
"step": 96
},
{
"epoch": 1.2125,
"grad_norm": 0.6552408337593079,
"learning_rate": 0.00016297872340425532,
"loss": 0.4746,
"step": 97
},
{
"epoch": 1.225,
"grad_norm": 0.6849213242530823,
"learning_rate": 0.0001625531914893617,
"loss": 0.4506,
"step": 98
},
{
"epoch": 1.2375,
"grad_norm": 0.6564849019050598,
"learning_rate": 0.0001621276595744681,
"loss": 0.4336,
"step": 99
},
{
"epoch": 1.25,
"grad_norm": 0.660833477973938,
"learning_rate": 0.00016170212765957446,
"loss": 0.4918,
"step": 100
},
{
"epoch": 1.2625,
"grad_norm": 0.6267507076263428,
"learning_rate": 0.00016127659574468085,
"loss": 0.4693,
"step": 101
},
{
"epoch": 1.275,
"grad_norm": 0.6528612375259399,
"learning_rate": 0.00016085106382978726,
"loss": 0.4449,
"step": 102
},
{
"epoch": 1.2875,
"grad_norm": 0.6794521808624268,
"learning_rate": 0.00016042553191489362,
"loss": 0.4266,
"step": 103
},
{
"epoch": 1.3,
"grad_norm": 0.6800212264060974,
"learning_rate": 0.00016,
"loss": 0.4697,
"step": 104
},
{
"epoch": 1.3125,
"grad_norm": 0.6715764403343201,
"learning_rate": 0.00015957446808510637,
"loss": 0.481,
"step": 105
},
{
"epoch": 1.325,
"grad_norm": 0.7775130271911621,
"learning_rate": 0.0001591489361702128,
"loss": 0.4379,
"step": 106
},
{
"epoch": 1.3375,
"grad_norm": 0.6991564631462097,
"learning_rate": 0.00015872340425531915,
"loss": 0.4519,
"step": 107
},
{
"epoch": 1.35,
"grad_norm": 0.7205126881599426,
"learning_rate": 0.00015829787234042554,
"loss": 0.4595,
"step": 108
},
{
"epoch": 1.3625,
"grad_norm": 0.6301184892654419,
"learning_rate": 0.00015787234042553193,
"loss": 0.467,
"step": 109
},
{
"epoch": 1.375,
"grad_norm": 0.7067103981971741,
"learning_rate": 0.00015744680851063832,
"loss": 0.4619,
"step": 110
},
{
"epoch": 1.3875,
"grad_norm": 0.7016875743865967,
"learning_rate": 0.00015702127659574468,
"loss": 0.4747,
"step": 111
},
{
"epoch": 1.4,
"grad_norm": 0.6807534694671631,
"learning_rate": 0.00015659574468085107,
"loss": 0.477,
"step": 112
},
{
"epoch": 1.4125,
"grad_norm": 0.6434107422828674,
"learning_rate": 0.00015617021276595746,
"loss": 0.4486,
"step": 113
},
{
"epoch": 1.425,
"grad_norm": 0.611808717250824,
"learning_rate": 0.00015574468085106385,
"loss": 0.4184,
"step": 114
},
{
"epoch": 1.4375,
"grad_norm": 0.7139426469802856,
"learning_rate": 0.0001553191489361702,
"loss": 0.5161,
"step": 115
},
{
"epoch": 1.45,
"grad_norm": 0.6485333442687988,
"learning_rate": 0.0001548936170212766,
"loss": 0.4784,
"step": 116
},
{
"epoch": 1.4625,
"grad_norm": 0.7077224254608154,
"learning_rate": 0.00015446808510638299,
"loss": 0.5005,
"step": 117
},
{
"epoch": 1.475,
"grad_norm": 0.7061143517494202,
"learning_rate": 0.00015404255319148937,
"loss": 0.433,
"step": 118
},
{
"epoch": 1.4875,
"grad_norm": 0.6362001895904541,
"learning_rate": 0.00015361702127659576,
"loss": 0.39,
"step": 119
},
{
"epoch": 1.5,
"grad_norm": 0.742213249206543,
"learning_rate": 0.00015319148936170213,
"loss": 0.4926,
"step": 120
},
{
"epoch": 1.5125,
"grad_norm": 0.6973474621772766,
"learning_rate": 0.00015276595744680851,
"loss": 0.4787,
"step": 121
},
{
"epoch": 1.525,
"grad_norm": 0.6767258048057556,
"learning_rate": 0.0001523404255319149,
"loss": 0.3961,
"step": 122
},
{
"epoch": 1.5375,
"grad_norm": 0.8685336112976074,
"learning_rate": 0.0001519148936170213,
"loss": 0.4258,
"step": 123
},
{
"epoch": 1.55,
"grad_norm": 0.8819417357444763,
"learning_rate": 0.00015148936170212765,
"loss": 0.4783,
"step": 124
},
{
"epoch": 1.5625,
"grad_norm": 0.7913158535957336,
"learning_rate": 0.00015106382978723407,
"loss": 0.4467,
"step": 125
},
{
"epoch": 1.575,
"grad_norm": 0.8557113409042358,
"learning_rate": 0.00015063829787234043,
"loss": 0.499,
"step": 126
},
{
"epoch": 1.5875,
"grad_norm": 0.7897730469703674,
"learning_rate": 0.00015021276595744682,
"loss": 0.5141,
"step": 127
},
{
"epoch": 1.6,
"grad_norm": 0.7041512727737427,
"learning_rate": 0.00014978723404255318,
"loss": 0.4209,
"step": 128
},
{
"epoch": 1.6125,
"grad_norm": 0.7383488416671753,
"learning_rate": 0.0001493617021276596,
"loss": 0.4483,
"step": 129
},
{
"epoch": 1.625,
"grad_norm": 0.6808694005012512,
"learning_rate": 0.00014893617021276596,
"loss": 0.4233,
"step": 130
},
{
"epoch": 1.6375,
"grad_norm": 0.6982035636901855,
"learning_rate": 0.00014851063829787235,
"loss": 0.5055,
"step": 131
},
{
"epoch": 1.65,
"grad_norm": 0.7038189768791199,
"learning_rate": 0.0001480851063829787,
"loss": 0.4551,
"step": 132
},
{
"epoch": 1.6625,
"grad_norm": 0.6448100209236145,
"learning_rate": 0.00014765957446808513,
"loss": 0.4172,
"step": 133
},
{
"epoch": 1.675,
"grad_norm": 0.7629950046539307,
"learning_rate": 0.0001472340425531915,
"loss": 0.4794,
"step": 134
},
{
"epoch": 1.6875,
"grad_norm": 0.6743873357772827,
"learning_rate": 0.00014680851063829788,
"loss": 0.4207,
"step": 135
},
{
"epoch": 1.7,
"grad_norm": 0.6978509426116943,
"learning_rate": 0.00014638297872340426,
"loss": 0.4572,
"step": 136
},
{
"epoch": 1.7125,
"grad_norm": 0.7238950133323669,
"learning_rate": 0.00014595744680851065,
"loss": 0.4391,
"step": 137
},
{
"epoch": 1.725,
"grad_norm": 0.7844693660736084,
"learning_rate": 0.00014553191489361704,
"loss": 0.5253,
"step": 138
},
{
"epoch": 1.7375,
"grad_norm": 0.7656735181808472,
"learning_rate": 0.0001451063829787234,
"loss": 0.4311,
"step": 139
},
{
"epoch": 1.75,
"grad_norm": 0.7286013960838318,
"learning_rate": 0.0001446808510638298,
"loss": 0.4573,
"step": 140
},
{
"epoch": 1.7625,
"grad_norm": 0.7938070297241211,
"learning_rate": 0.00014425531914893618,
"loss": 0.5007,
"step": 141
},
{
"epoch": 1.775,
"grad_norm": 0.7464184165000916,
"learning_rate": 0.00014382978723404257,
"loss": 0.4731,
"step": 142
},
{
"epoch": 1.7875,
"grad_norm": 0.7830186486244202,
"learning_rate": 0.00014340425531914893,
"loss": 0.4451,
"step": 143
},
{
"epoch": 1.8,
"grad_norm": 0.6778513193130493,
"learning_rate": 0.00014297872340425532,
"loss": 0.4389,
"step": 144
},
{
"epoch": 1.8125,
"grad_norm": 0.7377083897590637,
"learning_rate": 0.0001425531914893617,
"loss": 0.4734,
"step": 145
},
{
"epoch": 1.825,
"grad_norm": 0.7428882718086243,
"learning_rate": 0.0001421276595744681,
"loss": 0.4741,
"step": 146
},
{
"epoch": 1.8375,
"grad_norm": 0.7576317191123962,
"learning_rate": 0.00014170212765957446,
"loss": 0.4952,
"step": 147
},
{
"epoch": 1.85,
"grad_norm": 0.6553872227668762,
"learning_rate": 0.00014127659574468085,
"loss": 0.4051,
"step": 148
},
{
"epoch": 1.8625,
"grad_norm": 0.7060889005661011,
"learning_rate": 0.00014085106382978724,
"loss": 0.4427,
"step": 149
},
{
"epoch": 1.875,
"grad_norm": 0.7130544185638428,
"learning_rate": 0.00014042553191489363,
"loss": 0.4519,
"step": 150
},
{
"epoch": 1.8875,
"grad_norm": 0.7197202444076538,
"learning_rate": 0.00014,
"loss": 0.4621,
"step": 151
},
{
"epoch": 1.9,
"grad_norm": 0.7964305877685547,
"learning_rate": 0.0001395744680851064,
"loss": 0.5367,
"step": 152
},
{
"epoch": 1.9125,
"grad_norm": 0.7618861794471741,
"learning_rate": 0.00013914893617021277,
"loss": 0.5231,
"step": 153
},
{
"epoch": 1.925,
"grad_norm": 0.6833578944206238,
"learning_rate": 0.00013872340425531915,
"loss": 0.4354,
"step": 154
},
{
"epoch": 1.9375,
"grad_norm": 0.6926935911178589,
"learning_rate": 0.00013829787234042554,
"loss": 0.4595,
"step": 155
},
{
"epoch": 1.95,
"grad_norm": 0.755796492099762,
"learning_rate": 0.00013787234042553193,
"loss": 0.4714,
"step": 156
},
{
"epoch": 1.9625,
"grad_norm": 0.8154721856117249,
"learning_rate": 0.00013744680851063832,
"loss": 0.4514,
"step": 157
},
{
"epoch": 1.975,
"grad_norm": 0.7180888652801514,
"learning_rate": 0.00013702127659574468,
"loss": 0.4487,
"step": 158
},
{
"epoch": 1.9875,
"grad_norm": 0.7182486653327942,
"learning_rate": 0.00013659574468085107,
"loss": 0.4651,
"step": 159
},
{
"epoch": 2.0,
"grad_norm": 0.8878002166748047,
"learning_rate": 0.00013617021276595746,
"loss": 0.4236,
"step": 160
},
{
"epoch": 2.0125,
"grad_norm": 0.6806989908218384,
"learning_rate": 0.00013574468085106385,
"loss": 0.294,
"step": 161
},
{
"epoch": 2.025,
"grad_norm": 0.7871399521827698,
"learning_rate": 0.0001353191489361702,
"loss": 0.2722,
"step": 162
},
{
"epoch": 2.0375,
"grad_norm": 0.615963339805603,
"learning_rate": 0.0001348936170212766,
"loss": 0.2559,
"step": 163
},
{
"epoch": 2.05,
"grad_norm": 0.6783286929130554,
"learning_rate": 0.000134468085106383,
"loss": 0.2697,
"step": 164
},
{
"epoch": 2.0625,
"grad_norm": 0.69609135389328,
"learning_rate": 0.00013404255319148938,
"loss": 0.2952,
"step": 165
},
{
"epoch": 2.075,
"grad_norm": 0.7641659379005432,
"learning_rate": 0.00013361702127659574,
"loss": 0.2784,
"step": 166
},
{
"epoch": 2.0875,
"grad_norm": 0.7802071571350098,
"learning_rate": 0.00013319148936170213,
"loss": 0.268,
"step": 167
},
{
"epoch": 2.1,
"grad_norm": 0.7029564380645752,
"learning_rate": 0.00013276595744680852,
"loss": 0.2239,
"step": 168
},
{
"epoch": 2.1125,
"grad_norm": 0.8360403776168823,
"learning_rate": 0.0001323404255319149,
"loss": 0.2797,
"step": 169
},
{
"epoch": 2.125,
"grad_norm": 1.003860592842102,
"learning_rate": 0.00013191489361702127,
"loss": 0.3147,
"step": 170
},
{
"epoch": 2.1375,
"grad_norm": 0.8554705381393433,
"learning_rate": 0.00013148936170212765,
"loss": 0.2898,
"step": 171
},
{
"epoch": 2.15,
"grad_norm": 0.856831431388855,
"learning_rate": 0.00013106382978723404,
"loss": 0.244,
"step": 172
},
{
"epoch": 2.1625,
"grad_norm": 0.840379536151886,
"learning_rate": 0.00013063829787234043,
"loss": 0.2511,
"step": 173
},
{
"epoch": 2.175,
"grad_norm": 0.7467439770698547,
"learning_rate": 0.00013021276595744682,
"loss": 0.2394,
"step": 174
},
{
"epoch": 2.1875,
"grad_norm": 0.8085935115814209,
"learning_rate": 0.00012978723404255318,
"loss": 0.2612,
"step": 175
},
{
"epoch": 2.2,
"grad_norm": 0.7966387271881104,
"learning_rate": 0.0001293617021276596,
"loss": 0.2646,
"step": 176
},
{
"epoch": 2.2125,
"grad_norm": 0.8461523056030273,
"learning_rate": 0.00012893617021276596,
"loss": 0.2697,
"step": 177
},
{
"epoch": 2.225,
"grad_norm": 0.7941194176673889,
"learning_rate": 0.00012851063829787235,
"loss": 0.2326,
"step": 178
},
{
"epoch": 2.2375,
"grad_norm": 0.7666419744491577,
"learning_rate": 0.00012808510638297874,
"loss": 0.2247,
"step": 179
},
{
"epoch": 2.25,
"grad_norm": 0.788275420665741,
"learning_rate": 0.00012765957446808513,
"loss": 0.311,
"step": 180
},
{
"epoch": 2.2625,
"grad_norm": 0.8255228996276855,
"learning_rate": 0.0001272340425531915,
"loss": 0.2662,
"step": 181
},
{
"epoch": 2.275,
"grad_norm": 0.8195288181304932,
"learning_rate": 0.00012680851063829788,
"loss": 0.2683,
"step": 182
},
{
"epoch": 2.2875,
"grad_norm": 0.7774321436882019,
"learning_rate": 0.00012638297872340427,
"loss": 0.2431,
"step": 183
},
{
"epoch": 2.3,
"grad_norm": 0.8164303302764893,
"learning_rate": 0.00012595744680851065,
"loss": 0.2674,
"step": 184
},
{
"epoch": 2.3125,
"grad_norm": 0.8182231783866882,
"learning_rate": 0.00012553191489361702,
"loss": 0.2922,
"step": 185
},
{
"epoch": 2.325,
"grad_norm": 0.7818589806556702,
"learning_rate": 0.0001251063829787234,
"loss": 0.2558,
"step": 186
},
{
"epoch": 2.3375,
"grad_norm": 0.809130847454071,
"learning_rate": 0.0001246808510638298,
"loss": 0.2387,
"step": 187
},
{
"epoch": 2.35,
"grad_norm": 0.7757312059402466,
"learning_rate": 0.00012425531914893618,
"loss": 0.2685,
"step": 188
},
{
"epoch": 2.3625,
"grad_norm": 0.8028156757354736,
"learning_rate": 0.00012382978723404254,
"loss": 0.2618,
"step": 189
},
{
"epoch": 2.375,
"grad_norm": 0.8730686902999878,
"learning_rate": 0.00012340425531914893,
"loss": 0.2668,
"step": 190
},
{
"epoch": 2.3875,
"grad_norm": 0.8512089252471924,
"learning_rate": 0.00012297872340425535,
"loss": 0.2687,
"step": 191
},
{
"epoch": 2.4,
"grad_norm": 0.8351570963859558,
"learning_rate": 0.0001225531914893617,
"loss": 0.2612,
"step": 192
},
{
"epoch": 2.4,
"eval_loss": 0.7145271301269531,
"eval_runtime": 11.4074,
"eval_samples_per_second": 3.156,
"eval_steps_per_second": 1.578,
"step": 192
},
{
"epoch": 2.4125,
"grad_norm": 0.7363073825836182,
"learning_rate": 0.0001221276595744681,
"loss": 0.2462,
"step": 193
},
{
"epoch": 2.425,
"grad_norm": 0.7738860845565796,
"learning_rate": 0.00012170212765957448,
"loss": 0.2455,
"step": 194
},
{
"epoch": 2.4375,
"grad_norm": 0.7793389558792114,
"learning_rate": 0.00012127659574468086,
"loss": 0.2357,
"step": 195
},
{
"epoch": 2.45,
"grad_norm": 1.0476254224777222,
"learning_rate": 0.00012085106382978724,
"loss": 0.3119,
"step": 196
},
{
"epoch": 2.4625,
"grad_norm": 0.9584843516349792,
"learning_rate": 0.00012042553191489363,
"loss": 0.2931,
"step": 197
},
{
"epoch": 2.475,
"grad_norm": 0.7162908911705017,
"learning_rate": 0.00012,
"loss": 0.2134,
"step": 198
},
{
"epoch": 2.4875,
"grad_norm": 0.7601090669631958,
"learning_rate": 0.00011957446808510639,
"loss": 0.2444,
"step": 199
},
{
"epoch": 2.5,
"grad_norm": 0.7643441557884216,
"learning_rate": 0.00011914893617021277,
"loss": 0.2667,
"step": 200
},
{
"epoch": 2.5125,
"grad_norm": 0.7849262952804565,
"learning_rate": 0.00011872340425531916,
"loss": 0.2629,
"step": 201
},
{
"epoch": 2.525,
"grad_norm": 0.8611546754837036,
"learning_rate": 0.00011829787234042553,
"loss": 0.2728,
"step": 202
},
{
"epoch": 2.5375,
"grad_norm": 0.8595278263092041,
"learning_rate": 0.00011787234042553192,
"loss": 0.2749,
"step": 203
},
{
"epoch": 2.55,
"grad_norm": 0.778231143951416,
"learning_rate": 0.0001174468085106383,
"loss": 0.2779,
"step": 204
},
{
"epoch": 2.5625,
"grad_norm": 0.9688648581504822,
"learning_rate": 0.00011702127659574468,
"loss": 0.3187,
"step": 205
},
{
"epoch": 2.575,
"grad_norm": 0.842782199382782,
"learning_rate": 0.00011659574468085106,
"loss": 0.2624,
"step": 206
},
{
"epoch": 2.5875,
"grad_norm": 0.7811837792396545,
"learning_rate": 0.00011617021276595745,
"loss": 0.2753,
"step": 207
},
{
"epoch": 2.6,
"grad_norm": 0.8029890060424805,
"learning_rate": 0.00011574468085106382,
"loss": 0.2704,
"step": 208
},
{
"epoch": 2.6125,
"grad_norm": 0.8911982178688049,
"learning_rate": 0.00011531914893617021,
"loss": 0.2821,
"step": 209
},
{
"epoch": 2.625,
"grad_norm": 0.7516865730285645,
"learning_rate": 0.00011489361702127661,
"loss": 0.2411,
"step": 210
},
{
"epoch": 2.6375,
"grad_norm": 0.8153129816055298,
"learning_rate": 0.00011446808510638299,
"loss": 0.2939,
"step": 211
},
{
"epoch": 2.65,
"grad_norm": 0.8791115283966064,
"learning_rate": 0.00011404255319148938,
"loss": 0.2528,
"step": 212
},
{
"epoch": 2.6625,
"grad_norm": 0.8871167898178101,
"learning_rate": 0.00011361702127659575,
"loss": 0.28,
"step": 213
},
{
"epoch": 2.675,
"grad_norm": 0.8256612420082092,
"learning_rate": 0.00011319148936170214,
"loss": 0.3028,
"step": 214
},
{
"epoch": 2.6875,
"grad_norm": 0.8403842449188232,
"learning_rate": 0.00011276595744680852,
"loss": 0.2826,
"step": 215
},
{
"epoch": 2.7,
"grad_norm": 0.773061215877533,
"learning_rate": 0.0001123404255319149,
"loss": 0.2667,
"step": 216
},
{
"epoch": 2.7125,
"grad_norm": 0.7826652526855469,
"learning_rate": 0.00011191489361702128,
"loss": 0.249,
"step": 217
},
{
"epoch": 2.725,
"grad_norm": 0.7895304560661316,
"learning_rate": 0.00011148936170212767,
"loss": 0.2549,
"step": 218
},
{
"epoch": 2.7375,
"grad_norm": 0.778076171875,
"learning_rate": 0.00011106382978723405,
"loss": 0.2731,
"step": 219
},
{
"epoch": 2.75,
"grad_norm": 0.8927327990531921,
"learning_rate": 0.00011063829787234043,
"loss": 0.2986,
"step": 220
},
{
"epoch": 2.7625,
"grad_norm": 0.9613041877746582,
"learning_rate": 0.00011021276595744681,
"loss": 0.3228,
"step": 221
},
{
"epoch": 2.775,
"grad_norm": 0.7272498607635498,
"learning_rate": 0.0001097872340425532,
"loss": 0.2297,
"step": 222
},
{
"epoch": 2.7875,
"grad_norm": 0.8113637566566467,
"learning_rate": 0.00010936170212765957,
"loss": 0.2812,
"step": 223
},
{
"epoch": 2.8,
"grad_norm": 0.9033439755439758,
"learning_rate": 0.00010893617021276596,
"loss": 0.3016,
"step": 224
},
{
"epoch": 2.8125,
"grad_norm": 0.7996534109115601,
"learning_rate": 0.00010851063829787234,
"loss": 0.2496,
"step": 225
},
{
"epoch": 2.825,
"grad_norm": 0.7771035432815552,
"learning_rate": 0.00010808510638297873,
"loss": 0.2852,
"step": 226
},
{
"epoch": 2.8375,
"grad_norm": 0.7756218314170837,
"learning_rate": 0.00010765957446808513,
"loss": 0.2703,
"step": 227
},
{
"epoch": 2.85,
"grad_norm": 0.8187890648841858,
"learning_rate": 0.00010723404255319149,
"loss": 0.2855,
"step": 228
},
{
"epoch": 2.8625,
"grad_norm": 0.858629047870636,
"learning_rate": 0.00010680851063829789,
"loss": 0.2628,
"step": 229
},
{
"epoch": 2.875,
"grad_norm": 0.83708655834198,
"learning_rate": 0.00010638297872340425,
"loss": 0.2606,
"step": 230
},
{
"epoch": 2.8875,
"grad_norm": 0.7959954738616943,
"learning_rate": 0.00010595744680851066,
"loss": 0.3034,
"step": 231
},
{
"epoch": 2.9,
"grad_norm": 0.7901068329811096,
"learning_rate": 0.00010553191489361702,
"loss": 0.2259,
"step": 232
},
{
"epoch": 2.9125,
"grad_norm": 0.8610655665397644,
"learning_rate": 0.00010510638297872342,
"loss": 0.2815,
"step": 233
},
{
"epoch": 2.925,
"grad_norm": 0.8414681553840637,
"learning_rate": 0.00010468085106382978,
"loss": 0.2969,
"step": 234
},
{
"epoch": 2.9375,
"grad_norm": 0.8410956263542175,
"learning_rate": 0.00010425531914893618,
"loss": 0.2742,
"step": 235
},
{
"epoch": 2.95,
"grad_norm": 0.8325995802879333,
"learning_rate": 0.00010382978723404255,
"loss": 0.2653,
"step": 236
},
{
"epoch": 2.9625,
"grad_norm": 0.7669159173965454,
"learning_rate": 0.00010340425531914895,
"loss": 0.2579,
"step": 237
},
{
"epoch": 2.975,
"grad_norm": 0.7911613583564758,
"learning_rate": 0.00010297872340425532,
"loss": 0.3053,
"step": 238
},
{
"epoch": 2.9875,
"grad_norm": 0.7572367787361145,
"learning_rate": 0.00010255319148936171,
"loss": 0.2515,
"step": 239
},
{
"epoch": 3.0,
"grad_norm": 1.0301828384399414,
"learning_rate": 0.00010212765957446809,
"loss": 0.2728,
"step": 240
},
{
"epoch": 3.0125,
"grad_norm": 0.6737964153289795,
"learning_rate": 0.00010170212765957448,
"loss": 0.1407,
"step": 241
},
{
"epoch": 3.025,
"grad_norm": 0.7623804807662964,
"learning_rate": 0.00010127659574468085,
"loss": 0.1287,
"step": 242
},
{
"epoch": 3.0375,
"grad_norm": 0.7205766439437866,
"learning_rate": 0.00010085106382978724,
"loss": 0.1547,
"step": 243
},
{
"epoch": 3.05,
"grad_norm": 0.905816912651062,
"learning_rate": 0.00010042553191489362,
"loss": 0.1846,
"step": 244
},
{
"epoch": 3.0625,
"grad_norm": 0.7594558596611023,
"learning_rate": 0.0001,
"loss": 0.1563,
"step": 245
},
{
"epoch": 3.075,
"grad_norm": 0.6924287676811218,
"learning_rate": 9.95744680851064e-05,
"loss": 0.1445,
"step": 246
},
{
"epoch": 3.0875,
"grad_norm": 0.6288279891014099,
"learning_rate": 9.914893617021277e-05,
"loss": 0.1235,
"step": 247
},
{
"epoch": 3.1,
"grad_norm": 0.6929881572723389,
"learning_rate": 9.872340425531916e-05,
"loss": 0.1379,
"step": 248
},
{
"epoch": 3.1125,
"grad_norm": 0.9299332499504089,
"learning_rate": 9.829787234042553e-05,
"loss": 0.1521,
"step": 249
},
{
"epoch": 3.125,
"grad_norm": 0.9375365376472473,
"learning_rate": 9.787234042553192e-05,
"loss": 0.145,
"step": 250
},
{
"epoch": 3.1375,
"grad_norm": 0.8231996893882751,
"learning_rate": 9.74468085106383e-05,
"loss": 0.1228,
"step": 251
},
{
"epoch": 3.15,
"grad_norm": 0.8826015591621399,
"learning_rate": 9.702127659574469e-05,
"loss": 0.1487,
"step": 252
},
{
"epoch": 3.1625,
"grad_norm": 0.8321478962898254,
"learning_rate": 9.659574468085106e-05,
"loss": 0.1346,
"step": 253
},
{
"epoch": 3.175,
"grad_norm": 1.043582558631897,
"learning_rate": 9.617021276595745e-05,
"loss": 0.1587,
"step": 254
},
{
"epoch": 3.1875,
"grad_norm": 0.8663302063941956,
"learning_rate": 9.574468085106384e-05,
"loss": 0.1253,
"step": 255
},
{
"epoch": 3.2,
"grad_norm": 0.7811707854270935,
"learning_rate": 9.531914893617023e-05,
"loss": 0.1427,
"step": 256
},
{
"epoch": 3.2125,
"grad_norm": 0.8035542964935303,
"learning_rate": 9.48936170212766e-05,
"loss": 0.1374,
"step": 257
},
{
"epoch": 3.225,
"grad_norm": 0.8229654431343079,
"learning_rate": 9.446808510638299e-05,
"loss": 0.124,
"step": 258
},
{
"epoch": 3.2375,
"grad_norm": 0.7131748199462891,
"learning_rate": 9.404255319148937e-05,
"loss": 0.1197,
"step": 259
},
{
"epoch": 3.25,
"grad_norm": 0.7480083703994751,
"learning_rate": 9.361702127659576e-05,
"loss": 0.1336,
"step": 260
},
{
"epoch": 3.2625,
"grad_norm": 0.7220382690429688,
"learning_rate": 9.319148936170213e-05,
"loss": 0.128,
"step": 261
},
{
"epoch": 3.275,
"grad_norm": 0.8449149131774902,
"learning_rate": 9.276595744680852e-05,
"loss": 0.1661,
"step": 262
},
{
"epoch": 3.2875,
"grad_norm": 0.6896558403968811,
"learning_rate": 9.23404255319149e-05,
"loss": 0.1231,
"step": 263
},
{
"epoch": 3.3,
"grad_norm": 0.7313699126243591,
"learning_rate": 9.191489361702128e-05,
"loss": 0.1532,
"step": 264
},
{
"epoch": 3.3125,
"grad_norm": 0.8504534959793091,
"learning_rate": 9.148936170212766e-05,
"loss": 0.1447,
"step": 265
},
{
"epoch": 3.325,
"grad_norm": 0.8592658638954163,
"learning_rate": 9.106382978723405e-05,
"loss": 0.1429,
"step": 266
},
{
"epoch": 3.3375,
"grad_norm": 0.7510311007499695,
"learning_rate": 9.063829787234042e-05,
"loss": 0.1191,
"step": 267
},
{
"epoch": 3.35,
"grad_norm": 0.772495687007904,
"learning_rate": 9.021276595744681e-05,
"loss": 0.1525,
"step": 268
},
{
"epoch": 3.3625,
"grad_norm": 0.8163837790489197,
"learning_rate": 8.978723404255319e-05,
"loss": 0.1463,
"step": 269
},
{
"epoch": 3.375,
"grad_norm": 0.8092824816703796,
"learning_rate": 8.936170212765958e-05,
"loss": 0.1373,
"step": 270
},
{
"epoch": 3.3875,
"grad_norm": 0.8845824003219604,
"learning_rate": 8.893617021276595e-05,
"loss": 0.1389,
"step": 271
},
{
"epoch": 3.4,
"grad_norm": 0.8480741381645203,
"learning_rate": 8.851063829787234e-05,
"loss": 0.1481,
"step": 272
},
{
"epoch": 3.4125,
"grad_norm": 0.727211594581604,
"learning_rate": 8.808510638297873e-05,
"loss": 0.1319,
"step": 273
},
{
"epoch": 3.425,
"grad_norm": 0.8201771378517151,
"learning_rate": 8.765957446808512e-05,
"loss": 0.1471,
"step": 274
},
{
"epoch": 3.4375,
"grad_norm": 0.8638917803764343,
"learning_rate": 8.723404255319149e-05,
"loss": 0.1558,
"step": 275
},
{
"epoch": 3.45,
"grad_norm": 0.9420880079269409,
"learning_rate": 8.680851063829788e-05,
"loss": 0.1336,
"step": 276
},
{
"epoch": 3.4625,
"grad_norm": 0.827743411064148,
"learning_rate": 8.638297872340426e-05,
"loss": 0.1351,
"step": 277
},
{
"epoch": 3.475,
"grad_norm": 0.9593076705932617,
"learning_rate": 8.595744680851064e-05,
"loss": 0.1355,
"step": 278
},
{
"epoch": 3.4875,
"grad_norm": 0.8220702409744263,
"learning_rate": 8.553191489361702e-05,
"loss": 0.1358,
"step": 279
},
{
"epoch": 3.5,
"grad_norm": 0.977692186832428,
"learning_rate": 8.510638297872341e-05,
"loss": 0.1694,
"step": 280
},
{
"epoch": 3.5125,
"grad_norm": 0.7905257344245911,
"learning_rate": 8.46808510638298e-05,
"loss": 0.1275,
"step": 281
},
{
"epoch": 3.525,
"grad_norm": 0.8010748624801636,
"learning_rate": 8.425531914893617e-05,
"loss": 0.1375,
"step": 282
},
{
"epoch": 3.5375,
"grad_norm": 0.7461961507797241,
"learning_rate": 8.382978723404256e-05,
"loss": 0.1371,
"step": 283
},
{
"epoch": 3.55,
"grad_norm": 0.8024889826774597,
"learning_rate": 8.340425531914894e-05,
"loss": 0.1368,
"step": 284
},
{
"epoch": 3.5625,
"grad_norm": 0.8584401607513428,
"learning_rate": 8.297872340425533e-05,
"loss": 0.1492,
"step": 285
},
{
"epoch": 3.575,
"grad_norm": 0.8518531918525696,
"learning_rate": 8.25531914893617e-05,
"loss": 0.1533,
"step": 286
},
{
"epoch": 3.5875,
"grad_norm": 0.8000539541244507,
"learning_rate": 8.212765957446809e-05,
"loss": 0.154,
"step": 287
},
{
"epoch": 3.6,
"grad_norm": 0.9574905037879944,
"learning_rate": 8.170212765957446e-05,
"loss": 0.1689,
"step": 288
},
{
"epoch": 3.6,
"eval_loss": 0.8238211870193481,
"eval_runtime": 10.9659,
"eval_samples_per_second": 3.283,
"eval_steps_per_second": 1.641,
"step": 288
},
{
"epoch": 3.6125,
"grad_norm": 0.8474534749984741,
"learning_rate": 8.127659574468085e-05,
"loss": 0.1524,
"step": 289
},
{
"epoch": 3.625,
"grad_norm": 0.7077217698097229,
"learning_rate": 8.085106382978723e-05,
"loss": 0.1369,
"step": 290
},
{
"epoch": 3.6375,
"grad_norm": 0.8012158274650574,
"learning_rate": 8.042553191489363e-05,
"loss": 0.1261,
"step": 291
},
{
"epoch": 3.65,
"grad_norm": 0.7327490448951721,
"learning_rate": 8e-05,
"loss": 0.1338,
"step": 292
},
{
"epoch": 3.6625,
"grad_norm": 0.7753359079360962,
"learning_rate": 7.95744680851064e-05,
"loss": 0.1439,
"step": 293
},
{
"epoch": 3.675,
"grad_norm": 0.8457762598991394,
"learning_rate": 7.914893617021277e-05,
"loss": 0.1343,
"step": 294
},
{
"epoch": 3.6875,
"grad_norm": 0.8116350173950195,
"learning_rate": 7.872340425531916e-05,
"loss": 0.1514,
"step": 295
},
{
"epoch": 3.7,
"grad_norm": 0.8146523833274841,
"learning_rate": 7.829787234042553e-05,
"loss": 0.1485,
"step": 296
},
{
"epoch": 3.7125,
"grad_norm": 0.6425414085388184,
"learning_rate": 7.787234042553192e-05,
"loss": 0.0959,
"step": 297
},
{
"epoch": 3.725,
"grad_norm": 0.9455553293228149,
"learning_rate": 7.74468085106383e-05,
"loss": 0.1597,
"step": 298
},
{
"epoch": 3.7375,
"grad_norm": 0.8597568273544312,
"learning_rate": 7.702127659574469e-05,
"loss": 0.1614,
"step": 299
},
{
"epoch": 3.75,
"grad_norm": 0.8073698878288269,
"learning_rate": 7.659574468085106e-05,
"loss": 0.1353,
"step": 300
},
{
"epoch": 3.7625,
"grad_norm": 0.7673254609107971,
"learning_rate": 7.617021276595745e-05,
"loss": 0.1188,
"step": 301
},
{
"epoch": 3.775,
"grad_norm": 0.8758047819137573,
"learning_rate": 7.574468085106383e-05,
"loss": 0.1839,
"step": 302
},
{
"epoch": 3.7875,
"grad_norm": 0.9272107481956482,
"learning_rate": 7.531914893617022e-05,
"loss": 0.1497,
"step": 303
},
{
"epoch": 3.8,
"grad_norm": 0.7681931257247925,
"learning_rate": 7.489361702127659e-05,
"loss": 0.1469,
"step": 304
},
{
"epoch": 3.8125,
"grad_norm": 0.8682704567909241,
"learning_rate": 7.446808510638298e-05,
"loss": 0.1618,
"step": 305
},
{
"epoch": 3.825,
"grad_norm": 1.0033032894134521,
"learning_rate": 7.404255319148935e-05,
"loss": 0.1621,
"step": 306
},
{
"epoch": 3.8375,
"grad_norm": 1.0055568218231201,
"learning_rate": 7.361702127659574e-05,
"loss": 0.1735,
"step": 307
},
{
"epoch": 3.85,
"grad_norm": 0.7715242505073547,
"learning_rate": 7.319148936170213e-05,
"loss": 0.1466,
"step": 308
},
{
"epoch": 3.8625,
"grad_norm": 0.8541414141654968,
"learning_rate": 7.276595744680852e-05,
"loss": 0.1411,
"step": 309
},
{
"epoch": 3.875,
"grad_norm": 0.7181360721588135,
"learning_rate": 7.23404255319149e-05,
"loss": 0.1391,
"step": 310
},
{
"epoch": 3.8875,
"grad_norm": 0.7194767594337463,
"learning_rate": 7.191489361702129e-05,
"loss": 0.1379,
"step": 311
},
{
"epoch": 3.9,
"grad_norm": 0.7853153347969055,
"learning_rate": 7.148936170212766e-05,
"loss": 0.1468,
"step": 312
},
{
"epoch": 3.9125,
"grad_norm": 0.7511010766029358,
"learning_rate": 7.106382978723405e-05,
"loss": 0.1377,
"step": 313
},
{
"epoch": 3.925,
"grad_norm": 0.7181860208511353,
"learning_rate": 7.063829787234042e-05,
"loss": 0.1389,
"step": 314
},
{
"epoch": 3.9375,
"grad_norm": 0.7916765809059143,
"learning_rate": 7.021276595744681e-05,
"loss": 0.1527,
"step": 315
},
{
"epoch": 3.95,
"grad_norm": 0.6324968338012695,
"learning_rate": 6.97872340425532e-05,
"loss": 0.1062,
"step": 316
},
{
"epoch": 3.9625,
"grad_norm": 0.8582820296287537,
"learning_rate": 6.936170212765958e-05,
"loss": 0.1788,
"step": 317
},
{
"epoch": 3.975,
"grad_norm": 0.7167566418647766,
"learning_rate": 6.893617021276597e-05,
"loss": 0.1376,
"step": 318
},
{
"epoch": 3.9875,
"grad_norm": 0.8698075413703918,
"learning_rate": 6.851063829787234e-05,
"loss": 0.1651,
"step": 319
},
{
"epoch": 4.0,
"grad_norm": 0.817995548248291,
"learning_rate": 6.808510638297873e-05,
"loss": 0.1072,
"step": 320
},
{
"epoch": 4.0125,
"grad_norm": 0.6073539853096008,
"learning_rate": 6.76595744680851e-05,
"loss": 0.0881,
"step": 321
},
{
"epoch": 4.025,
"grad_norm": 0.6550564765930176,
"learning_rate": 6.72340425531915e-05,
"loss": 0.1131,
"step": 322
},
{
"epoch": 4.0375,
"grad_norm": 0.6034475564956665,
"learning_rate": 6.680851063829787e-05,
"loss": 0.0758,
"step": 323
},
{
"epoch": 4.05,
"grad_norm": 0.81227707862854,
"learning_rate": 6.638297872340426e-05,
"loss": 0.0951,
"step": 324
},
{
"epoch": 4.0625,
"grad_norm": 0.5546196699142456,
"learning_rate": 6.595744680851063e-05,
"loss": 0.0759,
"step": 325
},
{
"epoch": 4.075,
"grad_norm": 0.5332831740379333,
"learning_rate": 6.553191489361702e-05,
"loss": 0.0696,
"step": 326
},
{
"epoch": 4.0875,
"grad_norm": 0.7316128611564636,
"learning_rate": 6.510638297872341e-05,
"loss": 0.0938,
"step": 327
},
{
"epoch": 4.1,
"grad_norm": 0.6615329384803772,
"learning_rate": 6.46808510638298e-05,
"loss": 0.0805,
"step": 328
},
{
"epoch": 4.1125,
"grad_norm": 0.6748345494270325,
"learning_rate": 6.425531914893617e-05,
"loss": 0.0708,
"step": 329
},
{
"epoch": 4.125,
"grad_norm": 0.692646861076355,
"learning_rate": 6.382978723404256e-05,
"loss": 0.0782,
"step": 330
},
{
"epoch": 4.1375,
"grad_norm": 0.7430183291435242,
"learning_rate": 6.340425531914894e-05,
"loss": 0.0723,
"step": 331
},
{
"epoch": 4.15,
"grad_norm": 0.6600062251091003,
"learning_rate": 6.297872340425533e-05,
"loss": 0.0686,
"step": 332
},
{
"epoch": 4.1625,
"grad_norm": 0.8680083155632019,
"learning_rate": 6.25531914893617e-05,
"loss": 0.0822,
"step": 333
},
{
"epoch": 4.175,
"grad_norm": 0.7598772644996643,
"learning_rate": 6.212765957446809e-05,
"loss": 0.083,
"step": 334
},
{
"epoch": 4.1875,
"grad_norm": 0.769776463508606,
"learning_rate": 6.170212765957447e-05,
"loss": 0.076,
"step": 335
},
{
"epoch": 4.2,
"grad_norm": 0.6409958600997925,
"learning_rate": 6.127659574468086e-05,
"loss": 0.0712,
"step": 336
},
{
"epoch": 4.2125,
"grad_norm": 0.8126680254936218,
"learning_rate": 6.085106382978724e-05,
"loss": 0.0767,
"step": 337
},
{
"epoch": 4.225,
"grad_norm": 0.7009455561637878,
"learning_rate": 6.042553191489362e-05,
"loss": 0.0633,
"step": 338
},
{
"epoch": 4.2375,
"grad_norm": 0.5959800481796265,
"learning_rate": 6e-05,
"loss": 0.0606,
"step": 339
},
{
"epoch": 4.25,
"grad_norm": 0.5804210901260376,
"learning_rate": 5.9574468085106384e-05,
"loss": 0.0601,
"step": 340
},
{
"epoch": 4.2625,
"grad_norm": 0.6084879040718079,
"learning_rate": 5.9148936170212766e-05,
"loss": 0.0693,
"step": 341
},
{
"epoch": 4.275,
"grad_norm": 0.6810378432273865,
"learning_rate": 5.872340425531915e-05,
"loss": 0.0656,
"step": 342
},
{
"epoch": 4.2875,
"grad_norm": 0.6785478591918945,
"learning_rate": 5.829787234042553e-05,
"loss": 0.0681,
"step": 343
},
{
"epoch": 4.3,
"grad_norm": 0.6475040912628174,
"learning_rate": 5.787234042553191e-05,
"loss": 0.0731,
"step": 344
},
{
"epoch": 4.3125,
"grad_norm": 0.6373022198677063,
"learning_rate": 5.744680851063831e-05,
"loss": 0.0707,
"step": 345
},
{
"epoch": 4.325,
"grad_norm": 0.6542766094207764,
"learning_rate": 5.702127659574469e-05,
"loss": 0.0564,
"step": 346
},
{
"epoch": 4.3375,
"grad_norm": 0.6114728450775146,
"learning_rate": 5.659574468085107e-05,
"loss": 0.0674,
"step": 347
},
{
"epoch": 4.35,
"grad_norm": 0.6630443930625916,
"learning_rate": 5.617021276595745e-05,
"loss": 0.0614,
"step": 348
},
{
"epoch": 4.3625,
"grad_norm": 0.6126697659492493,
"learning_rate": 5.5744680851063835e-05,
"loss": 0.0594,
"step": 349
},
{
"epoch": 4.375,
"grad_norm": 0.5768252015113831,
"learning_rate": 5.531914893617022e-05,
"loss": 0.0569,
"step": 350
},
{
"epoch": 4.3875,
"grad_norm": 0.7498059272766113,
"learning_rate": 5.48936170212766e-05,
"loss": 0.0777,
"step": 351
},
{
"epoch": 4.4,
"grad_norm": 0.6269453167915344,
"learning_rate": 5.446808510638298e-05,
"loss": 0.0613,
"step": 352
},
{
"epoch": 4.4125,
"grad_norm": 0.6231211423873901,
"learning_rate": 5.404255319148936e-05,
"loss": 0.0738,
"step": 353
},
{
"epoch": 4.425,
"grad_norm": 0.7814548015594482,
"learning_rate": 5.3617021276595745e-05,
"loss": 0.0687,
"step": 354
},
{
"epoch": 4.4375,
"grad_norm": 0.8020561337471008,
"learning_rate": 5.319148936170213e-05,
"loss": 0.0848,
"step": 355
},
{
"epoch": 4.45,
"grad_norm": 0.6911760568618774,
"learning_rate": 5.276595744680851e-05,
"loss": 0.0741,
"step": 356
},
{
"epoch": 4.4625,
"grad_norm": 0.7701168060302734,
"learning_rate": 5.234042553191489e-05,
"loss": 0.0679,
"step": 357
},
{
"epoch": 4.475,
"grad_norm": 0.6995530128479004,
"learning_rate": 5.191489361702127e-05,
"loss": 0.0628,
"step": 358
},
{
"epoch": 4.4875,
"grad_norm": 0.6969619393348694,
"learning_rate": 5.148936170212766e-05,
"loss": 0.0701,
"step": 359
},
{
"epoch": 4.5,
"grad_norm": 0.6292584538459778,
"learning_rate": 5.1063829787234044e-05,
"loss": 0.0651,
"step": 360
},
{
"epoch": 4.5125,
"grad_norm": 0.7294819951057434,
"learning_rate": 5.0638297872340426e-05,
"loss": 0.0698,
"step": 361
},
{
"epoch": 4.525,
"grad_norm": 0.6159396171569824,
"learning_rate": 5.021276595744681e-05,
"loss": 0.0602,
"step": 362
},
{
"epoch": 4.5375,
"grad_norm": 0.7271333336830139,
"learning_rate": 4.97872340425532e-05,
"loss": 0.0715,
"step": 363
},
{
"epoch": 4.55,
"grad_norm": 0.9358235597610474,
"learning_rate": 4.936170212765958e-05,
"loss": 0.0888,
"step": 364
},
{
"epoch": 4.5625,
"grad_norm": 0.6776322722434998,
"learning_rate": 4.893617021276596e-05,
"loss": 0.066,
"step": 365
},
{
"epoch": 4.575,
"grad_norm": 0.8353095054626465,
"learning_rate": 4.851063829787234e-05,
"loss": 0.0925,
"step": 366
},
{
"epoch": 4.5875,
"grad_norm": 0.6650261878967285,
"learning_rate": 4.8085106382978725e-05,
"loss": 0.0637,
"step": 367
},
{
"epoch": 4.6,
"grad_norm": 0.7034966945648193,
"learning_rate": 4.7659574468085114e-05,
"loss": 0.0744,
"step": 368
},
{
"epoch": 4.6125,
"grad_norm": 0.6829352378845215,
"learning_rate": 4.7234042553191496e-05,
"loss": 0.0695,
"step": 369
},
{
"epoch": 4.625,
"grad_norm": 0.5907257795333862,
"learning_rate": 4.680851063829788e-05,
"loss": 0.0625,
"step": 370
},
{
"epoch": 4.6375,
"grad_norm": 0.6723539233207703,
"learning_rate": 4.638297872340426e-05,
"loss": 0.0721,
"step": 371
},
{
"epoch": 4.65,
"grad_norm": 0.7118985056877136,
"learning_rate": 4.595744680851064e-05,
"loss": 0.0807,
"step": 372
},
{
"epoch": 4.6625,
"grad_norm": 0.5988577008247375,
"learning_rate": 4.5531914893617024e-05,
"loss": 0.065,
"step": 373
},
{
"epoch": 4.675,
"grad_norm": 0.6269676685333252,
"learning_rate": 4.5106382978723406e-05,
"loss": 0.0702,
"step": 374
},
{
"epoch": 4.6875,
"grad_norm": 0.7419471740722656,
"learning_rate": 4.468085106382979e-05,
"loss": 0.0938,
"step": 375
},
{
"epoch": 4.7,
"grad_norm": 0.5768789052963257,
"learning_rate": 4.425531914893617e-05,
"loss": 0.0585,
"step": 376
},
{
"epoch": 4.7125,
"grad_norm": 0.6381940245628357,
"learning_rate": 4.382978723404256e-05,
"loss": 0.0634,
"step": 377
},
{
"epoch": 4.725,
"grad_norm": 0.6856435537338257,
"learning_rate": 4.340425531914894e-05,
"loss": 0.076,
"step": 378
},
{
"epoch": 4.7375,
"grad_norm": 0.6991595029830933,
"learning_rate": 4.297872340425532e-05,
"loss": 0.0828,
"step": 379
},
{
"epoch": 4.75,
"grad_norm": 0.7512986660003662,
"learning_rate": 4.2553191489361704e-05,
"loss": 0.0748,
"step": 380
},
{
"epoch": 4.7625,
"grad_norm": 0.6096968650817871,
"learning_rate": 4.2127659574468086e-05,
"loss": 0.0692,
"step": 381
},
{
"epoch": 4.775,
"grad_norm": 0.674705982208252,
"learning_rate": 4.170212765957447e-05,
"loss": 0.0788,
"step": 382
},
{
"epoch": 4.7875,
"grad_norm": 0.6924496293067932,
"learning_rate": 4.127659574468085e-05,
"loss": 0.0731,
"step": 383
},
{
"epoch": 4.8,
"grad_norm": 0.6793739199638367,
"learning_rate": 4.085106382978723e-05,
"loss": 0.0697,
"step": 384
},
{
"epoch": 4.8,
"eval_loss": 1.009806513786316,
"eval_runtime": 11.392,
"eval_samples_per_second": 3.16,
"eval_steps_per_second": 1.58,
"step": 384
},
{
"epoch": 4.8125,
"grad_norm": 0.45708170533180237,
"learning_rate": 4.0425531914893614e-05,
"loss": 0.0499,
"step": 385
},
{
"epoch": 4.825,
"grad_norm": 0.7347174286842346,
"learning_rate": 4e-05,
"loss": 0.072,
"step": 386
},
{
"epoch": 4.8375,
"grad_norm": 0.7631716728210449,
"learning_rate": 3.9574468085106385e-05,
"loss": 0.0781,
"step": 387
},
{
"epoch": 4.85,
"grad_norm": 0.7159767150878906,
"learning_rate": 3.914893617021277e-05,
"loss": 0.0761,
"step": 388
},
{
"epoch": 4.8625,
"grad_norm": 0.7675968408584595,
"learning_rate": 3.872340425531915e-05,
"loss": 0.0649,
"step": 389
},
{
"epoch": 4.875,
"grad_norm": 0.6752840876579285,
"learning_rate": 3.829787234042553e-05,
"loss": 0.0674,
"step": 390
},
{
"epoch": 4.8875,
"grad_norm": 0.6436358690261841,
"learning_rate": 3.787234042553191e-05,
"loss": 0.0616,
"step": 391
},
{
"epoch": 4.9,
"grad_norm": 0.7673559188842773,
"learning_rate": 3.7446808510638295e-05,
"loss": 0.0803,
"step": 392
},
{
"epoch": 4.9125,
"grad_norm": 0.6778103709220886,
"learning_rate": 3.702127659574468e-05,
"loss": 0.0727,
"step": 393
},
{
"epoch": 4.925,
"grad_norm": 0.8372657895088196,
"learning_rate": 3.6595744680851066e-05,
"loss": 0.074,
"step": 394
},
{
"epoch": 4.9375,
"grad_norm": 0.6369761824607849,
"learning_rate": 3.617021276595745e-05,
"loss": 0.0669,
"step": 395
},
{
"epoch": 4.95,
"grad_norm": 0.6647788882255554,
"learning_rate": 3.574468085106383e-05,
"loss": 0.053,
"step": 396
},
{
"epoch": 4.9625,
"grad_norm": 0.7610755562782288,
"learning_rate": 3.531914893617021e-05,
"loss": 0.0668,
"step": 397
},
{
"epoch": 4.975,
"grad_norm": 0.6888034343719482,
"learning_rate": 3.48936170212766e-05,
"loss": 0.0708,
"step": 398
},
{
"epoch": 4.9875,
"grad_norm": 0.8322062492370605,
"learning_rate": 3.446808510638298e-05,
"loss": 0.067,
"step": 399
},
{
"epoch": 5.0,
"grad_norm": 0.7463421821594238,
"learning_rate": 3.4042553191489365e-05,
"loss": 0.0568,
"step": 400
},
{
"epoch": 5.0125,
"grad_norm": 0.3784998953342438,
"learning_rate": 3.361702127659575e-05,
"loss": 0.0376,
"step": 401
},
{
"epoch": 5.025,
"grad_norm": 0.39330971240997314,
"learning_rate": 3.319148936170213e-05,
"loss": 0.0367,
"step": 402
},
{
"epoch": 5.0375,
"grad_norm": 0.3745648264884949,
"learning_rate": 3.276595744680851e-05,
"loss": 0.0363,
"step": 403
},
{
"epoch": 5.05,
"grad_norm": 0.44352462887763977,
"learning_rate": 3.23404255319149e-05,
"loss": 0.0347,
"step": 404
},
{
"epoch": 5.0625,
"grad_norm": 0.4444413483142853,
"learning_rate": 3.191489361702128e-05,
"loss": 0.0437,
"step": 405
},
{
"epoch": 5.075,
"grad_norm": 0.4831590950489044,
"learning_rate": 3.1489361702127664e-05,
"loss": 0.0428,
"step": 406
},
{
"epoch": 5.0875,
"grad_norm": 0.4673588275909424,
"learning_rate": 3.1063829787234046e-05,
"loss": 0.0355,
"step": 407
},
{
"epoch": 5.1,
"grad_norm": 0.3534888029098511,
"learning_rate": 3.063829787234043e-05,
"loss": 0.0334,
"step": 408
},
{
"epoch": 5.1125,
"grad_norm": 0.4960513114929199,
"learning_rate": 3.021276595744681e-05,
"loss": 0.0483,
"step": 409
},
{
"epoch": 5.125,
"grad_norm": 0.46593356132507324,
"learning_rate": 2.9787234042553192e-05,
"loss": 0.0419,
"step": 410
},
{
"epoch": 5.1375,
"grad_norm": 0.5315698385238647,
"learning_rate": 2.9361702127659574e-05,
"loss": 0.043,
"step": 411
},
{
"epoch": 5.15,
"grad_norm": 0.5316590070724487,
"learning_rate": 2.8936170212765956e-05,
"loss": 0.0478,
"step": 412
},
{
"epoch": 5.1625,
"grad_norm": 0.454860657453537,
"learning_rate": 2.8510638297872345e-05,
"loss": 0.0427,
"step": 413
},
{
"epoch": 5.175,
"grad_norm": 0.5423195958137512,
"learning_rate": 2.8085106382978727e-05,
"loss": 0.041,
"step": 414
},
{
"epoch": 5.1875,
"grad_norm": 0.4226936995983124,
"learning_rate": 2.765957446808511e-05,
"loss": 0.0351,
"step": 415
},
{
"epoch": 5.2,
"grad_norm": 0.3787650465965271,
"learning_rate": 2.723404255319149e-05,
"loss": 0.0349,
"step": 416
},
{
"epoch": 5.2125,
"grad_norm": 0.45735734701156616,
"learning_rate": 2.6808510638297873e-05,
"loss": 0.0383,
"step": 417
},
{
"epoch": 5.225,
"grad_norm": 0.4093031585216522,
"learning_rate": 2.6382978723404255e-05,
"loss": 0.0314,
"step": 418
},
{
"epoch": 5.2375,
"grad_norm": 0.345889151096344,
"learning_rate": 2.5957446808510637e-05,
"loss": 0.0306,
"step": 419
},
{
"epoch": 5.25,
"grad_norm": 0.37512144446372986,
"learning_rate": 2.5531914893617022e-05,
"loss": 0.0289,
"step": 420
},
{
"epoch": 5.2625,
"grad_norm": 0.5944772362709045,
"learning_rate": 2.5106382978723404e-05,
"loss": 0.0458,
"step": 421
},
{
"epoch": 5.275,
"grad_norm": 0.6373673677444458,
"learning_rate": 2.468085106382979e-05,
"loss": 0.0473,
"step": 422
},
{
"epoch": 5.2875,
"grad_norm": 0.4604647159576416,
"learning_rate": 2.425531914893617e-05,
"loss": 0.0476,
"step": 423
},
{
"epoch": 5.3,
"grad_norm": 0.467212975025177,
"learning_rate": 2.3829787234042557e-05,
"loss": 0.0311,
"step": 424
},
{
"epoch": 5.3125,
"grad_norm": 0.5458898544311523,
"learning_rate": 2.340425531914894e-05,
"loss": 0.0383,
"step": 425
},
{
"epoch": 5.325,
"grad_norm": 0.39436912536621094,
"learning_rate": 2.297872340425532e-05,
"loss": 0.0333,
"step": 426
},
{
"epoch": 5.3375,
"grad_norm": 0.5241003036499023,
"learning_rate": 2.2553191489361703e-05,
"loss": 0.0365,
"step": 427
},
{
"epoch": 5.35,
"grad_norm": 0.46175524592399597,
"learning_rate": 2.2127659574468085e-05,
"loss": 0.0371,
"step": 428
},
{
"epoch": 5.3625,
"grad_norm": 0.4485141932964325,
"learning_rate": 2.170212765957447e-05,
"loss": 0.0362,
"step": 429
},
{
"epoch": 5.375,
"grad_norm": 0.4773487448692322,
"learning_rate": 2.1276595744680852e-05,
"loss": 0.0396,
"step": 430
},
{
"epoch": 5.3875,
"grad_norm": 0.6281043291091919,
"learning_rate": 2.0851063829787234e-05,
"loss": 0.0438,
"step": 431
},
{
"epoch": 5.4,
"grad_norm": 0.5021329522132874,
"learning_rate": 2.0425531914893616e-05,
"loss": 0.0392,
"step": 432
},
{
"epoch": 5.4125,
"grad_norm": 0.4514084458351135,
"learning_rate": 2e-05,
"loss": 0.0346,
"step": 433
},
{
"epoch": 5.425,
"grad_norm": 0.4999784231185913,
"learning_rate": 1.9574468085106384e-05,
"loss": 0.0405,
"step": 434
},
{
"epoch": 5.4375,
"grad_norm": 0.45210784673690796,
"learning_rate": 1.9148936170212766e-05,
"loss": 0.0309,
"step": 435
},
{
"epoch": 5.45,
"grad_norm": 0.8242072463035583,
"learning_rate": 1.8723404255319148e-05,
"loss": 0.0428,
"step": 436
},
{
"epoch": 5.4625,
"grad_norm": 0.45749950408935547,
"learning_rate": 1.8297872340425533e-05,
"loss": 0.0337,
"step": 437
},
{
"epoch": 5.475,
"grad_norm": 0.4928417503833771,
"learning_rate": 1.7872340425531915e-05,
"loss": 0.0294,
"step": 438
},
{
"epoch": 5.4875,
"grad_norm": 0.6273995637893677,
"learning_rate": 1.74468085106383e-05,
"loss": 0.0353,
"step": 439
},
{
"epoch": 5.5,
"grad_norm": 0.4409819543361664,
"learning_rate": 1.7021276595744682e-05,
"loss": 0.0374,
"step": 440
},
{
"epoch": 5.5125,
"grad_norm": 0.5446107387542725,
"learning_rate": 1.6595744680851064e-05,
"loss": 0.0395,
"step": 441
},
{
"epoch": 5.525,
"grad_norm": 0.5066544413566589,
"learning_rate": 1.617021276595745e-05,
"loss": 0.0362,
"step": 442
},
{
"epoch": 5.5375,
"grad_norm": 0.4936687648296356,
"learning_rate": 1.5744680851063832e-05,
"loss": 0.0348,
"step": 443
},
{
"epoch": 5.55,
"grad_norm": 0.5274161100387573,
"learning_rate": 1.5319148936170214e-05,
"loss": 0.0445,
"step": 444
},
{
"epoch": 5.5625,
"grad_norm": 0.3984338045120239,
"learning_rate": 1.4893617021276596e-05,
"loss": 0.0297,
"step": 445
},
{
"epoch": 5.575,
"grad_norm": 0.3740543723106384,
"learning_rate": 1.4468085106382978e-05,
"loss": 0.0298,
"step": 446
},
{
"epoch": 5.5875,
"grad_norm": 0.44997498393058777,
"learning_rate": 1.4042553191489363e-05,
"loss": 0.0343,
"step": 447
},
{
"epoch": 5.6,
"grad_norm": 0.553840696811676,
"learning_rate": 1.3617021276595745e-05,
"loss": 0.0392,
"step": 448
},
{
"epoch": 5.6125,
"grad_norm": 0.47161081433296204,
"learning_rate": 1.3191489361702127e-05,
"loss": 0.0355,
"step": 449
},
{
"epoch": 5.625,
"grad_norm": 0.4401087760925293,
"learning_rate": 1.2765957446808511e-05,
"loss": 0.033,
"step": 450
},
{
"epoch": 5.6375,
"grad_norm": 0.4902607798576355,
"learning_rate": 1.2340425531914895e-05,
"loss": 0.0401,
"step": 451
},
{
"epoch": 5.65,
"grad_norm": 0.5491804480552673,
"learning_rate": 1.1914893617021278e-05,
"loss": 0.0372,
"step": 452
},
{
"epoch": 5.6625,
"grad_norm": 0.5546887516975403,
"learning_rate": 1.148936170212766e-05,
"loss": 0.0353,
"step": 453
},
{
"epoch": 5.675,
"grad_norm": 0.42722412943840027,
"learning_rate": 1.1063829787234042e-05,
"loss": 0.0311,
"step": 454
},
{
"epoch": 5.6875,
"grad_norm": 0.4589029550552368,
"learning_rate": 1.0638297872340426e-05,
"loss": 0.0342,
"step": 455
},
{
"epoch": 5.7,
"grad_norm": 0.3946620523929596,
"learning_rate": 1.0212765957446808e-05,
"loss": 0.0307,
"step": 456
},
{
"epoch": 5.7125,
"grad_norm": 0.35820481181144714,
"learning_rate": 9.787234042553192e-06,
"loss": 0.0268,
"step": 457
},
{
"epoch": 5.725,
"grad_norm": 0.5740081667900085,
"learning_rate": 9.361702127659574e-06,
"loss": 0.0331,
"step": 458
},
{
"epoch": 5.7375,
"grad_norm": 0.6305598616600037,
"learning_rate": 8.936170212765958e-06,
"loss": 0.04,
"step": 459
},
{
"epoch": 5.75,
"grad_norm": 0.7847849130630493,
"learning_rate": 8.510638297872341e-06,
"loss": 0.0408,
"step": 460
},
{
"epoch": 5.7625,
"grad_norm": 0.4860388934612274,
"learning_rate": 8.085106382978725e-06,
"loss": 0.0397,
"step": 461
},
{
"epoch": 5.775,
"grad_norm": 0.33737698197364807,
"learning_rate": 7.659574468085107e-06,
"loss": 0.032,
"step": 462
},
{
"epoch": 5.7875,
"grad_norm": 0.4484858214855194,
"learning_rate": 7.234042553191489e-06,
"loss": 0.0309,
"step": 463
},
{
"epoch": 5.8,
"grad_norm": 0.628653883934021,
"learning_rate": 6.808510638297873e-06,
"loss": 0.0331,
"step": 464
},
{
"epoch": 5.8125,
"grad_norm": 0.45020169019699097,
"learning_rate": 6.3829787234042555e-06,
"loss": 0.033,
"step": 465
},
{
"epoch": 5.825,
"grad_norm": 0.3453785181045532,
"learning_rate": 5.957446808510639e-06,
"loss": 0.0331,
"step": 466
},
{
"epoch": 5.8375,
"grad_norm": 0.3796781897544861,
"learning_rate": 5.531914893617021e-06,
"loss": 0.0347,
"step": 467
},
{
"epoch": 5.85,
"grad_norm": 0.4639665186405182,
"learning_rate": 5.106382978723404e-06,
"loss": 0.0355,
"step": 468
},
{
"epoch": 5.8625,
"grad_norm": 0.6302652359008789,
"learning_rate": 4.680851063829787e-06,
"loss": 0.0417,
"step": 469
},
{
"epoch": 5.875,
"grad_norm": 0.3809727728366852,
"learning_rate": 4.255319148936171e-06,
"loss": 0.029,
"step": 470
},
{
"epoch": 5.8875,
"grad_norm": 0.4321640133857727,
"learning_rate": 3.8297872340425535e-06,
"loss": 0.0304,
"step": 471
},
{
"epoch": 5.9,
"grad_norm": 0.48058974742889404,
"learning_rate": 3.4042553191489363e-06,
"loss": 0.0379,
"step": 472
},
{
"epoch": 5.9125,
"grad_norm": 0.37713518738746643,
"learning_rate": 2.9787234042553196e-06,
"loss": 0.033,
"step": 473
},
{
"epoch": 5.925,
"grad_norm": 0.5107573866844177,
"learning_rate": 2.553191489361702e-06,
"loss": 0.036,
"step": 474
},
{
"epoch": 5.9375,
"grad_norm": 0.35142090916633606,
"learning_rate": 2.1276595744680853e-06,
"loss": 0.0273,
"step": 475
},
{
"epoch": 5.95,
"grad_norm": 0.3913898766040802,
"learning_rate": 1.7021276595744682e-06,
"loss": 0.0314,
"step": 476
},
{
"epoch": 5.9625,
"grad_norm": 0.5400916337966919,
"learning_rate": 1.276595744680851e-06,
"loss": 0.0372,
"step": 477
},
{
"epoch": 5.975,
"grad_norm": 0.4400492012500763,
"learning_rate": 8.510638297872341e-07,
"loss": 0.0345,
"step": 478
},
{
"epoch": 5.9875,
"grad_norm": 0.44733723998069763,
"learning_rate": 4.2553191489361704e-07,
"loss": 0.0308,
"step": 479
},
{
"epoch": 6.0,
"grad_norm": 0.42864102125167847,
"learning_rate": 0.0,
"loss": 0.0305,
"step": 480
},
{
"epoch": 6.0,
"eval_loss": 1.1705684661865234,
"eval_runtime": 11.4475,
"eval_samples_per_second": 3.145,
"eval_steps_per_second": 1.572,
"step": 480
}
],
"logging_steps": 1,
"max_steps": 480,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.41482731320361e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
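
A minimal usage sketch, assuming the JSON above is saved locally as trainer_state.json and that only the Python standard library is available (both the filename and the script itself are assumptions, not part of the upload), for pulling the per-step training losses and the periodic eval_loss entries out of log_history:

import json

# Hypothetical helper: load the state file written by the Trainer.
with open("trainer_state.json") as f:  # path is an assumption
    state = json.load(f)

# Training steps carry "loss"; evaluation entries carry "eval_loss" instead.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"train steps logged: {len(train_logs)}, final train loss: {train_logs[-1]['loss']}")
for e in eval_logs:
    print(f"epoch {e['epoch']}: eval_loss {e['eval_loss']:.4f}")

The entries that contain eval_loss also report eval_runtime, eval_samples_per_second, and eval_steps_per_second, so the same filter can be extended to summarize evaluation throughput as well.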