{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.951768488745981,
"eval_steps": 500,
"global_step": 385,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.012861736334405145,
"grad_norm": 258.1509094238281,
"learning_rate": 8.333333333333335e-09,
"loss": 8.1083,
"num_input_tokens_seen": 16544,
"step": 1
},
{
"epoch": 0.02572347266881029,
"grad_norm": 266.15087890625,
"learning_rate": 1.666666666666667e-08,
"loss": 8.1052,
"num_input_tokens_seen": 33248,
"step": 2
},
{
"epoch": 0.03858520900321544,
"grad_norm": 276.69952392578125,
"learning_rate": 2.5000000000000002e-08,
"loss": 8.1366,
"num_input_tokens_seen": 49760,
"step": 3
},
{
"epoch": 0.05144694533762058,
"grad_norm": 268.43804931640625,
"learning_rate": 3.333333333333334e-08,
"loss": 8.0413,
"num_input_tokens_seen": 65824,
"step": 4
},
{
"epoch": 0.06430868167202572,
"grad_norm": 269.6364440917969,
"learning_rate": 4.166666666666667e-08,
"loss": 8.1055,
"num_input_tokens_seen": 82560,
"step": 5
},
{
"epoch": 0.07717041800643087,
"grad_norm": 274.33538818359375,
"learning_rate": 5.0000000000000004e-08,
"loss": 7.9666,
"num_input_tokens_seen": 99808,
"step": 6
},
{
"epoch": 0.09003215434083602,
"grad_norm": 266.3240051269531,
"learning_rate": 5.833333333333334e-08,
"loss": 8.0182,
"num_input_tokens_seen": 115808,
"step": 7
},
{
"epoch": 0.10289389067524116,
"grad_norm": 274.253662109375,
"learning_rate": 6.666666666666668e-08,
"loss": 8.0363,
"num_input_tokens_seen": 131392,
"step": 8
},
{
"epoch": 0.1157556270096463,
"grad_norm": 263.321533203125,
"learning_rate": 7.500000000000001e-08,
"loss": 8.0918,
"num_input_tokens_seen": 147840,
"step": 9
},
{
"epoch": 0.12861736334405144,
"grad_norm": 266.48394775390625,
"learning_rate": 8.333333333333334e-08,
"loss": 8.0197,
"num_input_tokens_seen": 164064,
"step": 10
},
{
"epoch": 0.1414790996784566,
"grad_norm": 258.7409362792969,
"learning_rate": 9.166666666666668e-08,
"loss": 7.9714,
"num_input_tokens_seen": 180704,
"step": 11
},
{
"epoch": 0.15434083601286175,
"grad_norm": 271.2587585449219,
"learning_rate": 1.0000000000000001e-07,
"loss": 8.0131,
"num_input_tokens_seen": 196064,
"step": 12
},
{
"epoch": 0.16720257234726688,
"grad_norm": 255.18431091308594,
"learning_rate": 1.0833333333333335e-07,
"loss": 7.9796,
"num_input_tokens_seen": 211424,
"step": 13
},
{
"epoch": 0.18006430868167203,
"grad_norm": 269.7242736816406,
"learning_rate": 1.1666666666666668e-07,
"loss": 7.9999,
"num_input_tokens_seen": 228096,
"step": 14
},
{
"epoch": 0.19292604501607716,
"grad_norm": 264.5142517089844,
"learning_rate": 1.2500000000000002e-07,
"loss": 8.0168,
"num_input_tokens_seen": 244416,
"step": 15
},
{
"epoch": 0.2057877813504823,
"grad_norm": 267.4490661621094,
"learning_rate": 1.3333333333333336e-07,
"loss": 7.8423,
"num_input_tokens_seen": 261056,
"step": 16
},
{
"epoch": 0.21864951768488747,
"grad_norm": 269.4408874511719,
"learning_rate": 1.4166666666666668e-07,
"loss": 7.9208,
"num_input_tokens_seen": 276992,
"step": 17
},
{
"epoch": 0.2315112540192926,
"grad_norm": 256.512939453125,
"learning_rate": 1.5000000000000002e-07,
"loss": 7.6898,
"num_input_tokens_seen": 293536,
"step": 18
},
{
"epoch": 0.24437299035369775,
"grad_norm": 265.34490966796875,
"learning_rate": 1.5833333333333336e-07,
"loss": 7.9285,
"num_input_tokens_seen": 309024,
"step": 19
},
{
"epoch": 0.2572347266881029,
"grad_norm": 268.9532470703125,
"learning_rate": 1.6666666666666668e-07,
"loss": 7.8924,
"num_input_tokens_seen": 325024,
"step": 20
},
{
"epoch": 0.27009646302250806,
"grad_norm": 268.1192321777344,
"learning_rate": 1.7500000000000002e-07,
"loss": 7.6173,
"num_input_tokens_seen": 341856,
"step": 21
},
{
"epoch": 0.2829581993569132,
"grad_norm": 271.13006591796875,
"learning_rate": 1.8333333333333336e-07,
"loss": 7.7661,
"num_input_tokens_seen": 358848,
"step": 22
},
{
"epoch": 0.2958199356913183,
"grad_norm": 260.5618591308594,
"learning_rate": 1.9166666666666668e-07,
"loss": 7.2936,
"num_input_tokens_seen": 375360,
"step": 23
},
{
"epoch": 0.3086816720257235,
"grad_norm": 249.86729431152344,
"learning_rate": 2.0000000000000002e-07,
"loss": 7.3044,
"num_input_tokens_seen": 392608,
"step": 24
},
{
"epoch": 0.3215434083601286,
"grad_norm": 257.8570861816406,
"learning_rate": 2.0833333333333333e-07,
"loss": 7.35,
"num_input_tokens_seen": 408800,
"step": 25
},
{
"epoch": 0.33440514469453375,
"grad_norm": 246.40611267089844,
"learning_rate": 2.166666666666667e-07,
"loss": 7.1213,
"num_input_tokens_seen": 425632,
"step": 26
},
{
"epoch": 0.34726688102893893,
"grad_norm": 252.2755889892578,
"learning_rate": 2.2500000000000002e-07,
"loss": 7.1363,
"num_input_tokens_seen": 441952,
"step": 27
},
{
"epoch": 0.36012861736334406,
"grad_norm": 235.94859313964844,
"learning_rate": 2.3333333333333336e-07,
"loss": 6.9016,
"num_input_tokens_seen": 458080,
"step": 28
},
{
"epoch": 0.3729903536977492,
"grad_norm": 243.25863647460938,
"learning_rate": 2.416666666666667e-07,
"loss": 6.9764,
"num_input_tokens_seen": 473824,
"step": 29
},
{
"epoch": 0.3858520900321543,
"grad_norm": 250.39564514160156,
"learning_rate": 2.5000000000000004e-07,
"loss": 6.9005,
"num_input_tokens_seen": 490336,
"step": 30
},
{
"epoch": 0.3987138263665595,
"grad_norm": 253.6007843017578,
"learning_rate": 2.5833333333333333e-07,
"loss": 6.9852,
"num_input_tokens_seen": 506976,
"step": 31
},
{
"epoch": 0.4115755627009646,
"grad_norm": 155.81201171875,
"learning_rate": 2.666666666666667e-07,
"loss": 5.834,
"num_input_tokens_seen": 523520,
"step": 32
},
{
"epoch": 0.42443729903536975,
"grad_norm": 124.55789947509766,
"learning_rate": 2.75e-07,
"loss": 5.5452,
"num_input_tokens_seen": 540672,
"step": 33
},
{
"epoch": 0.43729903536977494,
"grad_norm": 130.3427276611328,
"learning_rate": 2.8333333333333336e-07,
"loss": 5.5189,
"num_input_tokens_seen": 557696,
"step": 34
},
{
"epoch": 0.45016077170418006,
"grad_norm": 126.62003326416016,
"learning_rate": 2.916666666666667e-07,
"loss": 5.4654,
"num_input_tokens_seen": 574400,
"step": 35
},
{
"epoch": 0.4630225080385852,
"grad_norm": 120.25413513183594,
"learning_rate": 3.0000000000000004e-07,
"loss": 5.3117,
"num_input_tokens_seen": 590112,
"step": 36
},
{
"epoch": 0.4758842443729904,
"grad_norm": 123.52056884765625,
"learning_rate": 3.083333333333334e-07,
"loss": 5.2744,
"num_input_tokens_seen": 605920,
"step": 37
},
{
"epoch": 0.4887459807073955,
"grad_norm": 124.25894927978516,
"learning_rate": 3.166666666666667e-07,
"loss": 5.2821,
"num_input_tokens_seen": 622208,
"step": 38
},
{
"epoch": 0.5016077170418006,
"grad_norm": 111.38367462158203,
"learning_rate": 3.25e-07,
"loss": 5.0967,
"num_input_tokens_seen": 638464,
"step": 39
},
{
"epoch": 0.5144694533762058,
"grad_norm": 119.48200225830078,
"learning_rate": 3.3333333333333335e-07,
"loss": 5.2167,
"num_input_tokens_seen": 654656,
"step": 40
},
{
"epoch": 0.5273311897106109,
"grad_norm": 108.62528991699219,
"learning_rate": 3.416666666666667e-07,
"loss": 5.1517,
"num_input_tokens_seen": 671424,
"step": 41
},
{
"epoch": 0.5401929260450161,
"grad_norm": 109.66475677490234,
"learning_rate": 3.5000000000000004e-07,
"loss": 4.6959,
"num_input_tokens_seen": 687968,
"step": 42
},
{
"epoch": 0.5530546623794212,
"grad_norm": 107.45171356201172,
"learning_rate": 3.583333333333334e-07,
"loss": 4.1458,
"num_input_tokens_seen": 704224,
"step": 43
},
{
"epoch": 0.5659163987138264,
"grad_norm": 102.30545806884766,
"learning_rate": 3.666666666666667e-07,
"loss": 3.6692,
"num_input_tokens_seen": 720704,
"step": 44
},
{
"epoch": 0.5787781350482315,
"grad_norm": 96.8830795288086,
"learning_rate": 3.75e-07,
"loss": 3.3322,
"num_input_tokens_seen": 737408,
"step": 45
},
{
"epoch": 0.5916398713826366,
"grad_norm": 102.91023254394531,
"learning_rate": 3.8333333333333335e-07,
"loss": 3.2857,
"num_input_tokens_seen": 753728,
"step": 46
},
{
"epoch": 0.6045016077170418,
"grad_norm": 105.37537384033203,
"learning_rate": 3.9166666666666675e-07,
"loss": 3.0806,
"num_input_tokens_seen": 770080,
"step": 47
},
{
"epoch": 0.617363344051447,
"grad_norm": 98.45983123779297,
"learning_rate": 4.0000000000000003e-07,
"loss": 2.8239,
"num_input_tokens_seen": 786912,
"step": 48
},
{
"epoch": 0.6302250803858521,
"grad_norm": 108.93148040771484,
"learning_rate": 4.083333333333334e-07,
"loss": 2.8695,
"num_input_tokens_seen": 804192,
"step": 49
},
{
"epoch": 0.6430868167202572,
"grad_norm": 100.31180572509766,
"learning_rate": 4.1666666666666667e-07,
"loss": 2.6218,
"num_input_tokens_seen": 820928,
"step": 50
},
{
"epoch": 0.6559485530546624,
"grad_norm": 101.31784057617188,
"learning_rate": 4.2500000000000006e-07,
"loss": 2.3993,
"num_input_tokens_seen": 837152,
"step": 51
},
{
"epoch": 0.6688102893890675,
"grad_norm": 101.09466552734375,
"learning_rate": 4.333333333333334e-07,
"loss": 2.2828,
"num_input_tokens_seen": 853568,
"step": 52
},
{
"epoch": 0.6816720257234726,
"grad_norm": 99.55025482177734,
"learning_rate": 4.416666666666667e-07,
"loss": 2.2063,
"num_input_tokens_seen": 870592,
"step": 53
},
{
"epoch": 0.6945337620578779,
"grad_norm": 104.6786117553711,
"learning_rate": 4.5000000000000003e-07,
"loss": 1.8878,
"num_input_tokens_seen": 886080,
"step": 54
},
{
"epoch": 0.707395498392283,
"grad_norm": 106.05968475341797,
"learning_rate": 4.583333333333333e-07,
"loss": 1.5971,
"num_input_tokens_seen": 903296,
"step": 55
},
{
"epoch": 0.7202572347266881,
"grad_norm": 93.43476867675781,
"learning_rate": 4.666666666666667e-07,
"loss": 0.9604,
"num_input_tokens_seen": 919296,
"step": 56
},
{
"epoch": 0.7331189710610932,
"grad_norm": 60.13694763183594,
"learning_rate": 4.7500000000000006e-07,
"loss": 0.6287,
"num_input_tokens_seen": 936096,
"step": 57
},
{
"epoch": 0.7459807073954984,
"grad_norm": 42.870086669921875,
"learning_rate": 4.833333333333334e-07,
"loss": 0.4844,
"num_input_tokens_seen": 953024,
"step": 58
},
{
"epoch": 0.7588424437299035,
"grad_norm": 45.984989166259766,
"learning_rate": 4.916666666666667e-07,
"loss": 0.3891,
"num_input_tokens_seen": 969024,
"step": 59
},
{
"epoch": 0.7717041800643086,
"grad_norm": 44.375484466552734,
"learning_rate": 5.000000000000001e-07,
"loss": 0.357,
"num_input_tokens_seen": 985600,
"step": 60
},
{
"epoch": 0.7845659163987139,
"grad_norm": 28.2038516998291,
"learning_rate": 5.083333333333334e-07,
"loss": 0.2854,
"num_input_tokens_seen": 1002336,
"step": 61
},
{
"epoch": 0.797427652733119,
"grad_norm": 13.842344284057617,
"learning_rate": 5.166666666666667e-07,
"loss": 0.2403,
"num_input_tokens_seen": 1018496,
"step": 62
},
{
"epoch": 0.8102893890675241,
"grad_norm": 21.351642608642578,
"learning_rate": 5.250000000000001e-07,
"loss": 0.2522,
"num_input_tokens_seen": 1035552,
"step": 63
},
{
"epoch": 0.8231511254019293,
"grad_norm": 21.38069725036621,
"learning_rate": 5.333333333333335e-07,
"loss": 0.2339,
"num_input_tokens_seen": 1051584,
"step": 64
},
{
"epoch": 0.8360128617363344,
"grad_norm": 9.553906440734863,
"learning_rate": 5.416666666666667e-07,
"loss": 0.2214,
"num_input_tokens_seen": 1068640,
"step": 65
},
{
"epoch": 0.8488745980707395,
"grad_norm": 6.100700855255127,
"learning_rate": 5.5e-07,
"loss": 0.1912,
"num_input_tokens_seen": 1085568,
"step": 66
},
{
"epoch": 0.8617363344051447,
"grad_norm": 18.959564208984375,
"learning_rate": 5.583333333333333e-07,
"loss": 0.2157,
"num_input_tokens_seen": 1102784,
"step": 67
},
{
"epoch": 0.8745980707395499,
"grad_norm": 16.303438186645508,
"learning_rate": 5.666666666666667e-07,
"loss": 0.2045,
"num_input_tokens_seen": 1118432,
"step": 68
},
{
"epoch": 0.887459807073955,
"grad_norm": 12.491047859191895,
"learning_rate": 5.750000000000001e-07,
"loss": 0.1964,
"num_input_tokens_seen": 1135232,
"step": 69
},
{
"epoch": 0.9003215434083601,
"grad_norm": 16.655309677124023,
"learning_rate": 5.833333333333334e-07,
"loss": 0.1901,
"num_input_tokens_seen": 1150496,
"step": 70
},
{
"epoch": 0.9131832797427653,
"grad_norm": 12.015859603881836,
"learning_rate": 5.916666666666667e-07,
"loss": 0.1891,
"num_input_tokens_seen": 1166848,
"step": 71
},
{
"epoch": 0.9260450160771704,
"grad_norm": 12.255152702331543,
"learning_rate": 6.000000000000001e-07,
"loss": 0.1841,
"num_input_tokens_seen": 1183552,
"step": 72
},
{
"epoch": 0.9389067524115756,
"grad_norm": 5.163099765777588,
"learning_rate": 6.083333333333334e-07,
"loss": 0.1803,
"num_input_tokens_seen": 1199968,
"step": 73
},
{
"epoch": 0.9517684887459807,
"grad_norm": 10.502095222473145,
"learning_rate": 6.166666666666668e-07,
"loss": 0.1657,
"num_input_tokens_seen": 1216256,
"step": 74
},
{
"epoch": 0.9646302250803859,
"grad_norm": 23.80043601989746,
"learning_rate": 6.25e-07,
"loss": 0.2157,
"num_input_tokens_seen": 1232736,
"step": 75
},
{
"epoch": 0.977491961414791,
"grad_norm": 19.7410831451416,
"learning_rate": 6.333333333333334e-07,
"loss": 0.2,
"num_input_tokens_seen": 1249248,
"step": 76
},
{
"epoch": 0.9903536977491961,
"grad_norm": 4.730998516082764,
"learning_rate": 6.416666666666667e-07,
"loss": 0.1519,
"num_input_tokens_seen": 1265152,
"step": 77
},
{
"epoch": 1.0032154340836013,
"grad_norm": 14.700318336486816,
"learning_rate": 6.5e-07,
"loss": 0.1765,
"num_input_tokens_seen": 1282336,
"step": 78
},
{
"epoch": 1.0160771704180065,
"grad_norm": 18.6468448638916,
"learning_rate": 6.583333333333333e-07,
"loss": 0.1907,
"num_input_tokens_seen": 1299168,
"step": 79
},
{
"epoch": 1.0289389067524115,
"grad_norm": 11.44071102142334,
"learning_rate": 6.666666666666667e-07,
"loss": 0.1593,
"num_input_tokens_seen": 1314816,
"step": 80
},
{
"epoch": 1.0418006430868167,
"grad_norm": 3.6438238620758057,
"learning_rate": 6.750000000000001e-07,
"loss": 0.1659,
"num_input_tokens_seen": 1331168,
"step": 81
},
{
"epoch": 1.0546623794212218,
"grad_norm": 3.6471991539001465,
"learning_rate": 6.833333333333334e-07,
"loss": 0.1619,
"num_input_tokens_seen": 1347968,
"step": 82
},
{
"epoch": 1.067524115755627,
"grad_norm": 4.136232376098633,
"learning_rate": 6.916666666666668e-07,
"loss": 0.168,
"num_input_tokens_seen": 1363872,
"step": 83
},
{
"epoch": 1.0803858520900322,
"grad_norm": 4.436996936798096,
"learning_rate": 7.000000000000001e-07,
"loss": 0.1627,
"num_input_tokens_seen": 1380896,
"step": 84
},
{
"epoch": 1.0932475884244373,
"grad_norm": 10.516170501708984,
"learning_rate": 7.083333333333334e-07,
"loss": 0.1421,
"num_input_tokens_seen": 1397792,
"step": 85
},
{
"epoch": 1.1061093247588425,
"grad_norm": 15.584237098693848,
"learning_rate": 7.166666666666668e-07,
"loss": 0.1517,
"num_input_tokens_seen": 1414272,
"step": 86
},
{
"epoch": 1.1189710610932475,
"grad_norm": 8.475227355957031,
"learning_rate": 7.25e-07,
"loss": 0.1519,
"num_input_tokens_seen": 1429696,
"step": 87
},
{
"epoch": 1.1318327974276527,
"grad_norm": 5.768808841705322,
"learning_rate": 7.333333333333334e-07,
"loss": 0.128,
"num_input_tokens_seen": 1446720,
"step": 88
},
{
"epoch": 1.144694533762058,
"grad_norm": 15.064901351928711,
"learning_rate": 7.416666666666668e-07,
"loss": 0.1481,
"num_input_tokens_seen": 1463008,
"step": 89
},
{
"epoch": 1.157556270096463,
"grad_norm": 17.70471954345703,
"learning_rate": 7.5e-07,
"loss": 0.1636,
"num_input_tokens_seen": 1480096,
"step": 90
},
{
"epoch": 1.1704180064308682,
"grad_norm": 5.037917613983154,
"learning_rate": 7.583333333333334e-07,
"loss": 0.1443,
"num_input_tokens_seen": 1497024,
"step": 91
},
{
"epoch": 1.1832797427652733,
"grad_norm": 17.397666931152344,
"learning_rate": 7.666666666666667e-07,
"loss": 0.1592,
"num_input_tokens_seen": 1513792,
"step": 92
},
{
"epoch": 1.1961414790996785,
"grad_norm": 18.808683395385742,
"learning_rate": 7.750000000000001e-07,
"loss": 0.1744,
"num_input_tokens_seen": 1529696,
"step": 93
},
{
"epoch": 1.2090032154340835,
"grad_norm": 15.173974990844727,
"learning_rate": 7.833333333333335e-07,
"loss": 0.1615,
"num_input_tokens_seen": 1546400,
"step": 94
},
{
"epoch": 1.2218649517684887,
"grad_norm": 3.6459403038024902,
"learning_rate": 7.916666666666667e-07,
"loss": 0.1466,
"num_input_tokens_seen": 1564448,
"step": 95
},
{
"epoch": 1.234726688102894,
"grad_norm": 4.910373210906982,
"learning_rate": 8.000000000000001e-07,
"loss": 0.1333,
"num_input_tokens_seen": 1580736,
"step": 96
},
{
"epoch": 1.247588424437299,
"grad_norm": 11.399758338928223,
"learning_rate": 8.083333333333334e-07,
"loss": 0.1475,
"num_input_tokens_seen": 1596928,
"step": 97
},
{
"epoch": 1.2604501607717042,
"grad_norm": 4.701439380645752,
"learning_rate": 8.166666666666668e-07,
"loss": 0.142,
"num_input_tokens_seen": 1613728,
"step": 98
},
{
"epoch": 1.2733118971061093,
"grad_norm": 7.250323295593262,
"learning_rate": 8.250000000000001e-07,
"loss": 0.148,
"num_input_tokens_seen": 1630112,
"step": 99
},
{
"epoch": 1.2861736334405145,
"grad_norm": 17.24268341064453,
"learning_rate": 8.333333333333333e-07,
"loss": 0.1539,
"num_input_tokens_seen": 1645248,
"step": 100
},
{
"epoch": 1.2990353697749195,
"grad_norm": 4.017528057098389,
"learning_rate": 8.416666666666667e-07,
"loss": 0.1496,
"num_input_tokens_seen": 1662400,
"step": 101
},
{
"epoch": 1.3118971061093248,
"grad_norm": 10.226727485656738,
"learning_rate": 8.500000000000001e-07,
"loss": 0.1563,
"num_input_tokens_seen": 1678528,
"step": 102
},
{
"epoch": 1.32475884244373,
"grad_norm": 4.085242748260498,
"learning_rate": 8.583333333333334e-07,
"loss": 0.1295,
"num_input_tokens_seen": 1694432,
"step": 103
},
{
"epoch": 1.337620578778135,
"grad_norm": 9.983370780944824,
"learning_rate": 8.666666666666668e-07,
"loss": 0.1353,
"num_input_tokens_seen": 1710624,
"step": 104
},
{
"epoch": 1.3504823151125402,
"grad_norm": 6.53292179107666,
"learning_rate": 8.75e-07,
"loss": 0.1376,
"num_input_tokens_seen": 1726560,
"step": 105
},
{
"epoch": 1.3633440514469453,
"grad_norm": 4.412125587463379,
"learning_rate": 8.833333333333334e-07,
"loss": 0.1313,
"num_input_tokens_seen": 1742912,
"step": 106
},
{
"epoch": 1.3762057877813505,
"grad_norm": 13.620604515075684,
"learning_rate": 8.916666666666668e-07,
"loss": 0.1367,
"num_input_tokens_seen": 1758688,
"step": 107
},
{
"epoch": 1.3890675241157555,
"grad_norm": 6.362453937530518,
"learning_rate": 9.000000000000001e-07,
"loss": 0.135,
"num_input_tokens_seen": 1775264,
"step": 108
},
{
"epoch": 1.4019292604501608,
"grad_norm": 3.8915514945983887,
"learning_rate": 9.083333333333335e-07,
"loss": 0.1212,
"num_input_tokens_seen": 1791904,
"step": 109
},
{
"epoch": 1.414790996784566,
"grad_norm": 9.137332916259766,
"learning_rate": 9.166666666666666e-07,
"loss": 0.1355,
"num_input_tokens_seen": 1808736,
"step": 110
},
{
"epoch": 1.427652733118971,
"grad_norm": 4.745510578155518,
"learning_rate": 9.25e-07,
"loss": 0.1439,
"num_input_tokens_seen": 1825504,
"step": 111
},
{
"epoch": 1.4405144694533762,
"grad_norm": 10.531625747680664,
"learning_rate": 9.333333333333334e-07,
"loss": 0.1323,
"num_input_tokens_seen": 1842080,
"step": 112
},
{
"epoch": 1.4533762057877815,
"grad_norm": 11.189078330993652,
"learning_rate": 9.416666666666667e-07,
"loss": 0.1376,
"num_input_tokens_seen": 1858432,
"step": 113
},
{
"epoch": 1.4662379421221865,
"grad_norm": 5.619433879852295,
"learning_rate": 9.500000000000001e-07,
"loss": 0.1191,
"num_input_tokens_seen": 1874304,
"step": 114
},
{
"epoch": 1.4790996784565915,
"grad_norm": 4.590907096862793,
"learning_rate": 9.583333333333334e-07,
"loss": 0.1045,
"num_input_tokens_seen": 1890144,
"step": 115
},
{
"epoch": 1.4919614147909968,
"grad_norm": 4.900210380554199,
"learning_rate": 9.666666666666668e-07,
"loss": 0.1467,
"num_input_tokens_seen": 1905664,
"step": 116
},
{
"epoch": 1.504823151125402,
"grad_norm": 3.0244414806365967,
"learning_rate": 9.750000000000002e-07,
"loss": 0.1142,
"num_input_tokens_seen": 1922240,
"step": 117
},
{
"epoch": 1.517684887459807,
"grad_norm": 2.886939287185669,
"learning_rate": 9.833333333333334e-07,
"loss": 0.1107,
"num_input_tokens_seen": 1938240,
"step": 118
},
{
"epoch": 1.5305466237942122,
"grad_norm": 6.869494438171387,
"learning_rate": 9.916666666666668e-07,
"loss": 0.146,
"num_input_tokens_seen": 1955008,
"step": 119
},
{
"epoch": 1.5434083601286175,
"grad_norm": 16.34566879272461,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.1533,
"num_input_tokens_seen": 1971456,
"step": 120
},
{
"epoch": 1.5562700964630225,
"grad_norm": 15.347967147827148,
"learning_rate": 1.0083333333333333e-06,
"loss": 0.1315,
"num_input_tokens_seen": 1987552,
"step": 121
},
{
"epoch": 1.5691318327974275,
"grad_norm": 4.213266372680664,
"learning_rate": 1.0166666666666667e-06,
"loss": 0.1197,
"num_input_tokens_seen": 2004192,
"step": 122
},
{
"epoch": 1.5819935691318328,
"grad_norm": 14.208905220031738,
"learning_rate": 1.025e-06,
"loss": 0.1228,
"num_input_tokens_seen": 2020832,
"step": 123
},
{
"epoch": 1.594855305466238,
"grad_norm": 17.388845443725586,
"learning_rate": 1.0333333333333333e-06,
"loss": 0.1273,
"num_input_tokens_seen": 2037312,
"step": 124
},
{
"epoch": 1.607717041800643,
"grad_norm": 14.383207321166992,
"learning_rate": 1.0416666666666667e-06,
"loss": 0.141,
"num_input_tokens_seen": 2054304,
"step": 125
},
{
"epoch": 1.6205787781350482,
"grad_norm": 6.2650370597839355,
"learning_rate": 1.0500000000000001e-06,
"loss": 0.1315,
"num_input_tokens_seen": 2071136,
"step": 126
},
{
"epoch": 1.6334405144694535,
"grad_norm": 8.396490097045898,
"learning_rate": 1.0583333333333335e-06,
"loss": 0.1136,
"num_input_tokens_seen": 2088064,
"step": 127
},
{
"epoch": 1.6463022508038585,
"grad_norm": 4.598783016204834,
"learning_rate": 1.066666666666667e-06,
"loss": 0.1013,
"num_input_tokens_seen": 2105120,
"step": 128
},
{
"epoch": 1.6591639871382635,
"grad_norm": 9.912994384765625,
"learning_rate": 1.075e-06,
"loss": 0.1056,
"num_input_tokens_seen": 2120992,
"step": 129
},
{
"epoch": 1.6720257234726688,
"grad_norm": 7.70595645904541,
"learning_rate": 1.0833333333333335e-06,
"loss": 0.1071,
"num_input_tokens_seen": 2137984,
"step": 130
},
{
"epoch": 1.684887459807074,
"grad_norm": 6.183793544769287,
"learning_rate": 1.0916666666666667e-06,
"loss": 0.1357,
"num_input_tokens_seen": 2154240,
"step": 131
},
{
"epoch": 1.697749196141479,
"grad_norm": 6.863802909851074,
"learning_rate": 1.1e-06,
"loss": 0.1181,
"num_input_tokens_seen": 2170464,
"step": 132
},
{
"epoch": 1.7106109324758842,
"grad_norm": 2.7540361881256104,
"learning_rate": 1.1083333333333335e-06,
"loss": 0.0826,
"num_input_tokens_seen": 2187008,
"step": 133
},
{
"epoch": 1.7234726688102895,
"grad_norm": 12.97872257232666,
"learning_rate": 1.1166666666666666e-06,
"loss": 0.1221,
"num_input_tokens_seen": 2202880,
"step": 134
},
{
"epoch": 1.7363344051446945,
"grad_norm": 4.774652004241943,
"learning_rate": 1.125e-06,
"loss": 0.1021,
"num_input_tokens_seen": 2219744,
"step": 135
},
{
"epoch": 1.7491961414790995,
"grad_norm": 3.889441967010498,
"learning_rate": 1.1333333333333334e-06,
"loss": 0.098,
"num_input_tokens_seen": 2236064,
"step": 136
},
{
"epoch": 1.762057877813505,
"grad_norm": 5.095253944396973,
"learning_rate": 1.1416666666666668e-06,
"loss": 0.1085,
"num_input_tokens_seen": 2252832,
"step": 137
},
{
"epoch": 1.77491961414791,
"grad_norm": 5.206950664520264,
"learning_rate": 1.1500000000000002e-06,
"loss": 0.1038,
"num_input_tokens_seen": 2268704,
"step": 138
},
{
"epoch": 1.787781350482315,
"grad_norm": 10.339069366455078,
"learning_rate": 1.1583333333333334e-06,
"loss": 0.1081,
"num_input_tokens_seen": 2285280,
"step": 139
},
{
"epoch": 1.8006430868167203,
"grad_norm": 8.227212905883789,
"learning_rate": 1.1666666666666668e-06,
"loss": 0.1287,
"num_input_tokens_seen": 2301600,
"step": 140
},
{
"epoch": 1.8135048231511255,
"grad_norm": 6.72472620010376,
"learning_rate": 1.175e-06,
"loss": 0.1068,
"num_input_tokens_seen": 2318048,
"step": 141
},
{
"epoch": 1.8263665594855305,
"grad_norm": 10.02261734008789,
"learning_rate": 1.1833333333333334e-06,
"loss": 0.1202,
"num_input_tokens_seen": 2333728,
"step": 142
},
{
"epoch": 1.8392282958199357,
"grad_norm": 10.770837783813477,
"learning_rate": 1.1916666666666668e-06,
"loss": 0.119,
"num_input_tokens_seen": 2349760,
"step": 143
},
{
"epoch": 1.852090032154341,
"grad_norm": 6.685535907745361,
"learning_rate": 1.2000000000000002e-06,
"loss": 0.1273,
"num_input_tokens_seen": 2366336,
"step": 144
},
{
"epoch": 1.864951768488746,
"grad_norm": 4.437434673309326,
"learning_rate": 1.2083333333333333e-06,
"loss": 0.1024,
"num_input_tokens_seen": 2382016,
"step": 145
},
{
"epoch": 1.877813504823151,
"grad_norm": 11.942359924316406,
"learning_rate": 1.2166666666666667e-06,
"loss": 0.1143,
"num_input_tokens_seen": 2398176,
"step": 146
},
{
"epoch": 1.8906752411575563,
"grad_norm": 6.428237438201904,
"learning_rate": 1.2250000000000001e-06,
"loss": 0.1229,
"num_input_tokens_seen": 2414656,
"step": 147
},
{
"epoch": 1.9035369774919615,
"grad_norm": 7.896732807159424,
"learning_rate": 1.2333333333333335e-06,
"loss": 0.0964,
"num_input_tokens_seen": 2430400,
"step": 148
},
{
"epoch": 1.9163987138263665,
"grad_norm": 12.14167594909668,
"learning_rate": 1.2416666666666667e-06,
"loss": 0.1132,
"num_input_tokens_seen": 2447520,
"step": 149
},
{
"epoch": 1.9292604501607717,
"grad_norm": 3.0627174377441406,
"learning_rate": 1.25e-06,
"loss": 0.0734,
"num_input_tokens_seen": 2463872,
"step": 150
},
{
"epoch": 1.942122186495177,
"grad_norm": 3.2333552837371826,
"learning_rate": 1.2583333333333333e-06,
"loss": 0.0939,
"num_input_tokens_seen": 2481344,
"step": 151
},
{
"epoch": 1.954983922829582,
"grad_norm": 14.160690307617188,
"learning_rate": 1.2666666666666669e-06,
"loss": 0.1143,
"num_input_tokens_seen": 2497632,
"step": 152
},
{
"epoch": 1.967845659163987,
"grad_norm": 5.38826322555542,
"learning_rate": 1.275e-06,
"loss": 0.1114,
"num_input_tokens_seen": 2513024,
"step": 153
},
{
"epoch": 1.9807073954983923,
"grad_norm": 3.4523892402648926,
"learning_rate": 1.2833333333333335e-06,
"loss": 0.0948,
"num_input_tokens_seen": 2528800,
"step": 154
},
{
"epoch": 1.9935691318327975,
"grad_norm": 6.9574127197265625,
"learning_rate": 1.2916666666666669e-06,
"loss": 0.0805,
"num_input_tokens_seen": 2545696,
"step": 155
},
{
"epoch": 2.0064308681672025,
"grad_norm": 7.220541954040527,
"learning_rate": 1.3e-06,
"loss": 0.1001,
"num_input_tokens_seen": 2561728,
"step": 156
},
{
"epoch": 2.0192926045016075,
"grad_norm": 6.95176887512207,
"learning_rate": 1.3083333333333334e-06,
"loss": 0.1002,
"num_input_tokens_seen": 2577856,
"step": 157
},
{
"epoch": 2.032154340836013,
"grad_norm": 10.752556800842285,
"learning_rate": 1.3166666666666666e-06,
"loss": 0.0847,
"num_input_tokens_seen": 2593984,
"step": 158
},
{
"epoch": 2.045016077170418,
"grad_norm": 6.510659217834473,
"learning_rate": 1.3250000000000002e-06,
"loss": 0.071,
"num_input_tokens_seen": 2610720,
"step": 159
},
{
"epoch": 2.057877813504823,
"grad_norm": 3.897312641143799,
"learning_rate": 1.3333333333333334e-06,
"loss": 0.0791,
"num_input_tokens_seen": 2626400,
"step": 160
},
{
"epoch": 2.0707395498392285,
"grad_norm": 2.8594729900360107,
"learning_rate": 1.3416666666666666e-06,
"loss": 0.0769,
"num_input_tokens_seen": 2642496,
"step": 161
},
{
"epoch": 2.0836012861736335,
"grad_norm": 8.970246315002441,
"learning_rate": 1.3500000000000002e-06,
"loss": 0.0916,
"num_input_tokens_seen": 2658144,
"step": 162
},
{
"epoch": 2.0964630225080385,
"grad_norm": 2.965878963470459,
"learning_rate": 1.3583333333333334e-06,
"loss": 0.0584,
"num_input_tokens_seen": 2674560,
"step": 163
},
{
"epoch": 2.1093247588424435,
"grad_norm": 6.79748010635376,
"learning_rate": 1.3666666666666668e-06,
"loss": 0.0811,
"num_input_tokens_seen": 2690688,
"step": 164
},
{
"epoch": 2.122186495176849,
"grad_norm": 10.366084098815918,
"learning_rate": 1.3750000000000002e-06,
"loss": 0.0663,
"num_input_tokens_seen": 2706720,
"step": 165
},
{
"epoch": 2.135048231511254,
"grad_norm": 8.038703918457031,
"learning_rate": 1.3833333333333336e-06,
"loss": 0.0802,
"num_input_tokens_seen": 2723008,
"step": 166
},
{
"epoch": 2.147909967845659,
"grad_norm": 6.01867151260376,
"learning_rate": 1.3916666666666668e-06,
"loss": 0.0587,
"num_input_tokens_seen": 2738784,
"step": 167
},
{
"epoch": 2.1607717041800645,
"grad_norm": 5.053437232971191,
"learning_rate": 1.4000000000000001e-06,
"loss": 0.0953,
"num_input_tokens_seen": 2755072,
"step": 168
},
{
"epoch": 2.1736334405144695,
"grad_norm": 7.119932174682617,
"learning_rate": 1.4083333333333335e-06,
"loss": 0.0795,
"num_input_tokens_seen": 2771296,
"step": 169
},
{
"epoch": 2.1864951768488745,
"grad_norm": 4.602327346801758,
"learning_rate": 1.4166666666666667e-06,
"loss": 0.1015,
"num_input_tokens_seen": 2787712,
"step": 170
},
{
"epoch": 2.19935691318328,
"grad_norm": 2.7282161712646484,
"learning_rate": 1.425e-06,
"loss": 0.0614,
"num_input_tokens_seen": 2803552,
"step": 171
},
{
"epoch": 2.212218649517685,
"grad_norm": 4.694391250610352,
"learning_rate": 1.4333333333333335e-06,
"loss": 0.0753,
"num_input_tokens_seen": 2820352,
"step": 172
},
{
"epoch": 2.22508038585209,
"grad_norm": 3.231022357940674,
"learning_rate": 1.4416666666666667e-06,
"loss": 0.08,
"num_input_tokens_seen": 2837696,
"step": 173
},
{
"epoch": 2.237942122186495,
"grad_norm": 4.522435188293457,
"learning_rate": 1.45e-06,
"loss": 0.0603,
"num_input_tokens_seen": 2854272,
"step": 174
},
{
"epoch": 2.2508038585209005,
"grad_norm": 8.292889595031738,
"learning_rate": 1.4583333333333335e-06,
"loss": 0.0874,
"num_input_tokens_seen": 2870464,
"step": 175
},
{
"epoch": 2.2636655948553055,
"grad_norm": 3.635068655014038,
"learning_rate": 1.4666666666666669e-06,
"loss": 0.0833,
"num_input_tokens_seen": 2886880,
"step": 176
},
{
"epoch": 2.2765273311897105,
"grad_norm": 3.8000781536102295,
"learning_rate": 1.475e-06,
"loss": 0.0676,
"num_input_tokens_seen": 2902880,
"step": 177
},
{
"epoch": 2.289389067524116,
"grad_norm": 9.920318603515625,
"learning_rate": 1.4833333333333337e-06,
"loss": 0.086,
"num_input_tokens_seen": 2919584,
"step": 178
},
{
"epoch": 2.302250803858521,
"grad_norm": 3.8999791145324707,
"learning_rate": 1.4916666666666669e-06,
"loss": 0.0662,
"num_input_tokens_seen": 2935136,
"step": 179
},
{
"epoch": 2.315112540192926,
"grad_norm": 6.011571407318115,
"learning_rate": 1.5e-06,
"loss": 0.1028,
"num_input_tokens_seen": 2951680,
"step": 180
},
{
"epoch": 2.327974276527331,
"grad_norm": 5.94368839263916,
"learning_rate": 1.5083333333333336e-06,
"loss": 0.0882,
"num_input_tokens_seen": 2967712,
"step": 181
},
{
"epoch": 2.3408360128617365,
"grad_norm": 3.9432876110076904,
"learning_rate": 1.5166666666666668e-06,
"loss": 0.0806,
"num_input_tokens_seen": 2984224,
"step": 182
},
{
"epoch": 2.3536977491961415,
"grad_norm": 18.06218147277832,
"learning_rate": 1.525e-06,
"loss": 0.1108,
"num_input_tokens_seen": 2999904,
"step": 183
},
{
"epoch": 2.3665594855305465,
"grad_norm": 5.799235820770264,
"learning_rate": 1.5333333333333334e-06,
"loss": 0.0687,
"num_input_tokens_seen": 3017376,
"step": 184
},
{
"epoch": 2.379421221864952,
"grad_norm": 4.252455711364746,
"learning_rate": 1.5416666666666668e-06,
"loss": 0.0736,
"num_input_tokens_seen": 3033728,
"step": 185
},
{
"epoch": 2.392282958199357,
"grad_norm": 6.050833702087402,
"learning_rate": 1.5500000000000002e-06,
"loss": 0.0779,
"num_input_tokens_seen": 3050176,
"step": 186
},
{
"epoch": 2.405144694533762,
"grad_norm": 8.076692581176758,
"learning_rate": 1.5583333333333334e-06,
"loss": 0.0709,
"num_input_tokens_seen": 3066400,
"step": 187
},
{
"epoch": 2.418006430868167,
"grad_norm": 7.372990608215332,
"learning_rate": 1.566666666666667e-06,
"loss": 0.0652,
"num_input_tokens_seen": 3083104,
"step": 188
},
{
"epoch": 2.4308681672025725,
"grad_norm": 14.172348022460938,
"learning_rate": 1.5750000000000002e-06,
"loss": 0.1095,
"num_input_tokens_seen": 3100000,
"step": 189
},
{
"epoch": 2.4437299035369775,
"grad_norm": 4.726587772369385,
"learning_rate": 1.5833333333333333e-06,
"loss": 0.0618,
"num_input_tokens_seen": 3116800,
"step": 190
},
{
"epoch": 2.4565916398713825,
"grad_norm": 10.408024787902832,
"learning_rate": 1.591666666666667e-06,
"loss": 0.0666,
"num_input_tokens_seen": 3133568,
"step": 191
},
{
"epoch": 2.469453376205788,
"grad_norm": 7.295907497406006,
"learning_rate": 1.6000000000000001e-06,
"loss": 0.0573,
"num_input_tokens_seen": 3150048,
"step": 192
},
{
"epoch": 2.482315112540193,
"grad_norm": 6.490230083465576,
"learning_rate": 1.6083333333333333e-06,
"loss": 0.0577,
"num_input_tokens_seen": 3166688,
"step": 193
},
{
"epoch": 2.495176848874598,
"grad_norm": 6.940796375274658,
"learning_rate": 1.6166666666666667e-06,
"loss": 0.0813,
"num_input_tokens_seen": 3182752,
"step": 194
},
{
"epoch": 2.508038585209003,
"grad_norm": 8.307454109191895,
"learning_rate": 1.6250000000000001e-06,
"loss": 0.066,
"num_input_tokens_seen": 3198944,
"step": 195
},
{
"epoch": 2.5209003215434085,
"grad_norm": 4.472862243652344,
"learning_rate": 1.6333333333333335e-06,
"loss": 0.0622,
"num_input_tokens_seen": 3215552,
"step": 196
},
{
"epoch": 2.5337620578778135,
"grad_norm": 3.5001590251922607,
"learning_rate": 1.6416666666666667e-06,
"loss": 0.0616,
"num_input_tokens_seen": 3231488,
"step": 197
},
{
"epoch": 2.5466237942122185,
"grad_norm": 13.425641059875488,
"learning_rate": 1.6500000000000003e-06,
"loss": 0.0993,
"num_input_tokens_seen": 3247840,
"step": 198
},
{
"epoch": 2.559485530546624,
"grad_norm": 12.016642570495605,
"learning_rate": 1.6583333333333335e-06,
"loss": 0.0702,
"num_input_tokens_seen": 3264128,
"step": 199
},
{
"epoch": 2.572347266881029,
"grad_norm": 10.653433799743652,
"learning_rate": 1.6666666666666667e-06,
"loss": 0.0743,
"num_input_tokens_seen": 3280192,
"step": 200
},
{
"epoch": 2.585209003215434,
"grad_norm": 3.59945011138916,
"learning_rate": 1.6750000000000003e-06,
"loss": 0.0647,
"num_input_tokens_seen": 3296480,
"step": 201
},
{
"epoch": 2.598070739549839,
"grad_norm": 11.130800247192383,
"learning_rate": 1.6833333333333335e-06,
"loss": 0.0814,
"num_input_tokens_seen": 3313120,
"step": 202
},
{
"epoch": 2.6109324758842445,
"grad_norm": 11.733818054199219,
"learning_rate": 1.6916666666666666e-06,
"loss": 0.0861,
"num_input_tokens_seen": 3329184,
"step": 203
},
{
"epoch": 2.6237942122186495,
"grad_norm": 6.696333885192871,
"learning_rate": 1.7000000000000002e-06,
"loss": 0.0769,
"num_input_tokens_seen": 3346208,
"step": 204
},
{
"epoch": 2.6366559485530545,
"grad_norm": 8.523835182189941,
"learning_rate": 1.7083333333333334e-06,
"loss": 0.0888,
"num_input_tokens_seen": 3363008,
"step": 205
},
{
"epoch": 2.64951768488746,
"grad_norm": 12.503429412841797,
"learning_rate": 1.7166666666666668e-06,
"loss": 0.1017,
"num_input_tokens_seen": 3379488,
"step": 206
},
{
"epoch": 2.662379421221865,
"grad_norm": 7.644631385803223,
"learning_rate": 1.725e-06,
"loss": 0.0677,
"num_input_tokens_seen": 3395648,
"step": 207
},
{
"epoch": 2.67524115755627,
"grad_norm": 7.251969814300537,
"learning_rate": 1.7333333333333336e-06,
"loss": 0.0861,
"num_input_tokens_seen": 3412736,
"step": 208
},
{
"epoch": 2.688102893890675,
"grad_norm": 3.763118267059326,
"learning_rate": 1.7416666666666668e-06,
"loss": 0.0562,
"num_input_tokens_seen": 3429472,
"step": 209
},
{
"epoch": 2.7009646302250805,
"grad_norm": 8.583102226257324,
"learning_rate": 1.75e-06,
"loss": 0.0641,
"num_input_tokens_seen": 3445632,
"step": 210
},
{
"epoch": 2.7138263665594855,
"grad_norm": 9.621906280517578,
"learning_rate": 1.7583333333333336e-06,
"loss": 0.0829,
"num_input_tokens_seen": 3462720,
"step": 211
},
{
"epoch": 2.7266881028938905,
"grad_norm": 6.067561626434326,
"learning_rate": 1.7666666666666668e-06,
"loss": 0.0632,
"num_input_tokens_seen": 3478720,
"step": 212
},
{
"epoch": 2.739549839228296,
"grad_norm": 4.912470817565918,
"learning_rate": 1.7750000000000002e-06,
"loss": 0.062,
"num_input_tokens_seen": 3494304,
"step": 213
},
{
"epoch": 2.752411575562701,
"grad_norm": 8.044519424438477,
"learning_rate": 1.7833333333333336e-06,
"loss": 0.0816,
"num_input_tokens_seen": 3510176,
"step": 214
},
{
"epoch": 2.765273311897106,
"grad_norm": 8.589887619018555,
"learning_rate": 1.7916666666666667e-06,
"loss": 0.096,
"num_input_tokens_seen": 3526848,
"step": 215
},
{
"epoch": 2.778135048231511,
"grad_norm": 3.3357672691345215,
"learning_rate": 1.8000000000000001e-06,
"loss": 0.0639,
"num_input_tokens_seen": 3543136,
"step": 216
},
{
"epoch": 2.7909967845659165,
"grad_norm": 5.992812156677246,
"learning_rate": 1.8083333333333335e-06,
"loss": 0.0915,
"num_input_tokens_seen": 3558240,
"step": 217
},
{
"epoch": 2.8038585209003215,
"grad_norm": 4.8690924644470215,
"learning_rate": 1.816666666666667e-06,
"loss": 0.061,
"num_input_tokens_seen": 3574240,
"step": 218
},
{
"epoch": 2.816720257234727,
"grad_norm": 2.928898811340332,
"learning_rate": 1.825e-06,
"loss": 0.0572,
"num_input_tokens_seen": 3590240,
"step": 219
},
{
"epoch": 2.829581993569132,
"grad_norm": 2.8405849933624268,
"learning_rate": 1.8333333333333333e-06,
"loss": 0.0497,
"num_input_tokens_seen": 3606912,
"step": 220
},
{
"epoch": 2.842443729903537,
"grad_norm": 5.940427303314209,
"learning_rate": 1.8416666666666669e-06,
"loss": 0.0672,
"num_input_tokens_seen": 3623008,
"step": 221
},
{
"epoch": 2.855305466237942,
"grad_norm": 4.370597839355469,
"learning_rate": 1.85e-06,
"loss": 0.0563,
"num_input_tokens_seen": 3639456,
"step": 222
},
{
"epoch": 2.868167202572347,
"grad_norm": 3.9959638118743896,
"learning_rate": 1.8583333333333335e-06,
"loss": 0.069,
"num_input_tokens_seen": 3656288,
"step": 223
},
{
"epoch": 2.8810289389067525,
"grad_norm": 4.756357192993164,
"learning_rate": 1.8666666666666669e-06,
"loss": 0.0824,
"num_input_tokens_seen": 3672896,
"step": 224
},
{
"epoch": 2.8938906752411575,
"grad_norm": 3.312553644180298,
"learning_rate": 1.8750000000000003e-06,
"loss": 0.057,
"num_input_tokens_seen": 3689632,
"step": 225
},
{
"epoch": 2.906752411575563,
"grad_norm": 3.71880841255188,
"learning_rate": 1.8833333333333334e-06,
"loss": 0.0549,
"num_input_tokens_seen": 3706944,
"step": 226
},
{
"epoch": 2.919614147909968,
"grad_norm": 4.111685276031494,
"learning_rate": 1.8916666666666668e-06,
"loss": 0.0652,
"num_input_tokens_seen": 3723072,
"step": 227
},
{
"epoch": 2.932475884244373,
"grad_norm": 4.362407684326172,
"learning_rate": 1.9000000000000002e-06,
"loss": 0.0743,
"num_input_tokens_seen": 3740064,
"step": 228
},
{
"epoch": 2.945337620578778,
"grad_norm": 2.441969156265259,
"learning_rate": 1.9083333333333334e-06,
"loss": 0.0416,
"num_input_tokens_seen": 3756864,
"step": 229
},
{
"epoch": 2.958199356913183,
"grad_norm": 3.4988579750061035,
"learning_rate": 1.916666666666667e-06,
"loss": 0.0668,
"num_input_tokens_seen": 3774336,
"step": 230
},
{
"epoch": 2.9710610932475885,
"grad_norm": 3.7807693481445312,
"learning_rate": 1.925e-06,
"loss": 0.0603,
"num_input_tokens_seen": 3790336,
"step": 231
},
{
"epoch": 2.9839228295819935,
"grad_norm": 3.471942901611328,
"learning_rate": 1.9333333333333336e-06,
"loss": 0.066,
"num_input_tokens_seen": 3807520,
"step": 232
},
{
"epoch": 2.996784565916399,
"grad_norm": 3.4720284938812256,
"learning_rate": 1.9416666666666666e-06,
"loss": 0.0692,
"num_input_tokens_seen": 3823872,
"step": 233
},
{
"epoch": 3.009646302250804,
"grad_norm": 1.9317958354949951,
"learning_rate": 1.9500000000000004e-06,
"loss": 0.0323,
"num_input_tokens_seen": 3840320,
"step": 234
},
{
"epoch": 3.022508038585209,
"grad_norm": 4.849405288696289,
"learning_rate": 1.9583333333333334e-06,
"loss": 0.0403,
"num_input_tokens_seen": 3856256,
"step": 235
},
{
"epoch": 3.035369774919614,
"grad_norm": 2.371988296508789,
"learning_rate": 1.9666666666666668e-06,
"loss": 0.0477,
"num_input_tokens_seen": 3872512,
"step": 236
},
{
"epoch": 3.0482315112540195,
"grad_norm": 3.362964153289795,
"learning_rate": 1.975e-06,
"loss": 0.0356,
"num_input_tokens_seen": 3888256,
"step": 237
},
{
"epoch": 3.0610932475884245,
"grad_norm": 2.618176221847534,
"learning_rate": 1.9833333333333335e-06,
"loss": 0.0324,
"num_input_tokens_seen": 3905024,
"step": 238
},
{
"epoch": 3.0739549839228295,
"grad_norm": 2.5595703125,
"learning_rate": 1.991666666666667e-06,
"loss": 0.0428,
"num_input_tokens_seen": 3922016,
"step": 239
},
{
"epoch": 3.0868167202572345,
"grad_norm": 2.788818597793579,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.0315,
"num_input_tokens_seen": 3938400,
"step": 240
},
{
"epoch": 3.09967845659164,
"grad_norm": 3.9531607627868652,
"learning_rate": 2.0083333333333337e-06,
"loss": 0.0366,
"num_input_tokens_seen": 3954912,
"step": 241
},
{
"epoch": 3.112540192926045,
"grad_norm": 3.2901012897491455,
"learning_rate": 2.0166666666666667e-06,
"loss": 0.0307,
"num_input_tokens_seen": 3970400,
"step": 242
},
{
"epoch": 3.12540192926045,
"grad_norm": 4.283529281616211,
"learning_rate": 2.025e-06,
"loss": 0.0338,
"num_input_tokens_seen": 3986944,
"step": 243
},
{
"epoch": 3.1382636655948555,
"grad_norm": 4.467242240905762,
"learning_rate": 2.0333333333333335e-06,
"loss": 0.0348,
"num_input_tokens_seen": 4003648,
"step": 244
},
{
"epoch": 3.1511254019292605,
"grad_norm": 3.3191885948181152,
"learning_rate": 2.041666666666667e-06,
"loss": 0.0355,
"num_input_tokens_seen": 4020384,
"step": 245
},
{
"epoch": 3.1639871382636655,
"grad_norm": 4.061459064483643,
"learning_rate": 2.05e-06,
"loss": 0.0272,
"num_input_tokens_seen": 4037088,
"step": 246
},
{
"epoch": 3.176848874598071,
"grad_norm": 3.0776522159576416,
"learning_rate": 2.0583333333333337e-06,
"loss": 0.0464,
"num_input_tokens_seen": 4053472,
"step": 247
},
{
"epoch": 3.189710610932476,
"grad_norm": 2.13104510307312,
"learning_rate": 2.0666666666666666e-06,
"loss": 0.0268,
"num_input_tokens_seen": 4069248,
"step": 248
},
{
"epoch": 3.202572347266881,
"grad_norm": 2.1540310382843018,
"learning_rate": 2.075e-06,
"loss": 0.0229,
"num_input_tokens_seen": 4085024,
"step": 249
},
{
"epoch": 3.215434083601286,
"grad_norm": 3.068967342376709,
"learning_rate": 2.0833333333333334e-06,
"loss": 0.0373,
"num_input_tokens_seen": 4102432,
"step": 250
},
{
"epoch": 3.2282958199356915,
"grad_norm": 2.7403666973114014,
"learning_rate": 2.091666666666667e-06,
"loss": 0.0361,
"num_input_tokens_seen": 4118368,
"step": 251
},
{
"epoch": 3.2411575562700965,
"grad_norm": 4.994419574737549,
"learning_rate": 2.1000000000000002e-06,
"loss": 0.0401,
"num_input_tokens_seen": 4134784,
"step": 252
},
{
"epoch": 3.2540192926045015,
"grad_norm": 2.287346839904785,
"learning_rate": 2.1083333333333336e-06,
"loss": 0.0262,
"num_input_tokens_seen": 4150528,
"step": 253
},
{
"epoch": 3.266881028938907,
"grad_norm": 3.649418354034424,
"learning_rate": 2.116666666666667e-06,
"loss": 0.0481,
"num_input_tokens_seen": 4166816,
"step": 254
},
{
"epoch": 3.279742765273312,
"grad_norm": 2.9041836261749268,
"learning_rate": 2.125e-06,
"loss": 0.0417,
"num_input_tokens_seen": 4182880,
"step": 255
},
{
"epoch": 3.292604501607717,
"grad_norm": 3.602240800857544,
"learning_rate": 2.133333333333334e-06,
"loss": 0.0457,
"num_input_tokens_seen": 4200128,
"step": 256
},
{
"epoch": 3.305466237942122,
"grad_norm": 1.6382755041122437,
"learning_rate": 2.1416666666666668e-06,
"loss": 0.0175,
"num_input_tokens_seen": 4215680,
"step": 257
},
{
"epoch": 3.3183279742765275,
"grad_norm": 2.4267561435699463,
"learning_rate": 2.15e-06,
"loss": 0.0371,
"num_input_tokens_seen": 4232736,
"step": 258
},
{
"epoch": 3.3311897106109325,
"grad_norm": 2.553440570831299,
"learning_rate": 2.1583333333333336e-06,
"loss": 0.0268,
"num_input_tokens_seen": 4249664,
"step": 259
},
{
"epoch": 3.3440514469453375,
"grad_norm": 2.951179027557373,
"learning_rate": 2.166666666666667e-06,
"loss": 0.0417,
"num_input_tokens_seen": 4266016,
"step": 260
},
{
"epoch": 3.356913183279743,
"grad_norm": 2.710747003555298,
"learning_rate": 2.1750000000000004e-06,
"loss": 0.0264,
"num_input_tokens_seen": 4281664,
"step": 261
},
{
"epoch": 3.369774919614148,
"grad_norm": 3.465902090072632,
"learning_rate": 2.1833333333333333e-06,
"loss": 0.0336,
"num_input_tokens_seen": 4298368,
"step": 262
},
{
"epoch": 3.382636655948553,
"grad_norm": 4.009627819061279,
"learning_rate": 2.191666666666667e-06,
"loss": 0.0434,
"num_input_tokens_seen": 4314336,
"step": 263
},
{
"epoch": 3.395498392282958,
"grad_norm": 4.071226596832275,
"learning_rate": 2.2e-06,
"loss": 0.0389,
"num_input_tokens_seen": 4330144,
"step": 264
},
{
"epoch": 3.4083601286173635,
"grad_norm": 4.341132164001465,
"learning_rate": 2.2083333333333335e-06,
"loss": 0.045,
"num_input_tokens_seen": 4345856,
"step": 265
},
{
"epoch": 3.4212218649517685,
"grad_norm": 4.663236141204834,
"learning_rate": 2.216666666666667e-06,
"loss": 0.0331,
"num_input_tokens_seen": 4361696,
"step": 266
},
{
"epoch": 3.4340836012861735,
"grad_norm": 2.17475962638855,
"learning_rate": 2.2250000000000003e-06,
"loss": 0.0228,
"num_input_tokens_seen": 4378304,
"step": 267
},
{
"epoch": 3.446945337620579,
"grad_norm": 5.099360942840576,
"learning_rate": 2.2333333333333333e-06,
"loss": 0.0307,
"num_input_tokens_seen": 4395232,
"step": 268
},
{
"epoch": 3.459807073954984,
"grad_norm": 4.4752020835876465,
"learning_rate": 2.2416666666666667e-06,
"loss": 0.0332,
"num_input_tokens_seen": 4411232,
"step": 269
},
{
"epoch": 3.472668810289389,
"grad_norm": 4.047648906707764,
"learning_rate": 2.25e-06,
"loss": 0.0662,
"num_input_tokens_seen": 4427520,
"step": 270
},
{
"epoch": 3.485530546623794,
"grad_norm": 5.564074516296387,
"learning_rate": 2.2583333333333335e-06,
"loss": 0.0431,
"num_input_tokens_seen": 4443904,
"step": 271
},
{
"epoch": 3.4983922829581995,
"grad_norm": 3.9845523834228516,
"learning_rate": 2.266666666666667e-06,
"loss": 0.0423,
"num_input_tokens_seen": 4460608,
"step": 272
},
{
"epoch": 3.5112540192926045,
"grad_norm": 3.6314871311187744,
"learning_rate": 2.2750000000000002e-06,
"loss": 0.0447,
"num_input_tokens_seen": 4477120,
"step": 273
},
{
"epoch": 3.5241157556270095,
"grad_norm": 3.1981313228607178,
"learning_rate": 2.2833333333333336e-06,
"loss": 0.0337,
"num_input_tokens_seen": 4493152,
"step": 274
},
{
"epoch": 3.536977491961415,
"grad_norm": 2.1383423805236816,
"learning_rate": 2.2916666666666666e-06,
"loss": 0.0319,
"num_input_tokens_seen": 4509120,
"step": 275
},
{
"epoch": 3.54983922829582,
"grad_norm": 2.778956890106201,
"learning_rate": 2.3000000000000004e-06,
"loss": 0.027,
"num_input_tokens_seen": 4525984,
"step": 276
},
{
"epoch": 3.562700964630225,
"grad_norm": 2.518002510070801,
"learning_rate": 2.3083333333333334e-06,
"loss": 0.0244,
"num_input_tokens_seen": 4542816,
"step": 277
},
{
"epoch": 3.57556270096463,
"grad_norm": 3.8519890308380127,
"learning_rate": 2.316666666666667e-06,
"loss": 0.0377,
"num_input_tokens_seen": 4559872,
"step": 278
},
{
"epoch": 3.5884244372990355,
"grad_norm": 5.541743278503418,
"learning_rate": 2.325e-06,
"loss": 0.0477,
"num_input_tokens_seen": 4576288,
"step": 279
},
{
"epoch": 3.6012861736334405,
"grad_norm": 4.099382400512695,
"learning_rate": 2.3333333333333336e-06,
"loss": 0.0332,
"num_input_tokens_seen": 4591456,
"step": 280
},
{
"epoch": 3.6141479099678455,
"grad_norm": 2.509037733078003,
"learning_rate": 2.341666666666667e-06,
"loss": 0.0285,
"num_input_tokens_seen": 4608608,
"step": 281
},
{
"epoch": 3.627009646302251,
"grad_norm": 6.764764308929443,
"learning_rate": 2.35e-06,
"loss": 0.0468,
"num_input_tokens_seen": 4625120,
"step": 282
},
{
"epoch": 3.639871382636656,
"grad_norm": 6.219926834106445,
"learning_rate": 2.3583333333333338e-06,
"loss": 0.037,
"num_input_tokens_seen": 4641952,
"step": 283
},
{
"epoch": 3.652733118971061,
"grad_norm": 3.869555950164795,
"learning_rate": 2.3666666666666667e-06,
"loss": 0.041,
"num_input_tokens_seen": 4657920,
"step": 284
},
{
"epoch": 3.665594855305466,
"grad_norm": 4.1329264640808105,
"learning_rate": 2.375e-06,
"loss": 0.0461,
"num_input_tokens_seen": 4674688,
"step": 285
},
{
"epoch": 3.6784565916398715,
"grad_norm": 6.7732648849487305,
"learning_rate": 2.3833333333333335e-06,
"loss": 0.0594,
"num_input_tokens_seen": 4690592,
"step": 286
},
{
"epoch": 3.6913183279742765,
"grad_norm": 8.550586700439453,
"learning_rate": 2.391666666666667e-06,
"loss": 0.0728,
"num_input_tokens_seen": 4707360,
"step": 287
},
{
"epoch": 3.7041800643086815,
"grad_norm": 4.587222099304199,
"learning_rate": 2.4000000000000003e-06,
"loss": 0.029,
"num_input_tokens_seen": 4724192,
"step": 288
},
{
"epoch": 3.717041800643087,
"grad_norm": 3.3474302291870117,
"learning_rate": 2.4083333333333337e-06,
"loss": 0.0412,
"num_input_tokens_seen": 4741344,
"step": 289
},
{
"epoch": 3.729903536977492,
"grad_norm": 4.359026908874512,
"learning_rate": 2.4166666666666667e-06,
"loss": 0.0262,
"num_input_tokens_seen": 4756768,
"step": 290
},
{
"epoch": 3.742765273311897,
"grad_norm": 8.029643058776855,
"learning_rate": 2.425e-06,
"loss": 0.0796,
"num_input_tokens_seen": 4772608,
"step": 291
},
{
"epoch": 3.755627009646302,
"grad_norm": 3.9188246726989746,
"learning_rate": 2.4333333333333335e-06,
"loss": 0.0447,
"num_input_tokens_seen": 4788928,
"step": 292
},
{
"epoch": 3.7684887459807075,
"grad_norm": 2.2623226642608643,
"learning_rate": 2.441666666666667e-06,
"loss": 0.0252,
"num_input_tokens_seen": 4805408,
"step": 293
},
{
"epoch": 3.7813504823151125,
"grad_norm": 5.41758394241333,
"learning_rate": 2.4500000000000003e-06,
"loss": 0.0458,
"num_input_tokens_seen": 4821248,
"step": 294
},
{
"epoch": 3.7942122186495175,
"grad_norm": 3.402024984359741,
"learning_rate": 2.4583333333333332e-06,
"loss": 0.0431,
"num_input_tokens_seen": 4837024,
"step": 295
},
{
"epoch": 3.807073954983923,
"grad_norm": 2.2912893295288086,
"learning_rate": 2.466666666666667e-06,
"loss": 0.0422,
"num_input_tokens_seen": 4853760,
"step": 296
},
{
"epoch": 3.819935691318328,
"grad_norm": 2.5446107387542725,
"learning_rate": 2.475e-06,
"loss": 0.0428,
"num_input_tokens_seen": 4870560,
"step": 297
},
{
"epoch": 3.832797427652733,
"grad_norm": 4.001276969909668,
"learning_rate": 2.4833333333333334e-06,
"loss": 0.0473,
"num_input_tokens_seen": 4887040,
"step": 298
},
{
"epoch": 3.845659163987138,
"grad_norm": 2.1214165687561035,
"learning_rate": 2.491666666666667e-06,
"loss": 0.0223,
"num_input_tokens_seen": 4903232,
"step": 299
},
{
"epoch": 3.8585209003215435,
"grad_norm": 2.0410876274108887,
"learning_rate": 2.5e-06,
"loss": 0.0274,
"num_input_tokens_seen": 4919392,
"step": 300
},
{
"epoch": 3.8713826366559485,
"grad_norm": 4.287325382232666,
"learning_rate": 2.5083333333333336e-06,
"loss": 0.0454,
"num_input_tokens_seen": 4935648,
"step": 301
},
{
"epoch": 3.884244372990354,
"grad_norm": 4.437230110168457,
"learning_rate": 2.5166666666666666e-06,
"loss": 0.022,
"num_input_tokens_seen": 4952768,
"step": 302
},
{
"epoch": 3.897106109324759,
"grad_norm": 3.8610730171203613,
"learning_rate": 2.5250000000000004e-06,
"loss": 0.0357,
"num_input_tokens_seen": 4969408,
"step": 303
},
{
"epoch": 3.909967845659164,
"grad_norm": 2.636066436767578,
"learning_rate": 2.5333333333333338e-06,
"loss": 0.0458,
"num_input_tokens_seen": 4986592,
"step": 304
},
{
"epoch": 3.922829581993569,
"grad_norm": 4.873857021331787,
"learning_rate": 2.5416666666666668e-06,
"loss": 0.0454,
"num_input_tokens_seen": 5002720,
"step": 305
},
{
"epoch": 3.935691318327974,
"grad_norm": 2.460498571395874,
"learning_rate": 2.55e-06,
"loss": 0.0262,
"num_input_tokens_seen": 5018848,
"step": 306
},
{
"epoch": 3.9485530546623795,
"grad_norm": 1.9787613153457642,
"learning_rate": 2.558333333333334e-06,
"loss": 0.0234,
"num_input_tokens_seen": 5034528,
"step": 307
},
{
"epoch": 3.9614147909967845,
"grad_norm": 2.9524741172790527,
"learning_rate": 2.566666666666667e-06,
"loss": 0.0366,
"num_input_tokens_seen": 5051328,
"step": 308
},
{
"epoch": 3.97427652733119,
"grad_norm": 2.858250379562378,
"learning_rate": 2.5750000000000003e-06,
"loss": 0.0236,
"num_input_tokens_seen": 5067712,
"step": 309
},
{
"epoch": 3.987138263665595,
"grad_norm": 2.985665798187256,
"learning_rate": 2.5833333333333337e-06,
"loss": 0.0403,
"num_input_tokens_seen": 5085088,
"step": 310
},
{
"epoch": 4.0,
"grad_norm": 2.72078013420105,
"learning_rate": 2.5916666666666667e-06,
"loss": 0.0395,
"num_input_tokens_seen": 5101536,
"step": 311
},
{
"epoch": 4.012861736334405,
"grad_norm": 1.2907602787017822,
"learning_rate": 2.6e-06,
"loss": 0.0163,
"num_input_tokens_seen": 5117696,
"step": 312
},
{
"epoch": 4.02572347266881,
"grad_norm": 1.910504937171936,
"learning_rate": 2.608333333333333e-06,
"loss": 0.0181,
"num_input_tokens_seen": 5133408,
"step": 313
},
{
"epoch": 4.038585209003215,
"grad_norm": 1.1473162174224854,
"learning_rate": 2.616666666666667e-06,
"loss": 0.013,
"num_input_tokens_seen": 5149664,
"step": 314
},
{
"epoch": 4.051446945337621,
"grad_norm": 2.9885878562927246,
"learning_rate": 2.6250000000000003e-06,
"loss": 0.0178,
"num_input_tokens_seen": 5165344,
"step": 315
},
{
"epoch": 4.064308681672026,
"grad_norm": 2.308147430419922,
"learning_rate": 2.6333333333333332e-06,
"loss": 0.0153,
"num_input_tokens_seen": 5183264,
"step": 316
},
{
"epoch": 4.077170418006431,
"grad_norm": 3.1830930709838867,
"learning_rate": 2.6416666666666666e-06,
"loss": 0.0205,
"num_input_tokens_seen": 5199296,
"step": 317
},
{
"epoch": 4.090032154340836,
"grad_norm": 0.674608051776886,
"learning_rate": 2.6500000000000005e-06,
"loss": 0.0021,
"num_input_tokens_seen": 5216032,
"step": 318
},
{
"epoch": 4.102893890675241,
"grad_norm": 3.075059175491333,
"learning_rate": 2.6583333333333334e-06,
"loss": 0.022,
"num_input_tokens_seen": 5232544,
"step": 319
},
{
"epoch": 4.115755627009646,
"grad_norm": 1.1589558124542236,
"learning_rate": 2.666666666666667e-06,
"loss": 0.0134,
"num_input_tokens_seen": 5248704,
"step": 320
},
{
"epoch": 4.128617363344051,
"grad_norm": 4.190515518188477,
"learning_rate": 2.6750000000000002e-06,
"loss": 0.0175,
"num_input_tokens_seen": 5264384,
"step": 321
},
{
"epoch": 4.141479099678457,
"grad_norm": 3.435070276260376,
"learning_rate": 2.683333333333333e-06,
"loss": 0.023,
"num_input_tokens_seen": 5280928,
"step": 322
},
{
"epoch": 4.154340836012862,
"grad_norm": 3.3521199226379395,
"learning_rate": 2.691666666666667e-06,
"loss": 0.0303,
"num_input_tokens_seen": 5296512,
"step": 323
},
{
"epoch": 4.167202572347267,
"grad_norm": 2.0815391540527344,
"learning_rate": 2.7000000000000004e-06,
"loss": 0.0187,
"num_input_tokens_seen": 5313440,
"step": 324
},
{
"epoch": 4.180064308681672,
"grad_norm": 1.968308687210083,
"learning_rate": 2.7083333333333334e-06,
"loss": 0.0126,
"num_input_tokens_seen": 5329088,
"step": 325
},
{
"epoch": 4.192926045016077,
"grad_norm": 2.134216070175171,
"learning_rate": 2.7166666666666668e-06,
"loss": 0.0203,
"num_input_tokens_seen": 5345376,
"step": 326
},
{
"epoch": 4.205787781350482,
"grad_norm": 1.2245476245880127,
"learning_rate": 2.7250000000000006e-06,
"loss": 0.0078,
"num_input_tokens_seen": 5361568,
"step": 327
},
{
"epoch": 4.218649517684887,
"grad_norm": 1.908057451248169,
"learning_rate": 2.7333333333333336e-06,
"loss": 0.0165,
"num_input_tokens_seen": 5377536,
"step": 328
},
{
"epoch": 4.231511254019293,
"grad_norm": 1.6413862705230713,
"learning_rate": 2.741666666666667e-06,
"loss": 0.0113,
"num_input_tokens_seen": 5395040,
"step": 329
},
{
"epoch": 4.244372990353698,
"grad_norm": 1.442226529121399,
"learning_rate": 2.7500000000000004e-06,
"loss": 0.0058,
"num_input_tokens_seen": 5411488,
"step": 330
},
{
"epoch": 4.257234726688103,
"grad_norm": 0.9877079129219055,
"learning_rate": 2.7583333333333333e-06,
"loss": 0.007,
"num_input_tokens_seen": 5427328,
"step": 331
},
{
"epoch": 4.270096463022508,
"grad_norm": 2.9424357414245605,
"learning_rate": 2.766666666666667e-06,
"loss": 0.027,
"num_input_tokens_seen": 5444352,
"step": 332
},
{
"epoch": 4.282958199356913,
"grad_norm": 3.5407063961029053,
"learning_rate": 2.7750000000000005e-06,
"loss": 0.0276,
"num_input_tokens_seen": 5459840,
"step": 333
},
{
"epoch": 4.295819935691318,
"grad_norm": 4.147865295410156,
"learning_rate": 2.7833333333333335e-06,
"loss": 0.0367,
"num_input_tokens_seen": 5476000,
"step": 334
},
{
"epoch": 4.308681672025724,
"grad_norm": 2.4164767265319824,
"learning_rate": 2.791666666666667e-06,
"loss": 0.0161,
"num_input_tokens_seen": 5491968,
"step": 335
},
{
"epoch": 4.321543408360129,
"grad_norm": 2.553860902786255,
"learning_rate": 2.8000000000000003e-06,
"loss": 0.018,
"num_input_tokens_seen": 5508864,
"step": 336
},
{
"epoch": 4.334405144694534,
"grad_norm": 1.050309419631958,
"learning_rate": 2.8083333333333333e-06,
"loss": 0.0044,
"num_input_tokens_seen": 5525536,
"step": 337
},
{
"epoch": 4.347266881028939,
"grad_norm": 2.375540018081665,
"learning_rate": 2.816666666666667e-06,
"loss": 0.0109,
"num_input_tokens_seen": 5542368,
"step": 338
},
{
"epoch": 4.360128617363344,
"grad_norm": 3.041912794113159,
"learning_rate": 2.825e-06,
"loss": 0.0173,
"num_input_tokens_seen": 5558400,
"step": 339
},
{
"epoch": 4.372990353697749,
"grad_norm": 2.464390754699707,
"learning_rate": 2.8333333333333335e-06,
"loss": 0.0107,
"num_input_tokens_seen": 5575008,
"step": 340
},
{
"epoch": 4.385852090032154,
"grad_norm": 1.3086371421813965,
"learning_rate": 2.841666666666667e-06,
"loss": 0.0116,
"num_input_tokens_seen": 5590784,
"step": 341
},
{
"epoch": 4.39871382636656,
"grad_norm": 3.057859420776367,
"learning_rate": 2.85e-06,
"loss": 0.0281,
"num_input_tokens_seen": 5608032,
"step": 342
},
{
"epoch": 4.411575562700965,
"grad_norm": 2.7371633052825928,
"learning_rate": 2.8583333333333336e-06,
"loss": 0.0246,
"num_input_tokens_seen": 5624704,
"step": 343
},
{
"epoch": 4.42443729903537,
"grad_norm": 1.7932931184768677,
"learning_rate": 2.866666666666667e-06,
"loss": 0.0146,
"num_input_tokens_seen": 5641056,
"step": 344
},
{
"epoch": 4.437299035369775,
"grad_norm": 3.9446775913238525,
"learning_rate": 2.875e-06,
"loss": 0.0439,
"num_input_tokens_seen": 5656608,
"step": 345
},
{
"epoch": 4.45016077170418,
"grad_norm": 3.7706024646759033,
"learning_rate": 2.8833333333333334e-06,
"loss": 0.0279,
"num_input_tokens_seen": 5672928,
"step": 346
},
{
"epoch": 4.463022508038585,
"grad_norm": 2.38531756401062,
"learning_rate": 2.8916666666666672e-06,
"loss": 0.0276,
"num_input_tokens_seen": 5689728,
"step": 347
},
{
"epoch": 4.47588424437299,
"grad_norm": 2.534675121307373,
"learning_rate": 2.9e-06,
"loss": 0.0167,
"num_input_tokens_seen": 5705824,
"step": 348
},
{
"epoch": 4.488745980707396,
"grad_norm": 3.28116774559021,
"learning_rate": 2.9083333333333336e-06,
"loss": 0.0258,
"num_input_tokens_seen": 5723008,
"step": 349
},
{
"epoch": 4.501607717041801,
"grad_norm": 3.5873191356658936,
"learning_rate": 2.916666666666667e-06,
"loss": 0.0306,
"num_input_tokens_seen": 5739680,
"step": 350
},
{
"epoch": 4.514469453376206,
"grad_norm": 4.889535427093506,
"learning_rate": 2.925e-06,
"loss": 0.0395,
"num_input_tokens_seen": 5756480,
"step": 351
},
{
"epoch": 4.527331189710611,
"grad_norm": 1.8129762411117554,
"learning_rate": 2.9333333333333338e-06,
"loss": 0.0203,
"num_input_tokens_seen": 5773088,
"step": 352
},
{
"epoch": 4.540192926045016,
"grad_norm": 1.5045830011367798,
"learning_rate": 2.941666666666667e-06,
"loss": 0.0241,
"num_input_tokens_seen": 5789312,
"step": 353
},
{
"epoch": 4.553054662379421,
"grad_norm": 1.4582304954528809,
"learning_rate": 2.95e-06,
"loss": 0.0151,
"num_input_tokens_seen": 5804352,
"step": 354
},
{
"epoch": 4.565916398713826,
"grad_norm": 1.6875511407852173,
"learning_rate": 2.9583333333333335e-06,
"loss": 0.0355,
"num_input_tokens_seen": 5820384,
"step": 355
},
{
"epoch": 4.578778135048232,
"grad_norm": 1.0483993291854858,
"learning_rate": 2.9666666666666673e-06,
"loss": 0.0093,
"num_input_tokens_seen": 5837280,
"step": 356
},
{
"epoch": 4.591639871382637,
"grad_norm": 2.731086254119873,
"learning_rate": 2.9750000000000003e-06,
"loss": 0.0314,
"num_input_tokens_seen": 5853760,
"step": 357
},
{
"epoch": 4.604501607717042,
"grad_norm": 1.5985758304595947,
"learning_rate": 2.9833333333333337e-06,
"loss": 0.0155,
"num_input_tokens_seen": 5870240,
"step": 358
},
{
"epoch": 4.617363344051447,
"grad_norm": 1.3119585514068604,
"learning_rate": 2.991666666666667e-06,
"loss": 0.0138,
"num_input_tokens_seen": 5887296,
"step": 359
},
{
"epoch": 4.630225080385852,
"grad_norm": 2.3107335567474365,
"learning_rate": 3e-06,
"loss": 0.0212,
"num_input_tokens_seen": 5904384,
"step": 360
},
{
"epoch": 4.643086816720257,
"grad_norm": 1.8473104238510132,
"learning_rate": 3.0083333333333335e-06,
"loss": 0.0255,
"num_input_tokens_seen": 5920832,
"step": 361
},
{
"epoch": 4.655948553054662,
"grad_norm": 1.6255360841751099,
"learning_rate": 3.0166666666666673e-06,
"loss": 0.0231,
"num_input_tokens_seen": 5936736,
"step": 362
},
{
"epoch": 4.668810289389068,
"grad_norm": 1.843116044998169,
"learning_rate": 3.0250000000000003e-06,
"loss": 0.0083,
"num_input_tokens_seen": 5953120,
"step": 363
},
{
"epoch": 4.681672025723473,
"grad_norm": 1.7974934577941895,
"learning_rate": 3.0333333333333337e-06,
"loss": 0.0122,
"num_input_tokens_seen": 5970336,
"step": 364
},
{
"epoch": 4.694533762057878,
"grad_norm": 1.799026370048523,
"learning_rate": 3.0416666666666666e-06,
"loss": 0.0142,
"num_input_tokens_seen": 5987488,
"step": 365
},
{
"epoch": 4.707395498392283,
"grad_norm": 2.38661789894104,
"learning_rate": 3.05e-06,
"loss": 0.0293,
"num_input_tokens_seen": 6003296,
"step": 366
},
{
"epoch": 4.720257234726688,
"grad_norm": 2.2720234394073486,
"learning_rate": 3.058333333333334e-06,
"loss": 0.0292,
"num_input_tokens_seen": 6020768,
"step": 367
},
{
"epoch": 4.733118971061093,
"grad_norm": 3.6864068508148193,
"learning_rate": 3.066666666666667e-06,
"loss": 0.032,
"num_input_tokens_seen": 6036992,
"step": 368
},
{
"epoch": 4.745980707395498,
"grad_norm": 2.1829123497009277,
"learning_rate": 3.075e-06,
"loss": 0.0189,
"num_input_tokens_seen": 6053568,
"step": 369
},
{
"epoch": 4.758842443729904,
"grad_norm": 2.033625602722168,
"learning_rate": 3.0833333333333336e-06,
"loss": 0.022,
"num_input_tokens_seen": 6070336,
"step": 370
},
{
"epoch": 4.771704180064309,
"grad_norm": 2.665611982345581,
"learning_rate": 3.0916666666666666e-06,
"loss": 0.0242,
"num_input_tokens_seen": 6087200,
"step": 371
},
{
"epoch": 4.784565916398714,
"grad_norm": 1.981733798980713,
"learning_rate": 3.1000000000000004e-06,
"loss": 0.015,
"num_input_tokens_seen": 6104224,
"step": 372
},
{
"epoch": 4.797427652733119,
"grad_norm": 1.30072820186615,
"learning_rate": 3.1083333333333338e-06,
"loss": 0.0154,
"num_input_tokens_seen": 6120576,
"step": 373
},
{
"epoch": 4.810289389067524,
"grad_norm": 2.196270704269409,
"learning_rate": 3.1166666666666668e-06,
"loss": 0.0195,
"num_input_tokens_seen": 6137440,
"step": 374
},
{
"epoch": 4.823151125401929,
"grad_norm": 2.597658157348633,
"learning_rate": 3.125e-06,
"loss": 0.031,
"num_input_tokens_seen": 6154976,
"step": 375
},
{
"epoch": 4.836012861736334,
"grad_norm": 3.6647465229034424,
"learning_rate": 3.133333333333334e-06,
"loss": 0.0376,
"num_input_tokens_seen": 6170912,
"step": 376
},
{
"epoch": 4.84887459807074,
"grad_norm": 1.159162998199463,
"learning_rate": 3.141666666666667e-06,
"loss": 0.0093,
"num_input_tokens_seen": 6187968,
"step": 377
},
{
"epoch": 4.861736334405145,
"grad_norm": 1.5810602903366089,
"learning_rate": 3.1500000000000003e-06,
"loss": 0.027,
"num_input_tokens_seen": 6204320,
"step": 378
},
{
"epoch": 4.87459807073955,
"grad_norm": 2.0108237266540527,
"learning_rate": 3.1583333333333337e-06,
"loss": 0.0191,
"num_input_tokens_seen": 6220512,
"step": 379
},
{
"epoch": 4.887459807073955,
"grad_norm": 1.5167394876480103,
"learning_rate": 3.1666666666666667e-06,
"loss": 0.0132,
"num_input_tokens_seen": 6236512,
"step": 380
},
{
"epoch": 4.90032154340836,
"grad_norm": 2.1819732189178467,
"learning_rate": 3.175e-06,
"loss": 0.0111,
"num_input_tokens_seen": 6252416,
"step": 381
},
{
"epoch": 4.913183279742765,
"grad_norm": 1.414007306098938,
"learning_rate": 3.183333333333334e-06,
"loss": 0.0145,
"num_input_tokens_seen": 6268992,
"step": 382
},
{
"epoch": 4.92604501607717,
"grad_norm": 1.6091843843460083,
"learning_rate": 3.191666666666667e-06,
"loss": 0.0082,
"num_input_tokens_seen": 6285312,
"step": 383
},
{
"epoch": 4.938906752411576,
"grad_norm": 1.0037862062454224,
"learning_rate": 3.2000000000000003e-06,
"loss": 0.0165,
"num_input_tokens_seen": 6300992,
"step": 384
},
{
"epoch": 4.951768488745981,
"grad_norm": 2.4763152599334717,
"learning_rate": 3.2083333333333337e-06,
"loss": 0.0243,
"num_input_tokens_seen": 6318560,
"step": 385
},
{
"epoch": 4.951768488745981,
"num_input_tokens_seen": 6318560,
"step": 385,
"total_flos": 2.5049338265940787e+17,
"train_loss": 0.9364173337708336,
"train_runtime": 4331.1035,
"train_samples_per_second": 22.95,
"train_steps_per_second": 0.089
}
],
"logging_steps": 1,
"max_steps": 385,
"num_input_tokens_seen": 6318560,
"num_train_epochs": 5,
"save_steps": 980,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.5049338265940787e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}