{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.887459807073955,
  "eval_steps": 500,
  "global_step": 190,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02572347266881029,
      "grad_norm": 268.0186767578125,
      "learning_rate": 5.000000000000001e-07,
      "loss": 8.3599,
      "num_input_tokens_seen": 7120,
      "step": 1
    },
    {
      "epoch": 0.05144694533762058,
      "grad_norm": 277.9050598144531,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 8.1891,
      "num_input_tokens_seen": 13888,
      "step": 2
    },
    {
      "epoch": 0.07717041800643087,
      "grad_norm": 277.69873046875,
      "learning_rate": 1.5e-06,
      "loss": 8.0792,
      "num_input_tokens_seen": 20656,
      "step": 3
    },
    {
      "epoch": 0.10289389067524116,
      "grad_norm": 267.486328125,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 7.9682,
      "num_input_tokens_seen": 27184,
      "step": 4
    },
    {
      "epoch": 0.12861736334405144,
      "grad_norm": 301.225830078125,
      "learning_rate": 2.5e-06,
      "loss": 6.9482,
      "num_input_tokens_seen": 34416,
      "step": 5
    },
    {
      "epoch": 0.15434083601286175,
      "grad_norm": 137.74415588378906,
      "learning_rate": 3e-06,
      "loss": 5.1505,
      "num_input_tokens_seen": 41056,
      "step": 6
    },
    {
      "epoch": 0.18006430868167203,
      "grad_norm": 113.7622299194336,
      "learning_rate": 3.5e-06,
      "loss": 4.7491,
      "num_input_tokens_seen": 47536,
      "step": 7
    },
    {
      "epoch": 0.2057877813504823,
      "grad_norm": 109.39883422851562,
      "learning_rate": 4.000000000000001e-06,
      "loss": 3.2164,
      "num_input_tokens_seen": 54464,
      "step": 8
    },
    {
      "epoch": 0.2315112540192926,
      "grad_norm": 119.0561294555664,
      "learning_rate": 4.5e-06,
      "loss": 2.7761,
      "num_input_tokens_seen": 61520,
      "step": 9
    },
    {
      "epoch": 0.2572347266881029,
      "grad_norm": 111.31031036376953,
      "learning_rate": 5e-06,
      "loss": 0.6703,
      "num_input_tokens_seen": 68464,
      "step": 10
    },
    {
      "epoch": 0.2829581993569132,
      "grad_norm": 39.81684494018555,
      "learning_rate": 4.9996192378909785e-06,
      "loss": 0.3255,
      "num_input_tokens_seen": 75152,
      "step": 11
    },
    {
      "epoch": 0.3086816720257235,
      "grad_norm": 41.08443069458008,
      "learning_rate": 4.99847706754774e-06,
      "loss": 0.3301,
      "num_input_tokens_seen": 81888,
      "step": 12
    },
    {
      "epoch": 0.33440514469453375,
      "grad_norm": 17.01146125793457,
      "learning_rate": 4.9965738368864345e-06,
      "loss": 0.2121,
      "num_input_tokens_seen": 88688,
      "step": 13
    },
    {
      "epoch": 0.36012861736334406,
      "grad_norm": 73.99099731445312,
      "learning_rate": 4.993910125649561e-06,
      "loss": 1.1565,
      "num_input_tokens_seen": 95616,
      "step": 14
    },
    {
      "epoch": 0.3858520900321543,
      "grad_norm": 114.83367919921875,
      "learning_rate": 4.990486745229364e-06,
      "loss": 0.8054,
      "num_input_tokens_seen": 102144,
      "step": 15
    },
    {
      "epoch": 0.4115755627009646,
      "grad_norm": 18.247591018676758,
      "learning_rate": 4.986304738420684e-06,
      "loss": 0.2386,
      "num_input_tokens_seen": 109120,
      "step": 16
    },
    {
      "epoch": 0.43729903536977494,
      "grad_norm": 27.782032012939453,
      "learning_rate": 4.981365379103306e-06,
      "loss": 0.3161,
      "num_input_tokens_seen": 115744,
      "step": 17
    },
    {
      "epoch": 0.4630225080385852,
      "grad_norm": 21.16512107849121,
      "learning_rate": 4.975670171853926e-06,
      "loss": 0.2773,
      "num_input_tokens_seen": 122704,
      "step": 18
    },
    {
      "epoch": 0.4887459807073955,
      "grad_norm": 6.738564491271973,
      "learning_rate": 4.9692208514878445e-06,
      "loss": 0.2062,
      "num_input_tokens_seen": 129552,
      "step": 19
    },
    {
      "epoch": 0.5144694533762058,
      "grad_norm": 3.6603872776031494,
      "learning_rate": 4.962019382530521e-06,
      "loss": 0.1837,
      "num_input_tokens_seen": 136544,
      "step": 20
    },
    {
      "epoch": 0.5401929260450161,
      "grad_norm": 10.339789390563965,
      "learning_rate": 4.9540679586191605e-06,
      "loss": 0.1735,
      "num_input_tokens_seen": 143488,
      "step": 21
    },
    {
      "epoch": 0.5659163987138264,
      "grad_norm": 4.833702087402344,
      "learning_rate": 4.9453690018345144e-06,
      "loss": 0.1588,
      "num_input_tokens_seen": 150224,
      "step": 22
    },
    {
      "epoch": 0.5916398713826366,
      "grad_norm": 4.9161152839660645,
      "learning_rate": 4.935925161963089e-06,
      "loss": 0.1443,
      "num_input_tokens_seen": 157232,
      "step": 23
    },
    {
      "epoch": 0.617363344051447,
      "grad_norm": 9.033807754516602,
      "learning_rate": 4.925739315689991e-06,
      "loss": 0.157,
      "num_input_tokens_seen": 163776,
      "step": 24
    },
    {
      "epoch": 0.6430868167202572,
      "grad_norm": 4.518654823303223,
      "learning_rate": 4.914814565722671e-06,
      "loss": 0.1199,
      "num_input_tokens_seen": 170352,
      "step": 25
    },
    {
      "epoch": 0.6688102893890675,
      "grad_norm": 10.43909740447998,
      "learning_rate": 4.903154239845798e-06,
      "loss": 0.1539,
      "num_input_tokens_seen": 177120,
      "step": 26
    },
    {
      "epoch": 0.6945337620578779,
      "grad_norm": 9.000858306884766,
      "learning_rate": 4.890761889907589e-06,
      "loss": 0.1208,
      "num_input_tokens_seen": 184096,
      "step": 27
    },
    {
      "epoch": 0.7202572347266881,
      "grad_norm": 3.3685858249664307,
      "learning_rate": 4.8776412907378845e-06,
      "loss": 0.0954,
      "num_input_tokens_seen": 191040,
      "step": 28
    },
    {
      "epoch": 0.7459807073954984,
      "grad_norm": 6.876232147216797,
      "learning_rate": 4.863796438998293e-06,
      "loss": 0.1387,
      "num_input_tokens_seen": 198064,
      "step": 29
    },
    {
      "epoch": 0.7717041800643086,
      "grad_norm": 11.019433975219727,
      "learning_rate": 4.849231551964771e-06,
      "loss": 0.1484,
      "num_input_tokens_seen": 205136,
      "step": 30
    },
    {
      "epoch": 0.797427652733119,
      "grad_norm": 4.780524730682373,
      "learning_rate": 4.833951066243004e-06,
      "loss": 0.0998,
      "num_input_tokens_seen": 212000,
      "step": 31
    },
    {
      "epoch": 0.8231511254019293,
      "grad_norm": 3.613060235977173,
      "learning_rate": 4.817959636416969e-06,
      "loss": 0.1068,
      "num_input_tokens_seen": 218720,
      "step": 32
    },
    {
      "epoch": 0.8488745980707395,
      "grad_norm": 5.576949596405029,
      "learning_rate": 4.801262133631101e-06,
      "loss": 0.0801,
      "num_input_tokens_seen": 225856,
      "step": 33
    },
    {
      "epoch": 0.8745980707395499,
      "grad_norm": 3.3204915523529053,
      "learning_rate": 4.783863644106502e-06,
      "loss": 0.1066,
      "num_input_tokens_seen": 232640,
      "step": 34
    },
    {
      "epoch": 0.9003215434083601,
      "grad_norm": 3.090853452682495,
      "learning_rate": 4.765769467591626e-06,
      "loss": 0.1038,
      "num_input_tokens_seen": 239504,
      "step": 35
    },
    {
      "epoch": 0.9260450160771704,
      "grad_norm": 6.979610443115234,
      "learning_rate": 4.746985115747918e-06,
      "loss": 0.106,
      "num_input_tokens_seen": 246288,
      "step": 36
    },
    {
      "epoch": 0.9517684887459807,
      "grad_norm": 3.491868495941162,
      "learning_rate": 4.72751631047092e-06,
      "loss": 0.1107,
      "num_input_tokens_seen": 253136,
      "step": 37
    },
    {
      "epoch": 0.977491961414791,
      "grad_norm": 6.108020782470703,
      "learning_rate": 4.707368982147318e-06,
      "loss": 0.1372,
      "num_input_tokens_seen": 260160,
      "step": 38
    },
    {
      "epoch": 1.0032154340836013,
      "grad_norm": 1.9764081239700317,
      "learning_rate": 4.68654926784849e-06,
      "loss": 0.0816,
      "num_input_tokens_seen": 267120,
      "step": 39
    },
    {
      "epoch": 1.0289389067524115,
      "grad_norm": 2.5603554248809814,
      "learning_rate": 4.665063509461098e-06,
      "loss": 0.0743,
      "num_input_tokens_seen": 274112,
      "step": 40
    },
    {
      "epoch": 1.0546623794212218,
      "grad_norm": 5.457398414611816,
      "learning_rate": 4.642918251755281e-06,
      "loss": 0.072,
      "num_input_tokens_seen": 281136,
      "step": 41
    },
    {
      "epoch": 1.0803858520900322,
      "grad_norm": 4.488720417022705,
      "learning_rate": 4.620120240391065e-06,
      "loss": 0.0596,
      "num_input_tokens_seen": 288048,
      "step": 42
    },
    {
      "epoch": 1.1061093247588425,
      "grad_norm": 2.8057987689971924,
      "learning_rate": 4.596676419863561e-06,
      "loss": 0.0544,
      "num_input_tokens_seen": 295120,
      "step": 43
    },
    {
      "epoch": 1.1318327974276527,
      "grad_norm": 2.921278476715088,
      "learning_rate": 4.572593931387604e-06,
      "loss": 0.0342,
      "num_input_tokens_seen": 302000,
      "step": 44
    },
    {
      "epoch": 1.157556270096463,
      "grad_norm": 2.9281697273254395,
      "learning_rate": 4.54788011072248e-06,
      "loss": 0.0394,
      "num_input_tokens_seen": 308672,
      "step": 45
    },
    {
      "epoch": 1.1832797427652733,
      "grad_norm": 4.087917327880859,
      "learning_rate": 4.522542485937369e-06,
      "loss": 0.0196,
      "num_input_tokens_seen": 315600,
      "step": 46
    },
    {
      "epoch": 1.2090032154340835,
      "grad_norm": 3.390829086303711,
      "learning_rate": 4.496588775118232e-06,
      "loss": 0.0411,
      "num_input_tokens_seen": 322464,
      "step": 47
    },
    {
      "epoch": 1.234726688102894,
      "grad_norm": 4.550433158874512,
      "learning_rate": 4.470026884016805e-06,
      "loss": 0.0257,
      "num_input_tokens_seen": 329024,
      "step": 48
    },
    {
      "epoch": 1.2604501607717042,
      "grad_norm": 6.6677446365356445,
      "learning_rate": 4.442864903642428e-06,
      "loss": 0.0289,
      "num_input_tokens_seen": 336032,
      "step": 49
    },
    {
      "epoch": 1.2861736334405145,
      "grad_norm": 7.874734878540039,
      "learning_rate": 4.415111107797445e-06,
      "loss": 0.1193,
      "num_input_tokens_seen": 342704,
      "step": 50
    },
    {
      "epoch": 1.3118971061093248,
      "grad_norm": 5.971132278442383,
      "learning_rate": 4.386773950556931e-06,
      "loss": 0.0883,
      "num_input_tokens_seen": 349472,
      "step": 51
    },
    {
      "epoch": 1.337620578778135,
      "grad_norm": 4.0566725730896,
      "learning_rate": 4.357862063693486e-06,
      "loss": 0.0377,
      "num_input_tokens_seen": 356272,
      "step": 52
    },
    {
      "epoch": 1.3633440514469453,
      "grad_norm": 5.443573474884033,
      "learning_rate": 4.328384254047927e-06,
      "loss": 0.0602,
      "num_input_tokens_seen": 363040,
      "step": 53
    },
    {
      "epoch": 1.3890675241157555,
      "grad_norm": 6.038252353668213,
      "learning_rate": 4.2983495008466285e-06,
      "loss": 0.083,
      "num_input_tokens_seen": 369664,
      "step": 54
    },
    {
      "epoch": 1.414790996784566,
      "grad_norm": 2.8046696186065674,
      "learning_rate": 4.267766952966369e-06,
      "loss": 0.0358,
      "num_input_tokens_seen": 376704,
      "step": 55
    },
    {
      "epoch": 1.4405144694533762,
      "grad_norm": 2.451719284057617,
      "learning_rate": 4.236645926147493e-06,
      "loss": 0.0321,
      "num_input_tokens_seen": 383600,
      "step": 56
    },
    {
      "epoch": 1.4662379421221865,
      "grad_norm": 3.4111475944519043,
      "learning_rate": 4.204995900156247e-06,
      "loss": 0.0452,
      "num_input_tokens_seen": 390592,
      "step": 57
    },
    {
      "epoch": 1.4919614147909968,
      "grad_norm": 3.065139055252075,
      "learning_rate": 4.172826515897146e-06,
      "loss": 0.0915,
      "num_input_tokens_seen": 397360,
      "step": 58
    },
    {
      "epoch": 1.517684887459807,
      "grad_norm": 3.0692198276519775,
      "learning_rate": 4.140147572476269e-06,
      "loss": 0.0651,
      "num_input_tokens_seen": 404048,
      "step": 59
    },
    {
      "epoch": 1.5434083601286175,
      "grad_norm": 3.6201350688934326,
      "learning_rate": 4.106969024216348e-06,
      "loss": 0.0868,
      "num_input_tokens_seen": 410960,
      "step": 60
    },
    {
      "epoch": 1.5691318327974275,
      "grad_norm": 2.289900779724121,
      "learning_rate": 4.073300977624594e-06,
      "loss": 0.0554,
      "num_input_tokens_seen": 417888,
      "step": 61
    },
    {
      "epoch": 1.594855305466238,
      "grad_norm": 1.4743808507919312,
      "learning_rate": 4.039153688314146e-06,
      "loss": 0.0336,
      "num_input_tokens_seen": 424880,
      "step": 62
    },
    {
      "epoch": 1.6205787781350482,
      "grad_norm": 2.1891098022460938,
      "learning_rate": 4.0045375578801216e-06,
      "loss": 0.0455,
      "num_input_tokens_seen": 432000,
      "step": 63
    },
    {
      "epoch": 1.6463022508038585,
      "grad_norm": 1.8203191757202148,
      "learning_rate": 3.969463130731183e-06,
      "loss": 0.0406,
      "num_input_tokens_seen": 438672,
      "step": 64
    },
    {
      "epoch": 1.6720257234726688,
      "grad_norm": 2.453317165374756,
      "learning_rate": 3.933941090877615e-06,
      "loss": 0.0461,
      "num_input_tokens_seen": 445440,
      "step": 65
    },
    {
      "epoch": 1.697749196141479,
      "grad_norm": 2.035806894302368,
      "learning_rate": 3.897982258676867e-06,
      "loss": 0.0466,
      "num_input_tokens_seen": 452064,
      "step": 66
    },
    {
      "epoch": 1.7234726688102895,
      "grad_norm": 3.169175386428833,
      "learning_rate": 3.861597587537568e-06,
      "loss": 0.0382,
      "num_input_tokens_seen": 458992,
      "step": 67
    },
    {
      "epoch": 1.7491961414790995,
      "grad_norm": 2.0320234298706055,
      "learning_rate": 3.824798160583012e-06,
      "loss": 0.0426,
      "num_input_tokens_seen": 465568,
      "step": 68
    },
    {
      "epoch": 1.77491961414791,
      "grad_norm": 4.094840049743652,
      "learning_rate": 3.787595187275136e-06,
      "loss": 0.0264,
      "num_input_tokens_seen": 472496,
      "step": 69
    },
    {
      "epoch": 1.8006430868167203,
      "grad_norm": 4.245150089263916,
      "learning_rate": 3.7500000000000005e-06,
      "loss": 0.0567,
      "num_input_tokens_seen": 479392,
      "step": 70
    },
    {
      "epoch": 1.8263665594855305,
      "grad_norm": 3.226271152496338,
      "learning_rate": 3.7120240506158433e-06,
      "loss": 0.0688,
      "num_input_tokens_seen": 486416,
      "step": 71
    },
    {
      "epoch": 1.852090032154341,
      "grad_norm": 2.070322275161743,
      "learning_rate": 3.6736789069647273e-06,
      "loss": 0.0351,
      "num_input_tokens_seen": 492896,
      "step": 72
    },
    {
      "epoch": 1.877813504823151,
      "grad_norm": 2.4622035026550293,
      "learning_rate": 3.634976249348867e-06,
      "loss": 0.0246,
      "num_input_tokens_seen": 499760,
      "step": 73
    },
    {
      "epoch": 1.9035369774919615,
      "grad_norm": 3.902181625366211,
      "learning_rate": 3.595927866972694e-06,
      "loss": 0.0364,
      "num_input_tokens_seen": 506816,
      "step": 74
    },
    {
      "epoch": 1.9292604501607717,
      "grad_norm": 4.0147480964660645,
      "learning_rate": 3.556545654351749e-06,
      "loss": 0.0352,
      "num_input_tokens_seen": 513648,
      "step": 75
    },
    {
      "epoch": 1.954983922829582,
      "grad_norm": 3.6791763305664062,
      "learning_rate": 3.516841607689501e-06,
      "loss": 0.0915,
      "num_input_tokens_seen": 520480,
      "step": 76
    },
    {
      "epoch": 1.9807073954983923,
      "grad_norm": 1.641350269317627,
      "learning_rate": 3.476827821223184e-06,
      "loss": 0.0327,
      "num_input_tokens_seen": 527056,
      "step": 77
    },
    {
      "epoch": 2.0064308681672025,
      "grad_norm": 3.6227402687072754,
      "learning_rate": 3.436516483539781e-06,
      "loss": 0.0448,
      "num_input_tokens_seen": 534112,
      "step": 78
    },
    {
      "epoch": 2.032154340836013,
      "grad_norm": 1.654789924621582,
      "learning_rate": 3.39591987386325e-06,
      "loss": 0.0186,
      "num_input_tokens_seen": 541024,
      "step": 79
    },
    {
      "epoch": 2.057877813504823,
      "grad_norm": 2.0418519973754883,
      "learning_rate": 3.3550503583141726e-06,
      "loss": 0.0342,
      "num_input_tokens_seen": 547888,
      "step": 80
    },
    {
      "epoch": 2.0836012861736335,
      "grad_norm": 1.0242717266082764,
      "learning_rate": 3.313920386142892e-06,
      "loss": 0.0079,
      "num_input_tokens_seen": 554592,
      "step": 81
    },
    {
      "epoch": 2.1093247588424435,
      "grad_norm": 1.59933602809906,
      "learning_rate": 3.272542485937369e-06,
      "loss": 0.0177,
      "num_input_tokens_seen": 561296,
      "step": 82
    },
    {
      "epoch": 2.135048231511254,
      "grad_norm": 1.83909273147583,
      "learning_rate": 3.230929261806842e-06,
      "loss": 0.0139,
      "num_input_tokens_seen": 567872,
      "step": 83
    },
    {
      "epoch": 2.1607717041800645,
      "grad_norm": 1.933048963546753,
      "learning_rate": 3.189093389542498e-06,
      "loss": 0.0103,
      "num_input_tokens_seen": 575072,
      "step": 84
    },
    {
      "epoch": 2.1864951768488745,
      "grad_norm": 2.1397433280944824,
      "learning_rate": 3.147047612756302e-06,
      "loss": 0.0221,
      "num_input_tokens_seen": 582080,
      "step": 85
    },
    {
      "epoch": 2.212218649517685,
      "grad_norm": 0.7958673238754272,
      "learning_rate": 3.1048047389991693e-06,
      "loss": 0.0021,
      "num_input_tokens_seen": 588816,
      "step": 86
    },
    {
      "epoch": 2.237942122186495,
      "grad_norm": 1.8060617446899414,
      "learning_rate": 3.062377635859663e-06,
      "loss": 0.011,
      "num_input_tokens_seen": 596032,
      "step": 87
    },
    {
      "epoch": 2.2636655948553055,
      "grad_norm": 1.7997971773147583,
      "learning_rate": 3.019779227044398e-06,
      "loss": 0.0081,
      "num_input_tokens_seen": 602672,
      "step": 88
    },
    {
      "epoch": 2.289389067524116,
      "grad_norm": 2.0229434967041016,
      "learning_rate": 2.9770224884413625e-06,
      "loss": 0.0149,
      "num_input_tokens_seen": 609424,
      "step": 89
    },
    {
      "epoch": 2.315112540192926,
      "grad_norm": 0.5248342156410217,
      "learning_rate": 2.9341204441673267e-06,
      "loss": 0.001,
      "num_input_tokens_seen": 616448,
      "step": 90
    },
    {
      "epoch": 2.3408360128617365,
      "grad_norm": 1.1899443864822388,
      "learning_rate": 2.8910861626005774e-06,
      "loss": 0.007,
      "num_input_tokens_seen": 623296,
      "step": 91
    },
    {
      "epoch": 2.3665594855305465,
      "grad_norm": 3.1920082569122314,
      "learning_rate": 2.847932752400164e-06,
      "loss": 0.0089,
      "num_input_tokens_seen": 630064,
      "step": 92
    },
    {
      "epoch": 2.392282958199357,
      "grad_norm": 0.37337368726730347,
      "learning_rate": 2.804673358512869e-06,
      "loss": 0.0013,
      "num_input_tokens_seen": 637088,
      "step": 93
    },
    {
      "epoch": 2.418006430868167,
      "grad_norm": 4.124266147613525,
      "learning_rate": 2.761321158169134e-06,
      "loss": 0.0267,
      "num_input_tokens_seen": 644080,
      "step": 94
    },
    {
      "epoch": 2.4437299035369775,
      "grad_norm": 1.9108864068984985,
      "learning_rate": 2.717889356869146e-06,
      "loss": 0.0171,
      "num_input_tokens_seen": 650848,
      "step": 95
    },
    {
      "epoch": 2.469453376205788,
      "grad_norm": 3.225116729736328,
      "learning_rate": 2.6743911843603134e-06,
      "loss": 0.0375,
      "num_input_tokens_seen": 657424,
      "step": 96
    },
    {
      "epoch": 2.495176848874598,
      "grad_norm": 1.8133978843688965,
      "learning_rate": 2.6308398906073603e-06,
      "loss": 0.0101,
      "num_input_tokens_seen": 664128,
      "step": 97
    },
    {
      "epoch": 2.5209003215434085,
      "grad_norm": 3.179337739944458,
      "learning_rate": 2.587248741756253e-06,
      "loss": 0.0282,
      "num_input_tokens_seen": 671120,
      "step": 98
    },
    {
      "epoch": 2.5466237942122185,
      "grad_norm": 1.5852500200271606,
      "learning_rate": 2.543631016093209e-06,
      "loss": 0.0069,
      "num_input_tokens_seen": 677920,
      "step": 99
    },
    {
      "epoch": 2.572347266881029,
      "grad_norm": 2.1428685188293457,
      "learning_rate": 2.5e-06,
      "loss": 0.0135,
      "num_input_tokens_seen": 684960,
      "step": 100
    },
    {
      "epoch": 2.598070739549839,
      "grad_norm": 1.0775537490844727,
      "learning_rate": 2.4563689839067913e-06,
      "loss": 0.0062,
      "num_input_tokens_seen": 691856,
      "step": 101
    },
    {
      "epoch": 2.6237942122186495,
      "grad_norm": 1.5073256492614746,
      "learning_rate": 2.4127512582437486e-06,
      "loss": 0.005,
      "num_input_tokens_seen": 698512,
      "step": 102
    },
    {
      "epoch": 2.64951768488746,
      "grad_norm": 1.7251828908920288,
      "learning_rate": 2.3691601093926406e-06,
      "loss": 0.0285,
      "num_input_tokens_seen": 705440,
      "step": 103
    },
    {
      "epoch": 2.67524115755627,
      "grad_norm": 2.271348237991333,
      "learning_rate": 2.325608815639687e-06,
      "loss": 0.0225,
      "num_input_tokens_seen": 712528,
      "step": 104
    },
    {
      "epoch": 2.7009646302250805,
      "grad_norm": 0.9558768272399902,
      "learning_rate": 2.2821106431308546e-06,
      "loss": 0.028,
      "num_input_tokens_seen": 719168,
      "step": 105
    },
    {
      "epoch": 2.7266881028938905,
      "grad_norm": 2.1378886699676514,
      "learning_rate": 2.238678841830867e-06,
      "loss": 0.0176,
      "num_input_tokens_seen": 725904,
      "step": 106
    },
    {
      "epoch": 2.752411575562701,
      "grad_norm": 0.9674012660980225,
      "learning_rate": 2.195326641487132e-06,
      "loss": 0.0047,
      "num_input_tokens_seen": 732480,
      "step": 107
    },
    {
      "epoch": 2.778135048231511,
      "grad_norm": 0.6068861484527588,
      "learning_rate": 2.1520672475998374e-06,
      "loss": 0.0135,
      "num_input_tokens_seen": 739184,
      "step": 108
    },
    {
      "epoch": 2.8038585209003215,
      "grad_norm": 0.833519458770752,
      "learning_rate": 2.1089138373994226e-06,
      "loss": 0.0044,
      "num_input_tokens_seen": 746320,
      "step": 109
    },
    {
      "epoch": 2.829581993569132,
      "grad_norm": 1.4018948078155518,
      "learning_rate": 2.0658795558326745e-06,
      "loss": 0.0252,
      "num_input_tokens_seen": 753136,
      "step": 110
    },
    {
      "epoch": 2.855305466237942,
      "grad_norm": 1.5698492527008057,
      "learning_rate": 2.022977511558638e-06,
      "loss": 0.0249,
      "num_input_tokens_seen": 760096,
      "step": 111
    },
    {
      "epoch": 2.8810289389067525,
      "grad_norm": 2.051377534866333,
      "learning_rate": 1.9802207729556023e-06,
      "loss": 0.0146,
      "num_input_tokens_seen": 767104,
      "step": 112
    },
    {
      "epoch": 2.906752411575563,
      "grad_norm": 0.48683854937553406,
      "learning_rate": 1.937622364140338e-06,
      "loss": 0.0044,
      "num_input_tokens_seen": 773872,
      "step": 113
    },
    {
      "epoch": 2.932475884244373,
      "grad_norm": 0.9895896911621094,
      "learning_rate": 1.895195261000831e-06,
      "loss": 0.0054,
      "num_input_tokens_seen": 780736,
      "step": 114
    },
    {
      "epoch": 2.958199356913183,
      "grad_norm": 0.8025338053703308,
      "learning_rate": 1.852952387243698e-06,
      "loss": 0.0106,
      "num_input_tokens_seen": 787424,
      "step": 115
    },
    {
      "epoch": 2.9839228295819935,
      "grad_norm": 1.7318856716156006,
      "learning_rate": 1.8109066104575023e-06,
      "loss": 0.0167,
      "num_input_tokens_seen": 794224,
      "step": 116
    },
    {
      "epoch": 3.009646302250804,
      "grad_norm": 1.5407195091247559,
      "learning_rate": 1.7690707381931585e-06,
      "loss": 0.009,
      "num_input_tokens_seen": 801088,
      "step": 117
    },
    {
      "epoch": 3.035369774919614,
      "grad_norm": 0.25947141647338867,
      "learning_rate": 1.7274575140626318e-06,
      "loss": 0.0024,
      "num_input_tokens_seen": 808048,
      "step": 118
    },
    {
      "epoch": 3.0610932475884245,
      "grad_norm": 2.6516857147216797,
      "learning_rate": 1.686079613857109e-06,
      "loss": 0.0235,
      "num_input_tokens_seen": 814800,
      "step": 119
    },
    {
      "epoch": 3.0868167202572345,
      "grad_norm": 0.9633734822273254,
      "learning_rate": 1.6449496416858285e-06,
      "loss": 0.0179,
      "num_input_tokens_seen": 821536,
      "step": 120
    },
    {
      "epoch": 3.112540192926045,
      "grad_norm": 1.0766749382019043,
      "learning_rate": 1.6040801261367494e-06,
      "loss": 0.0059,
      "num_input_tokens_seen": 828128,
      "step": 121
    },
    {
      "epoch": 3.1382636655948555,
      "grad_norm": 0.2934500277042389,
      "learning_rate": 1.56348351646022e-06,
      "loss": 0.0017,
      "num_input_tokens_seen": 834608,
      "step": 122
    },
    {
      "epoch": 3.1639871382636655,
      "grad_norm": 0.3029981553554535,
      "learning_rate": 1.5231721787768162e-06,
      "loss": 0.0018,
      "num_input_tokens_seen": 841360,
      "step": 123
    },
    {
      "epoch": 3.189710610932476,
      "grad_norm": 0.44817763566970825,
      "learning_rate": 1.4831583923105e-06,
      "loss": 0.0032,
      "num_input_tokens_seen": 848496,
      "step": 124
    },
    {
      "epoch": 3.215434083601286,
      "grad_norm": 0.21245715022087097,
      "learning_rate": 1.443454345648252e-06,
      "loss": 0.0019,
      "num_input_tokens_seen": 855424,
      "step": 125
    },
    {
      "epoch": 3.2411575562700965,
      "grad_norm": 0.22589029371738434,
      "learning_rate": 1.4040721330273063e-06,
      "loss": 0.0014,
      "num_input_tokens_seen": 862224,
      "step": 126
    },
    {
      "epoch": 3.266881028938907,
      "grad_norm": 0.5972980856895447,
      "learning_rate": 1.3650237506511333e-06,
      "loss": 0.0052,
      "num_input_tokens_seen": 869312,
      "step": 127
    },
    {
      "epoch": 3.292604501607717,
      "grad_norm": 0.06931939721107483,
      "learning_rate": 1.3263210930352737e-06,
      "loss": 0.0005,
      "num_input_tokens_seen": 876272,
      "step": 128
    },
    {
      "epoch": 3.3183279742765275,
      "grad_norm": 1.2623660564422607,
      "learning_rate": 1.2879759493841577e-06,
      "loss": 0.0131,
      "num_input_tokens_seen": 883072,
      "step": 129
    },
    {
      "epoch": 3.3440514469453375,
      "grad_norm": 0.5440672636032104,
      "learning_rate": 1.2500000000000007e-06,
      "loss": 0.0009,
      "num_input_tokens_seen": 889920,
      "step": 130
    },
    {
      "epoch": 3.369774919614148,
      "grad_norm": 0.6807874441146851,
      "learning_rate": 1.2124048127248644e-06,
      "loss": 0.0057,
      "num_input_tokens_seen": 896896,
      "step": 131
    },
    {
      "epoch": 3.395498392282958,
      "grad_norm": 0.03898243606090546,
      "learning_rate": 1.1752018394169882e-06,
      "loss": 0.0002,
      "num_input_tokens_seen": 903600,
      "step": 132
    },
    {
      "epoch": 3.4212218649517685,
      "grad_norm": 0.0243828147649765,
      "learning_rate": 1.1384024124624324e-06,
      "loss": 0.0002,
      "num_input_tokens_seen": 910208,
      "step": 133
    },
    {
      "epoch": 3.446945337620579,
      "grad_norm": 2.5577075481414795,
      "learning_rate": 1.1020177413231334e-06,
      "loss": 0.0145,
      "num_input_tokens_seen": 917328,
      "step": 134
    },
    {
      "epoch": 3.472668810289389,
      "grad_norm": 1.2599172592163086,
      "learning_rate": 1.0660589091223854e-06,
      "loss": 0.0034,
      "num_input_tokens_seen": 924192,
      "step": 135
    },
    {
      "epoch": 3.4983922829581995,
      "grad_norm": 1.3292278051376343,
      "learning_rate": 1.0305368692688175e-06,
      "loss": 0.0156,
      "num_input_tokens_seen": 930784,
      "step": 136
    },
    {
      "epoch": 3.5241157556270095,
      "grad_norm": 0.2977862060070038,
      "learning_rate": 9.95462442119879e-07,
      "loss": 0.0013,
      "num_input_tokens_seen": 937856,
      "step": 137
    },
    {
      "epoch": 3.54983922829582,
      "grad_norm": 0.3527968227863312,
      "learning_rate": 9.608463116858544e-07,
      "loss": 0.0007,
      "num_input_tokens_seen": 944640,
      "step": 138
    },
    {
      "epoch": 3.57556270096463,
      "grad_norm": 0.16851164400577545,
      "learning_rate": 9.266990223754069e-07,
      "loss": 0.0005,
      "num_input_tokens_seen": 951504,
      "step": 139
    },
    {
      "epoch": 3.6012861736334405,
      "grad_norm": 0.4394027292728424,
      "learning_rate": 8.930309757836517e-07,
      "loss": 0.0034,
      "num_input_tokens_seen": 958240,
      "step": 140
    },
    {
      "epoch": 3.627009646302251,
      "grad_norm": 0.0209315475076437,
      "learning_rate": 8.598524275237321e-07,
      "loss": 0.0001,
      "num_input_tokens_seen": 964912,
      "step": 141
    },
    {
      "epoch": 3.652733118971061,
      "grad_norm": 0.35701486468315125,
      "learning_rate": 8.271734841028553e-07,
      "loss": 0.001,
      "num_input_tokens_seen": 971872,
      "step": 142
    },
    {
      "epoch": 3.6784565916398715,
      "grad_norm": 2.0450756549835205,
      "learning_rate": 7.950040998437541e-07,
      "loss": 0.0123,
      "num_input_tokens_seen": 978640,
      "step": 143
    },
    {
      "epoch": 3.7041800643086815,
      "grad_norm": 0.057205893099308014,
      "learning_rate": 7.633540738525066e-07,
      "loss": 0.0002,
      "num_input_tokens_seen": 985328,
      "step": 144
    },
    {
      "epoch": 3.729903536977492,
      "grad_norm": 1.7251015901565552,
      "learning_rate": 7.322330470336314e-07,
      "loss": 0.011,
      "num_input_tokens_seen": 992224,
      "step": 145
    },
    {
      "epoch": 3.755627009646302,
      "grad_norm": 0.417669415473938,
      "learning_rate": 7.016504991533727e-07,
      "loss": 0.0008,
      "num_input_tokens_seen": 998688,
      "step": 146
    },
    {
      "epoch": 3.7813504823151125,
      "grad_norm": 0.06796720623970032,
      "learning_rate": 6.716157459520739e-07,
      "loss": 0.0003,
      "num_input_tokens_seen": 1006032,
      "step": 147
    },
    {
      "epoch": 3.807073954983923,
      "grad_norm": 0.5570574998855591,
      "learning_rate": 6.421379363065142e-07,
      "loss": 0.0018,
      "num_input_tokens_seen": 1012944,
      "step": 148
    },
    {
      "epoch": 3.832797427652733,
      "grad_norm": 0.557479977607727,
      "learning_rate": 6.1322604944307e-07,
      "loss": 0.0016,
      "num_input_tokens_seen": 1019856,
      "step": 149
    },
    {
      "epoch": 3.8585209003215435,
      "grad_norm": 0.48776909708976746,
      "learning_rate": 5.848888922025553e-07,
      "loss": 0.0021,
      "num_input_tokens_seen": 1026656,
      "step": 150
    },
    {
      "epoch": 3.884244372990354,
      "grad_norm": 0.025394350290298462,
      "learning_rate": 5.571350963575728e-07,
      "loss": 0.0001,
      "num_input_tokens_seen": 1033696,
      "step": 151
    },
    {
      "epoch": 3.909967845659164,
      "grad_norm": 0.03195232152938843,
      "learning_rate": 5.299731159831953e-07,
      "loss": 0.0001,
      "num_input_tokens_seen": 1040240,
      "step": 152
    },
    {
      "epoch": 3.935691318327974,
      "grad_norm": 0.0676019936800003,
      "learning_rate": 5.034112248817685e-07,
      "loss": 0.0003,
      "num_input_tokens_seen": 1046992,
      "step": 153
    },
    {
      "epoch": 3.9614147909967845,
      "grad_norm": 0.04871657118201256,
      "learning_rate": 4.774575140626317e-07,
      "loss": 0.0002,
      "num_input_tokens_seen": 1053936,
      "step": 154
    },
    {
      "epoch": 3.987138263665595,
      "grad_norm": 0.08272106945514679,
      "learning_rate": 4.5211988927752026e-07,
      "loss": 0.0002,
      "num_input_tokens_seen": 1060576,
      "step": 155
    },
    {
      "epoch": 4.012861736334405,
      "grad_norm": 0.009170151315629482,
      "learning_rate": 4.27406068612396e-07,
      "loss": 0.0,
      "num_input_tokens_seen": 1067648,
      "step": 156
    },
    {
      "epoch": 4.038585209003215,
      "grad_norm": 0.10544967651367188,
      "learning_rate": 4.033235801364402e-07,
      "loss": 0.0003,
      "num_input_tokens_seen": 1074512,
      "step": 157
    },
    {
      "epoch": 4.064308681672026,
      "grad_norm": 0.04709336906671524,
      "learning_rate": 3.798797596089351e-07,
      "loss": 0.0002,
      "num_input_tokens_seen": 1081296,
      "step": 158
    },
    {
      "epoch": 4.090032154340836,
      "grad_norm": 0.042879387736320496,
      "learning_rate": 3.5708174824471947e-07,
      "loss": 0.0001,
      "num_input_tokens_seen": 1087888,
      "step": 159
    },
    {
      "epoch": 4.115755627009646,
      "grad_norm": 0.03649509325623512,
      "learning_rate": 3.3493649053890325e-07,
      "loss": 0.0001,
      "num_input_tokens_seen": 1094960,
      "step": 160
    },
    {
      "epoch": 4.141479099678457,
      "grad_norm": 0.01273419987410307,
      "learning_rate": 3.134507321515107e-07,
      "loss": 0.0001,
      "num_input_tokens_seen": 1101776,
      "step": 161
    },
    {
      "epoch": 4.167202572347267,
      "grad_norm": 0.006498055998235941,
      "learning_rate": 2.9263101785268253e-07,
      "loss": 0.0,
      "num_input_tokens_seen": 1108736,
      "step": 162
    },
    {
      "epoch": 4.192926045016077,
      "grad_norm": 0.027757082134485245,
      "learning_rate": 2.7248368952908055e-07,
      "loss": 0.0001,
      "num_input_tokens_seen": 1115744,
      "step": 163
    },
    {
      "epoch": 4.218649517684887,
      "grad_norm": 0.005632288288325071,
      "learning_rate": 2.53014884252083e-07,
      "loss": 0.0,
      "num_input_tokens_seen": 1122592,
      "step": 164
    },
    {
      "epoch": 4.244372990353698,
      "grad_norm": 0.014449907466769218,
      "learning_rate": 2.3423053240837518e-07,
      "loss": 0.0001,
      "num_input_tokens_seen": 1129136,
      "step": 165
    },
    {
      "epoch": 4.270096463022508,
      "grad_norm": 0.013245908543467522,
      "learning_rate": 2.1613635589349756e-07,
      "loss": 0.0001,
      "num_input_tokens_seen": 1136080,
      "step": 166
    },
    {
      "epoch": 4.295819935691318,
      "grad_norm": 0.12783974409103394,
      "learning_rate": 1.9873786636889908e-07,
      "loss": 0.0002,
      "num_input_tokens_seen": 1142976,
      "step": 167
    },
    {
      "epoch": 4.321543408360129,
      "grad_norm": 0.08086368441581726,
      "learning_rate": 1.8204036358303173e-07,
      "loss": 0.0002,
      "num_input_tokens_seen": 1149712,
      "step": 168
    },
    {
      "epoch": 4.347266881028939,
      "grad_norm": 0.018286822363734245,
      "learning_rate": 1.6604893375699594e-07,
      "loss": 0.0001,
      "num_input_tokens_seen": 1156336,
      "step": 169
    },
    {
      "epoch": 4.372990353697749,
      "grad_norm": 0.009972570464015007,
      "learning_rate": 1.507684480352292e-07,
      "loss": 0.0001,
      "num_input_tokens_seen": 1163120,
      "step": 170
    },
    {
      "epoch": 4.39871382636656,
      "grad_norm": 0.9909194707870483,
      "learning_rate": 1.362035610017079e-07,
      "loss": 0.0115,
      "num_input_tokens_seen": 1170032,
      "step": 171
    },
    {
      "epoch": 4.42443729903537,
      "grad_norm": 0.21059785783290863,
      "learning_rate": 1.223587092621162e-07,
      "loss": 0.0005,
      "num_input_tokens_seen": 1176832,
      "step": 172
    },
    {
      "epoch": 4.45016077170418,
      "grad_norm": 0.08872511237859726,
      "learning_rate": 1.0923811009241142e-07,
      "loss": 0.0003,
      "num_input_tokens_seen": 1183856,
      "step": 173
    },
    {
      "epoch": 4.47588424437299,
      "grad_norm": 0.7515408396720886,
      "learning_rate": 9.684576015420277e-08,
      "loss": 0.005,
      "num_input_tokens_seen": 1190544,
      "step": 174
    },
    {
      "epoch": 4.501607717041801,
      "grad_norm": 0.0772569328546524,
      "learning_rate": 8.518543427732951e-08,
      "loss": 0.0003,
      "num_input_tokens_seen": 1197168,
      "step": 175
    },
    {
      "epoch": 4.527331189710611,
      "grad_norm": 0.19207893311977386,
      "learning_rate": 7.426068431000883e-08,
      "loss": 0.0008,
      "num_input_tokens_seen": 1203776,
      "step": 176
    },
    {
      "epoch": 4.553054662379421,
      "grad_norm": 0.007837573066353798,
      "learning_rate": 6.407483803691216e-08,
      "loss": 0.0,
      "num_input_tokens_seen": 1210816,
      "step": 177
    },
    {
      "epoch": 4.578778135048232,
      "grad_norm": 0.009602434933185577,
      "learning_rate": 5.463099816548578e-08,
      "loss": 0.0,
      "num_input_tokens_seen": 1217696,
      "step": 178
    },
    {
      "epoch": 4.604501607717042,
      "grad_norm": 1.3553417921066284,
      "learning_rate": 4.593204138084006e-08,
      "loss": 0.0042,
      "num_input_tokens_seen": 1224704,
      "step": 179
    },
    {
      "epoch": 4.630225080385852,
      "grad_norm": 0.17981038987636566,
      "learning_rate": 3.798061746947995e-08,
      "loss": 0.0004,
      "num_input_tokens_seen": 1231664,
      "step": 180
    },
    {
      "epoch": 4.655948553054662,
      "grad_norm": 0.04047521948814392,
      "learning_rate": 3.077914851215585e-08,
      "loss": 0.0001,
      "num_input_tokens_seen": 1238240,
      "step": 181
    },
    {
      "epoch": 4.681672025723473,
      "grad_norm": 0.02045159973204136,
      "learning_rate": 2.4329828146074096e-08,
      "loss": 0.0001,
      "num_input_tokens_seen": 1244832,
      "step": 182
    },
    {
      "epoch": 4.707395498392283,
      "grad_norm": 0.10965674370527267,
      "learning_rate": 1.8634620896695044e-08,
      "loss": 0.0004,
      "num_input_tokens_seen": 1251296,
      "step": 183
    },
    {
      "epoch": 4.733118971061093,
      "grad_norm": 0.008195169270038605,
      "learning_rate": 1.3695261579316776e-08,
      "loss": 0.0,
      "num_input_tokens_seen": 1257968,
      "step": 184
    },
    {
      "epoch": 4.758842443729904,
      "grad_norm": 0.25205764174461365,
      "learning_rate": 9.513254770636138e-09,
      "loss": 0.0009,
      "num_input_tokens_seen": 1264928,
      "step": 185
    },
    {
      "epoch": 4.784565916398714,
      "grad_norm": 0.031606338918209076,
      "learning_rate": 6.089874350439507e-09,
      "loss": 0.0001,
      "num_input_tokens_seen": 1271792,
      "step": 186
    },
    {
      "epoch": 4.810289389067524,
      "grad_norm": 0.19231323897838593,
      "learning_rate": 3.4261631135654174e-09,
      "loss": 0.0004,
      "num_input_tokens_seen": 1279024,
      "step": 187
    },
    {
      "epoch": 4.836012861736334,
      "grad_norm": 0.06766192615032196,
      "learning_rate": 1.5229324522605949e-09,
      "loss": 0.0002,
      "num_input_tokens_seen": 1285792,
      "step": 188
    },
    {
      "epoch": 4.861736334405145,
      "grad_norm": 0.005693785380572081,
      "learning_rate": 3.8076210902182607e-10,
      "loss": 0.0,
      "num_input_tokens_seen": 1292576,
      "step": 189
    },
    {
      "epoch": 4.887459807073955,
      "grad_norm": 0.02735370770096779,
      "learning_rate": 0.0,
      "loss": 0.0001,
      "num_input_tokens_seen": 1299392,
      "step": 190
    },
    {
      "epoch": 4.887459807073955,
      "num_input_tokens_seen": 1299392,
      "step": 190,
      "total_flos": 5.151317702790349e+16,
      "train_loss": 0.3433768034317166,
      "train_runtime": 2162.0959,
      "train_samples_per_second": 11.489,
      "train_steps_per_second": 0.088
    }
  ],
  "logging_steps": 1,
  "max_steps": 190,
  "num_input_tokens_seen": 1299392,
  "num_train_epochs": 5,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.151317702790349e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}