{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9982668977469671,
  "eval_steps": 500,
  "global_step": 288,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {"epoch": 0.0034662045060658577, "grad_norm": 0.322265625, "learning_rate": 4.000000000000001e-06, "loss": 1.5121, "step": 1},
    {"epoch": 0.006932409012131715, "grad_norm": 0.66015625, "learning_rate": 8.000000000000001e-06, "loss": 1.5791, "step": 2},
    {"epoch": 0.010398613518197574, "grad_norm": 0.330078125, "learning_rate": 1.2e-05, "loss": 1.5152, "step": 3},
    {"epoch": 0.01386481802426343, "grad_norm": 0.32421875, "learning_rate": 1.6000000000000003e-05, "loss": 1.5108, "step": 4},
    {"epoch": 0.01733102253032929, "grad_norm": 0.326171875, "learning_rate": 2e-05, "loss": 1.516, "step": 5},
    {"epoch": 0.02079722703639515, "grad_norm": 0.32421875, "learning_rate": 2.4e-05, "loss": 1.536, "step": 6},
    {"epoch": 0.024263431542461005, "grad_norm": 0.34765625, "learning_rate": 2.8000000000000003e-05, "loss": 1.6207, "step": 7},
    {"epoch": 0.02772963604852686, "grad_norm": 0.30078125, "learning_rate": 3.2000000000000005e-05, "loss": 1.5638, "step": 8},
    {"epoch": 0.03119584055459272, "grad_norm": 0.234375, "learning_rate": 3.6e-05, "loss": 1.508, "step": 9},
    {"epoch": 0.03466204506065858, "grad_norm": 0.2578125, "learning_rate": 4e-05, "loss": 1.4821, "step": 10},
    {"epoch": 0.038128249566724434, "grad_norm": 0.287109375, "learning_rate": 4.4000000000000006e-05, "loss": 1.4769, "step": 11},
    {"epoch": 0.0415944540727903, "grad_norm": 0.3203125, "learning_rate": 4.8e-05, "loss": 1.4965, "step": 12},
    {"epoch": 0.045060658578856154, "grad_norm": 0.2734375, "learning_rate": 5.2000000000000004e-05, "loss": 1.4346, "step": 13},
    {"epoch": 0.04852686308492201, "grad_norm": 0.2392578125, "learning_rate": 5.6000000000000006e-05, "loss": 1.4573, "step": 14},
    {"epoch": 0.05199306759098787, "grad_norm": 0.263671875, "learning_rate": 6e-05, "loss": 1.3925, "step": 15},
    {"epoch": 0.05545927209705372, "grad_norm": 0.2392578125, "learning_rate": 6.400000000000001e-05, "loss": 1.45, "step": 16},
    {"epoch": 0.058925476603119586, "grad_norm": 0.20703125, "learning_rate": 6.800000000000001e-05, "loss": 1.4752, "step": 17},
    {"epoch": 0.06239168110918544, "grad_norm": 0.1630859375, "learning_rate": 7.2e-05, "loss": 1.2915, "step": 18},
    {"epoch": 0.0658578856152513, "grad_norm": 0.1640625, "learning_rate": 7.6e-05, "loss": 1.4101, "step": 19},
    {"epoch": 0.06932409012131716, "grad_norm": 0.1328125, "learning_rate": 8e-05, "loss": 1.3524, "step": 20},
    {"epoch": 0.07279029462738301, "grad_norm": 0.1279296875, "learning_rate": 8.4e-05, "loss": 1.3107, "step": 21},
    {"epoch": 0.07625649913344887, "grad_norm": 0.263671875, "learning_rate": 8.800000000000001e-05, "loss": 1.4215, "step": 22},
    {"epoch": 0.07972270363951472, "grad_norm": 0.1435546875, "learning_rate": 9.200000000000001e-05, "loss": 1.364, "step": 23},
    {"epoch": 0.0831889081455806, "grad_norm": 0.154296875, "learning_rate": 9.6e-05, "loss": 1.3493, "step": 24},
    {"epoch": 0.08665511265164645, "grad_norm": 0.12451171875, "learning_rate": 0.0001, "loss": 1.3874, "step": 25},
    {"epoch": 0.09012131715771231, "grad_norm": 0.1220703125, "learning_rate": 0.00010400000000000001, "loss": 1.4087, "step": 26},
    {"epoch": 0.09358752166377816, "grad_norm": 0.15234375, "learning_rate": 0.00010800000000000001, "loss": 1.3556, "step": 27},
    {"epoch": 0.09705372616984402, "grad_norm": 0.12158203125, "learning_rate": 0.00011200000000000001, "loss": 1.3657, "step": 28},
    {"epoch": 0.10051993067590988, "grad_norm": 0.12060546875, "learning_rate": 0.000116, "loss": 1.3307, "step": 29},
    {"epoch": 0.10398613518197573, "grad_norm": 0.134765625, "learning_rate": 0.00012, "loss": 1.389, "step": 30},
    {"epoch": 0.10745233968804159, "grad_norm": 0.11865234375, "learning_rate": 0.000124, "loss": 1.3153, "step": 31},
    {"epoch": 0.11091854419410745, "grad_norm": 0.1162109375, "learning_rate": 0.00012800000000000002, "loss": 1.2661, "step": 32},
    {"epoch": 0.11438474870017332, "grad_norm": 0.1240234375, "learning_rate": 0.000132, "loss": 1.3624, "step": 33},
    {"epoch": 0.11785095320623917, "grad_norm": 0.1640625, "learning_rate": 0.00013600000000000003, "loss": 1.3389, "step": 34},
    {"epoch": 0.12131715771230503, "grad_norm": 0.10888671875, "learning_rate": 0.00014, "loss": 1.257, "step": 35},
    {"epoch": 0.12478336221837089, "grad_norm": 0.11083984375, "learning_rate": 0.000144, "loss": 1.2892, "step": 36},
    {"epoch": 0.12824956672443674, "grad_norm": 0.2890625, "learning_rate": 0.000148, "loss": 1.3073, "step": 37},
    {"epoch": 0.1317157712305026, "grad_norm": 0.11962890625, "learning_rate": 0.000152, "loss": 1.2713, "step": 38},
    {"epoch": 0.13518197573656845, "grad_norm": 0.142578125, "learning_rate": 0.00015600000000000002, "loss": 1.3651, "step": 39},
    {"epoch": 0.1386481802426343, "grad_norm": 0.123046875, "learning_rate": 0.00016, "loss": 1.376, "step": 40},
    {"epoch": 0.14211438474870017, "grad_norm": 0.146484375, "learning_rate": 0.000164, "loss": 1.4392, "step": 41},
    {"epoch": 0.14558058925476602, "grad_norm": 0.169921875, "learning_rate": 0.000168, "loss": 1.3044, "step": 42},
    {"epoch": 0.14904679376083188, "grad_norm": 0.1484375, "learning_rate": 0.000172, "loss": 1.3437, "step": 43},
    {"epoch": 0.15251299826689774, "grad_norm": 0.1640625, "learning_rate": 0.00017600000000000002, "loss": 1.3892, "step": 44},
    {"epoch": 0.1559792027729636, "grad_norm": 0.12451171875, "learning_rate": 0.00018, "loss": 1.3279, "step": 45},
    {"epoch": 0.15944540727902945, "grad_norm": 0.15625, "learning_rate": 0.00018400000000000003, "loss": 1.3208, "step": 46},
    {"epoch": 0.16291161178509533, "grad_norm": 0.1416015625, "learning_rate": 0.000188, "loss": 1.3207, "step": 47},
    {"epoch": 0.1663778162911612, "grad_norm": 0.1337890625, "learning_rate": 0.000192, "loss": 1.325, "step": 48},
    {"epoch": 0.16984402079722705, "grad_norm": 0.162109375, "learning_rate": 0.000196, "loss": 1.3512, "step": 49},
    {"epoch": 0.1733102253032929, "grad_norm": 0.12890625, "learning_rate": 0.0002, "loss": 1.2986, "step": 50},
    {"epoch": 0.17677642980935876, "grad_norm": 0.40234375, "learning_rate": 0.00019999821640202586, "loss": 1.3212, "step": 51},
    {"epoch": 0.18024263431542462, "grad_norm": 0.1376953125, "learning_rate": 0.00019999286567172776, "loss": 1.3269, "step": 52},
    {"epoch": 0.18370883882149047, "grad_norm": 0.13671875, "learning_rate": 0.00019998394799997682, "loss": 1.2831, "step": 53},
    {"epoch": 0.18717504332755633, "grad_norm": 0.1591796875, "learning_rate": 0.00019997146370488383, "loss": 1.2762, "step": 54},
    {"epoch": 0.19064124783362218, "grad_norm": 0.1279296875, "learning_rate": 0.00019995541323178807, "loss": 1.2278, "step": 55},
    {"epoch": 0.19410745233968804, "grad_norm": 0.15234375, "learning_rate": 0.00019993579715324135, "loss": 1.2913, "step": 56},
    {"epoch": 0.1975736568457539, "grad_norm": 0.1298828125, "learning_rate": 0.00019991261616898767, "loss": 1.2971, "step": 57},
    {"epoch": 0.20103986135181975, "grad_norm": 0.162109375, "learning_rate": 0.00019988587110593808, "loss": 1.304, "step": 58},
    {"epoch": 0.2045060658578856, "grad_norm": 0.12890625, "learning_rate": 0.00019985556291814147, "loss": 1.2796, "step": 59},
    {"epoch": 0.20797227036395147, "grad_norm": 0.1728515625, "learning_rate": 0.00019982169268675023, "loss": 1.3182, "step": 60},
    {"epoch": 0.21143847487001732, "grad_norm": 0.3828125, "learning_rate": 0.00019978426161998194, "loss": 1.4574, "step": 61},
    {"epoch": 0.21490467937608318, "grad_norm": 0.13671875, "learning_rate": 0.00019974327105307604, "loss": 1.284, "step": 62},
    {"epoch": 0.21837088388214904, "grad_norm": 0.138671875, "learning_rate": 0.00019969872244824638, "loss": 1.2724, "step": 63},
    {"epoch": 0.2218370883882149, "grad_norm": 0.546875, "learning_rate": 0.00019965061739462902, "loss": 1.28, "step": 64},
    {"epoch": 0.22530329289428075, "grad_norm": 0.1220703125, "learning_rate": 0.00019959895760822546, "loss": 1.219, "step": 65},
    {"epoch": 0.22876949740034663, "grad_norm": 0.126953125, "learning_rate": 0.00019954374493184152, "loss": 1.2601, "step": 66},
    {"epoch": 0.2322357019064125, "grad_norm": 0.134765625, "learning_rate": 0.00019948498133502153, "loss": 1.3263, "step": 67},
    {"epoch": 0.23570190641247835, "grad_norm": 0.1435546875, "learning_rate": 0.00019942266891397815, "loss": 1.3496, "step": 68},
    {"epoch": 0.2391681109185442, "grad_norm": 0.125, "learning_rate": 0.00019935680989151757, "loss": 1.2856, "step": 69},
    {"epoch": 0.24263431542461006, "grad_norm": 0.138671875, "learning_rate": 0.0001992874066169601, "loss": 1.2727, "step": 70},
    {"epoch": 0.24610051993067592, "grad_norm": 0.111328125, "learning_rate": 0.00019921446156605664, "loss": 1.214, "step": 71},
    {"epoch": 0.24956672443674177, "grad_norm": 0.150390625, "learning_rate": 0.00019913797734089997, "loss": 1.2299, "step": 72},
    {"epoch": 0.2530329289428076, "grad_norm": 0.1357421875, "learning_rate": 0.00019905795666983234, "loss": 1.2439, "step": 73},
    {"epoch": 0.2564991334488735, "grad_norm": 0.1435546875, "learning_rate": 0.00019897440240734788, "loss": 1.2721, "step": 74},
    {"epoch": 0.25996533795493937, "grad_norm": 0.1337890625, "learning_rate": 0.00019888731753399088, "loss": 1.2411, "step": 75},
    {"epoch": 0.2634315424610052, "grad_norm": 0.1494140625, "learning_rate": 0.00019879670515624936, "loss": 1.2302, "step": 76},
    {"epoch": 0.2668977469670711, "grad_norm": 0.1318359375, "learning_rate": 0.0001987025685064444, "loss": 1.2711, "step": 77},
    {"epoch": 0.2703639514731369, "grad_norm": 0.3203125, "learning_rate": 0.0001986049109426148, "loss": 1.2415, "step": 78},
    {"epoch": 0.2738301559792028, "grad_norm": 0.1494140625, "learning_rate": 0.00019850373594839716, "loss": 1.3111, "step": 79},
    {"epoch": 0.2772963604852686, "grad_norm": 0.142578125, "learning_rate": 0.00019839904713290184, "loss": 1.3138, "step": 80},
    {"epoch": 0.2807625649913345, "grad_norm": 0.1318359375, "learning_rate": 0.000198290848230584, "loss": 1.2662, "step": 81},
    {"epoch": 0.28422876949740034, "grad_norm": 0.13671875, "learning_rate": 0.00019817914310111046, "loss": 1.2258, "step": 82},
    {"epoch": 0.2876949740034662, "grad_norm": 0.1728515625, "learning_rate": 0.0001980639357292221, "loss": 1.2435, "step": 83},
    {"epoch": 0.29116117850953205, "grad_norm": 0.5546875, "learning_rate": 0.00019794523022459166, "loss": 1.2568, "step": 84},
    {"epoch": 0.29462738301559793, "grad_norm": 0.1455078125, "learning_rate": 0.00019782303082167704, "loss": 1.2181, "step": 85},
    {"epoch": 0.29809358752166376, "grad_norm": 0.283203125, "learning_rate": 0.00019769734187957038, "loss": 1.2465, "step": 86},
    {"epoch": 0.30155979202772965, "grad_norm": 0.1552734375, "learning_rate": 0.00019756816788184259, "loss": 1.2623, "step": 87},
    {"epoch": 0.3050259965337955, "grad_norm": 0.1474609375, "learning_rate": 0.00019743551343638324, "loss": 1.193, "step": 88},
    {"epoch": 0.30849220103986136, "grad_norm": 0.1123046875, "learning_rate": 0.00019729938327523635, "loss": 1.2262, "step": 89},
    {"epoch": 0.3119584055459272, "grad_norm": 0.1796875, "learning_rate": 0.00019715978225443148, "loss": 1.2862, "step": 90},
    {"epoch": 0.31542461005199307, "grad_norm": 0.146484375, "learning_rate": 0.00019701671535381064, "loss": 1.2466, "step": 91},
    {"epoch": 0.3188908145580589, "grad_norm": 0.150390625, "learning_rate": 0.0001968701876768505, "loss": 1.2048, "step": 92},
    {"epoch": 0.3223570190641248, "grad_norm": 0.1533203125, "learning_rate": 0.00019672020445048036, "loss": 1.3168, "step": 93},
    {"epoch": 0.32582322357019067, "grad_norm": 0.1416015625, "learning_rate": 0.00019656677102489588, "loss": 1.2833, "step": 94},
    {"epoch": 0.3292894280762565, "grad_norm": 0.1318359375, "learning_rate": 0.00019640989287336792, "loss": 1.3131, "step": 95},
    {"epoch": 0.3327556325823224, "grad_norm": 0.1591796875, "learning_rate": 0.00019624957559204761, "loss": 1.2941, "step": 96},
    {"epoch": 0.3362218370883882, "grad_norm": 0.150390625, "learning_rate": 0.00019608582489976647, "loss": 1.2618, "step": 97},
    {"epoch": 0.3396880415944541, "grad_norm": 0.130859375, "learning_rate": 0.0001959186466378326, "loss": 1.2308, "step": 98},
    {"epoch": 0.3431542461005199, "grad_norm": 0.12890625, "learning_rate": 0.00019574804676982216, "loss": 1.1744, "step": 99},
    {"epoch": 0.3466204506065858, "grad_norm": 0.14453125, "learning_rate": 0.00019557403138136672, "loss": 1.2391, "step": 100},
    {"epoch": 0.35008665511265163, "grad_norm": 0.1337890625, "learning_rate": 0.0001953966066799362, "loss": 1.2976, "step": 101},
    {"epoch": 0.3535528596187175, "grad_norm": 0.1318359375, "learning_rate": 0.00019521577899461731, "loss": 1.289, "step": 102},
    {"epoch": 0.35701906412478335, "grad_norm": 0.130859375, "learning_rate": 0.00019503155477588796, "loss": 1.219, "step": 103},
    {"epoch": 0.36048526863084923, "grad_norm": 0.181640625, "learning_rate": 0.000194843940595387, "loss": 1.2099, "step": 104},
    {"epoch": 0.36395147313691506, "grad_norm": 0.146484375, "learning_rate": 0.00019465294314567987, "loss": 1.2524, "step": 105},
    {"epoch": 0.36741767764298094, "grad_norm": 0.12451171875, "learning_rate": 0.0001944585692400199, "loss": 1.2382, "step": 106},
    {"epoch": 0.3708838821490468, "grad_norm": 0.1328125, "learning_rate": 0.0001942608258121051, "loss": 1.259, "step": 107},
    {"epoch": 0.37435008665511266, "grad_norm": 0.32421875, "learning_rate": 0.00019405971991583108, "loss": 1.2613, "step": 108},
    {"epoch": 0.3778162911611785, "grad_norm": 0.349609375, "learning_rate": 0.0001938552587250392, "loss": 1.3308, "step": 109},
    {"epoch": 0.38128249566724437, "grad_norm": 0.2138671875, "learning_rate": 0.00019364744953326074, "loss": 1.3292, "step": 110},
    {"epoch": 0.3847487001733102, "grad_norm": 0.146484375, "learning_rate": 0.00019343629975345685, "loss": 1.2605, "step": 111},
    {"epoch": 0.3882149046793761, "grad_norm": 0.142578125, "learning_rate": 0.00019322181691775386, "loss": 1.2033, "step": 112},
    {"epoch": 0.39168110918544197, "grad_norm": 0.138671875, "learning_rate": 0.00019300400867717482, "loss": 1.2595, "step": 113},
    {"epoch": 0.3951473136915078, "grad_norm": 0.1962890625, "learning_rate": 0.00019278288280136647, "loss": 1.245, "step": 114},
    {"epoch": 0.3986135181975737, "grad_norm": 0.1533203125, "learning_rate": 0.00019255844717832205, "loss": 1.2452, "step": 115},
    {"epoch": 0.4020797227036395, "grad_norm": 0.1318359375, "learning_rate": 0.00019233070981410007, "loss": 1.2467, "step": 116},
    {"epoch": 0.4055459272097054, "grad_norm": 0.1455078125, "learning_rate": 0.00019209967883253849, "loss": 1.2339, "step": 117},
    {"epoch": 0.4090121317157712, "grad_norm": 0.1455078125, "learning_rate": 0.00019186536247496518, "loss": 1.2301, "step": 118},
    {"epoch": 0.4124783362218371, "grad_norm": 0.2021484375, "learning_rate": 0.00019162776909990373, "loss": 1.2376, "step": 119},
    {"epoch": 0.41594454072790293, "grad_norm": 0.134765625, "learning_rate": 0.00019138690718277542, "loss": 1.2114, "step": 120},
    {"epoch": 0.4194107452339688, "grad_norm": 0.1494140625, "learning_rate": 0.00019114278531559675, "loss": 1.2057, "step": 121},
    {"epoch": 0.42287694974003465, "grad_norm": 0.1328125, "learning_rate": 0.0001908954122066731, "loss": 1.2832, "step": 122},
    {"epoch": 0.42634315424610053, "grad_norm": 0.201171875, "learning_rate": 0.000190644796680288, "loss": 1.2481, "step": 123},
    {"epoch": 0.42980935875216636, "grad_norm": 0.3046875, "learning_rate": 0.00019039094767638832, "loss": 1.212, "step": 124},
    {"epoch": 0.43327556325823224, "grad_norm": 0.1552734375, "learning_rate": 0.0001901338742502655, "loss": 1.2385, "step": 125},
    {"epoch": 0.43674176776429807, "grad_norm": 0.1982421875, "learning_rate": 0.00018987358557223232, "loss": 1.2925, "step": 126},
    {"epoch": 0.44020797227036396, "grad_norm": 0.1474609375, "learning_rate": 0.000189610090927296, "loss": 1.2285, "step": 127},
    {"epoch": 0.4436741767764298, "grad_norm": 0.12158203125, "learning_rate": 0.00018934339971482674, "loss": 1.2074, "step": 128},
    {"epoch": 0.44714038128249567, "grad_norm": 0.126953125, "learning_rate": 0.00018907352144822284, "loss": 1.1342, "step": 129},
    {"epoch": 0.4506065857885615, "grad_norm": 0.142578125, "learning_rate": 0.00018880046575457074, "loss": 1.2651, "step": 130},
    {"epoch": 0.4540727902946274, "grad_norm": 0.1337890625, "learning_rate": 0.00018852424237430216, "loss": 1.1864, "step": 131},
    {"epoch": 0.45753899480069327, "grad_norm": 0.1376953125, "learning_rate": 0.0001882448611608463, "loss": 1.1171, "step": 132},
    {"epoch": 0.4610051993067591, "grad_norm": 0.1455078125, "learning_rate": 0.0001879623320802785, "loss": 1.2275, "step": 133},
    {"epoch": 0.464471403812825, "grad_norm": 0.384765625, "learning_rate": 0.00018767666521096466, "loss": 1.2224, "step": 134},
    {"epoch": 0.4679376083188908, "grad_norm": 0.140625, "learning_rate": 0.00018738787074320179, "loss": 1.2373, "step": 135},
    {"epoch": 0.4714038128249567, "grad_norm": 0.1435546875, "learning_rate": 0.00018709595897885439, "loss": 1.2542, "step": 136},
    {"epoch": 0.4748700173310225, "grad_norm": 0.146484375, "learning_rate": 0.00018680094033098716, "loss": 1.2231, "step": 137},
    {"epoch": 0.4783362218370884, "grad_norm": 0.1298828125, "learning_rate": 0.00018650282532349332, "loss": 1.1947, "step": 138},
    {"epoch": 0.48180242634315423, "grad_norm": 0.1630859375, "learning_rate": 0.00018620162459071936, "loss": 1.2309, "step": 139},
    {"epoch": 0.4852686308492201, "grad_norm": 0.1552734375, "learning_rate": 0.00018589734887708556, "loss": 1.2276, "step": 140},
    {"epoch": 0.48873483535528595, "grad_norm": 0.14453125, "learning_rate": 0.0001855900090367029, "loss": 1.2637, "step": 141},
    {"epoch": 0.49220103986135183, "grad_norm": 0.1416015625, "learning_rate": 0.00018527961603298572, "loss": 1.3006, "step": 142},
    {"epoch": 0.49566724436741766, "grad_norm": 0.1298828125, "learning_rate": 0.00018496618093826063, "loss": 1.2016, "step": 143},
    {"epoch": 0.49913344887348354, "grad_norm": 0.1484375, "learning_rate": 0.00018464971493337167, "loss": 1.1819, "step": 144},
    {"epoch": 0.5025996533795494, "grad_norm": 0.1552734375, "learning_rate": 0.00018433022930728133, "loss": 1.2551, "step": 145},
    {"epoch": 0.5060658578856152, "grad_norm": 0.1337890625, "learning_rate": 0.00018400773545666787, "loss": 1.2634, "step": 146},
    {"epoch": 0.5095320623916811, "grad_norm": 0.1416015625, "learning_rate": 0.00018368224488551896, "loss": 1.1993, "step": 147},
    {"epoch": 0.512998266897747, "grad_norm": 0.1533203125, "learning_rate": 0.00018335376920472097, "loss": 1.2224, "step": 148},
    {"epoch": 0.5164644714038128, "grad_norm": 0.16796875, "learning_rate": 0.00018302232013164518, "loss": 1.2288, "step": 149},
    {"epoch": 0.5199306759098787, "grad_norm": 0.1328125, "learning_rate": 0.0001826879094897294, "loss": 1.1786, "step": 150},
    {"epoch": 0.5233968804159446, "grad_norm": 0.138671875, "learning_rate": 0.00018235054920805652, "loss": 1.2411, "step": 151},
    {"epoch": 0.5268630849220104, "grad_norm": 0.1337890625, "learning_rate": 0.00018201025132092889, "loss": 1.1916, "step": 152},
    {"epoch": 0.5303292894280762, "grad_norm": 0.21875, "learning_rate": 0.00018166702796743888, "loss": 1.2354, "step": 153},
    {"epoch": 0.5337954939341422, "grad_norm": 0.13671875, "learning_rate": 0.00018132089139103613, "loss": 1.24, "step": 154},
    {"epoch": 0.537261698440208, "grad_norm": 0.1552734375, "learning_rate": 0.00018097185393909049, "loss": 1.2284, "step": 155},
    {"epoch": 0.5407279029462738, "grad_norm": 0.294921875, "learning_rate": 0.00018061992806245184, "loss": 1.2164, "step": 156},
    {"epoch": 0.5441941074523396, "grad_norm": 0.130859375, "learning_rate": 0.00018026512631500583, "loss": 1.204, "step": 157},
    {"epoch": 0.5476603119584056, "grad_norm": 0.1357421875, "learning_rate": 0.00017990746135322592, "loss": 1.2307, "step": 158},
    {"epoch": 0.5511265164644714, "grad_norm": 0.1630859375, "learning_rate": 0.00017954694593572227, "loss": 1.2445, "step": 159},
    {"epoch": 0.5545927209705372, "grad_norm": 0.1904296875, "learning_rate": 0.00017918359292278611, "loss": 1.2107, "step": 160},
    {"epoch": 0.5580589254766031, "grad_norm": 0.1484375, "learning_rate": 0.0001788174152759315, "loss": 1.1999, "step": 161},
    {"epoch": 0.561525129982669, "grad_norm": 0.1337890625, "learning_rate": 0.00017844842605743258, "loss": 1.2639, "step": 162},
    {"epoch": 0.5649913344887348, "grad_norm": 0.115234375, "learning_rate": 0.00017807663842985777, "loss": 1.1628, "step": 163},
    {"epoch": 0.5684575389948007, "grad_norm": 0.1513671875, "learning_rate": 0.00017770206565560033, "loss": 1.2099, "step": 164},
    {"epoch": 0.5719237435008665, "grad_norm": 0.1318359375, "learning_rate": 0.00017732472109640503, "loss": 1.231, "step": 165},
    {"epoch": 0.5753899480069324, "grad_norm": 0.1318359375, "learning_rate": 0.00017694461821289172, "loss": 1.1915, "step": 166},
    {"epoch": 0.5788561525129983, "grad_norm": 0.1416015625, "learning_rate": 0.00017656177056407505, "loss": 1.2476, "step": 167},
    {"epoch": 0.5823223570190641, "grad_norm": 0.1435546875, "learning_rate": 0.00017617619180688085, "loss": 1.2174, "step": 168},
    {"epoch": 0.58578856152513, "grad_norm": 0.1611328125, "learning_rate": 0.0001757878956956589, "loss": 1.1836, "step": 169},
    {"epoch": 0.5892547660311959, "grad_norm": 0.1376953125, "learning_rate": 0.00017539689608169238, "loss": 1.1782, "step": 170},
    {"epoch": 0.5927209705372617, "grad_norm": 0.16796875, "learning_rate": 0.00017500320691270365, "loss": 1.2416, "step": 171},
    {"epoch": 0.5961871750433275, "grad_norm": 0.1328125, "learning_rate": 0.0001746068422323568, "loss": 1.1911, "step": 172},
    {"epoch": 0.5996533795493935, "grad_norm": 0.1357421875, "learning_rate": 0.00017420781617975665, "loss": 1.2743, "step": 173},
    {"epoch": 0.6031195840554593, "grad_norm": 0.1240234375, "learning_rate": 0.00017380614298894442, "loss": 1.2339, "step": 174},
    {"epoch": 0.6065857885615251, "grad_norm": 0.1396484375, "learning_rate": 0.0001734018369883898, "loss": 1.2063, "step": 175},
    {"epoch": 0.610051993067591, "grad_norm": 0.138671875, "learning_rate": 0.0001729949126004802, "loss": 1.2124, "step": 176},
    {"epoch": 0.6135181975736569, "grad_norm": 0.134765625, "learning_rate": 0.00017258538434100577, "loss": 1.1958, "step": 177},
    {"epoch": 0.6169844020797227, "grad_norm": 0.193359375, "learning_rate": 0.00017217326681864207, "loss": 1.1745, "step": 178},
    {"epoch": 0.6204506065857885, "grad_norm": 0.16015625, "learning_rate": 0.00017175857473442863, "loss": 1.1491, "step": 179},
    {"epoch": 0.6239168110918544, "grad_norm": 0.1181640625, "learning_rate": 0.00017134132288124465, "loss": 1.1795, "step": 180},
    {"epoch": 0.6273830155979203, "grad_norm": 0.12451171875, "learning_rate": 0.00017092152614328135, "loss": 1.1323, "step": 181},
    {"epoch": 0.6308492201039861, "grad_norm": 0.138671875, "learning_rate": 0.00017049919949551102, "loss": 1.2577, "step": 182},
    {"epoch": 0.634315424610052, "grad_norm": 0.14453125, "learning_rate": 0.00017007435800315263, "loss": 1.2299, "step": 183},
    {"epoch": 0.6377816291161178, "grad_norm": 0.1640625, "learning_rate": 0.00016964701682113475, "loss": 1.2503, "step": 184},
    {"epoch": 0.6412478336221837, "grad_norm": 0.130859375, "learning_rate": 0.00016921719119355468, "loss": 1.1444, "step": 185},
    {"epoch": 0.6447140381282496, "grad_norm": 0.134765625, "learning_rate": 0.0001687848964531348, "loss": 1.1779, "step": 186},
    {"epoch": 0.6481802426343154, "grad_norm": 0.18359375, "learning_rate": 0.00016835014802067558, "loss": 1.1597, "step": 187},
    {"epoch": 0.6516464471403813, "grad_norm": 0.13671875, "learning_rate": 0.00016791296140450545, "loss": 1.2693, "step": 188},
    {"epoch": 0.6551126516464472, "grad_norm": 0.1337890625, "learning_rate": 0.00016747335219992774, "loss": 1.2418, "step": 189},
    {"epoch": 0.658578856152513, "grad_norm": 0.130859375, "learning_rate": 0.00016703133608866414, "loss": 1.2335, "step": 190},
    {"epoch": 0.6620450606585788, "grad_norm": 0.126953125, "learning_rate": 0.00016658692883829548, "loss": 1.1994, "step": 191},
    {"epoch": 0.6655112651646448, "grad_norm": 0.1259765625, "learning_rate": 0.00016614014630169917, "loss": 1.1553, "step": 192},
    {"epoch": 0.6689774696707106, "grad_norm": 0.1357421875, "learning_rate": 0.00016569100441648374, "loss": 1.187, "step": 193},
    {"epoch": 0.6724436741767764, "grad_norm": 0.1259765625, "learning_rate": 0.00016523951920442034, "loss": 1.2037, "step": 194},
    {"epoch": 0.6759098786828422, "grad_norm": 0.1318359375, "learning_rate": 0.00016478570677087116, "loss": 1.2211, "step": 195},
    {"epoch": 0.6793760831889082, "grad_norm": 0.193359375, "learning_rate": 0.00016432958330421497, "loss": 1.2273, "step": 196},
    {"epoch": 0.682842287694974, "grad_norm": 0.1435546875, "learning_rate": 0.00016387116507526957, "loss": 1.1865, "step": 197},
    {"epoch": 0.6863084922010398, "grad_norm": 0.12451171875, "learning_rate": 0.00016341046843671144, "loss": 1.192, "step": 198},
    {"epoch": 0.6897746967071057, "grad_norm": 0.150390625, "learning_rate": 0.0001629475098224924, "loss": 1.255, "step": 199},
    {"epoch": 0.6932409012131716, "grad_norm": 0.1396484375, "learning_rate": 0.0001624823057472534, "loss": 1.2314, "step": 200},
    {"epoch": 0.6967071057192374, "grad_norm": 0.13671875, "learning_rate": 0.00016201487280573536, "loss": 1.2079, "step": 201},
    {"epoch": 0.7001733102253033, "grad_norm": 0.16015625, "learning_rate": 0.00016154522767218725, "loss": 1.2081, "step": 202},
    {"epoch": 0.7036395147313691, "grad_norm": 0.1494140625, "learning_rate": 0.0001610733870997712, "loss": 1.2703, "step": 203},
    {"epoch": 0.707105719237435, "grad_norm": 0.146484375, "learning_rate": 0.000160599367919965, "loss": 1.2438, "step": 204},
    {"epoch": 0.7105719237435009, "grad_norm": 0.142578125, "learning_rate": 0.00016012318704196164, "loss": 1.2292, "step": 205},
    {"epoch": 0.7140381282495667, "grad_norm": 0.1494140625, "learning_rate": 0.0001596448614520661, "loss": 1.2269, "step": 206},
    {"epoch": 0.7175043327556326, "grad_norm": 0.263671875, "learning_rate": 0.0001591644082130895, "loss": 1.2389, "step": 207},
    {"epoch": 0.7209705372616985, "grad_norm": 0.1474609375, "learning_rate": 0.0001586818444637402, "loss": 1.2191, "step": 208},
    {"epoch": 0.7244367417677643, "grad_norm": 0.1630859375, "learning_rate": 0.00015819718741801283, "loss": 1.1904, "step": 209},
    {"epoch": 0.7279029462738301, "grad_norm": 0.140625, "learning_rate": 0.0001577104543645738, "loss": 1.2045, "step": 210},
    {"epoch": 0.7313691507798961, "grad_norm": 0.13671875, "learning_rate": 0.00015722166266614494, "loss": 1.2283, "step": 211},
    {"epoch": 0.7348353552859619, "grad_norm": 0.265625, "learning_rate": 0.00015673082975888386, "loss": 1.2956, "step": 212},
    {"epoch": 0.7383015597920277, "grad_norm": 0.138671875, "learning_rate": 0.00015623797315176218, "loss": 1.2189, "step": 213},
    {"epoch": 0.7417677642980935, "grad_norm": 0.2099609375, "learning_rate": 0.0001557431104259408, "loss": 1.1593, "step": 214},
    {"epoch": 0.7452339688041595, "grad_norm": 0.1376953125, "learning_rate": 0.00015524625923414283, "loss": 1.1329, "step": 215},
    {"epoch": 0.7487001733102253, "grad_norm": 0.130859375, "learning_rate": 0.0001547474373000238, "loss": 1.1314, "step": 216},
    {"epoch": 0.7521663778162911, "grad_norm": 0.1572265625, "learning_rate": 0.00015424666241753966, "loss": 1.2665, "step": 217},
    {"epoch": 0.755632582322357, "grad_norm": 0.15234375, "learning_rate": 0.0001537439524503116, "loss": 1.2165, "step": 218},
    {"epoch": 0.7590987868284229, "grad_norm": 0.1318359375, "learning_rate": 0.00015323932533098925, "loss": 1.2062, "step": 219},
    {"epoch": 0.7625649913344887, "grad_norm": 0.1298828125, "learning_rate": 0.00015273279906061082, "loss": 1.1524, "step": 220},
    {"epoch": 0.7660311958405546, "grad_norm": 0.140625, "learning_rate": 0.0001522243917079608, "loss": 1.1551, "step": 221},
    {"epoch": 0.7694974003466204, "grad_norm": 0.1474609375, "learning_rate": 0.00015171412140892575, "loss": 1.2309, "step": 222},
    {"epoch": 0.7729636048526863, "grad_norm": 0.150390625, "learning_rate": 0.0001512020063658471, "loss": 1.1908, "step": 223},
    {"epoch": 0.7764298093587522, "grad_norm": 0.12158203125, "learning_rate": 0.0001506880648468719, "loss": 1.1917, "step": 224},
    {"epoch": 0.779896013864818, "grad_norm": 0.146484375, "learning_rate": 0.00015017231518530118, "loss": 1.1562, "step": 225},
    {"epoch": 0.7833622183708839, "grad_norm": 0.1396484375, "learning_rate": 0.00014965477577893598, "loss": 1.1602, "step": 226},
    {"epoch": 0.7868284228769498, "grad_norm": 0.1630859375, "learning_rate": 0.00014913546508942105, "loss": 1.2065, "step": 227},
    {"epoch": 0.7902946273830156, "grad_norm": 0.1484375, "learning_rate": 0.0001486144016415862, "loss": 1.1824, "step": 228},
    {"epoch": 0.7937608318890814, "grad_norm": 0.1513671875, "learning_rate": 0.00014809160402278572, "loss": 1.1687, "step": 229},
    {"epoch": 0.7972270363951474, "grad_norm": 0.158203125, "learning_rate": 0.0001475670908822351, "loss": 1.2072, "step": 230},
    {"epoch": 0.8006932409012132, "grad_norm": 0.1396484375, "learning_rate": 0.0001470408809303457, "loss": 1.2255, "step": 231},
    {"epoch": 0.804159445407279, "grad_norm": 0.1435546875, "learning_rate": 0.00014651299293805774, "loss": 1.243, "step": 232},
    {"epoch": 0.8076256499133448, "grad_norm": 0.1337890625, "learning_rate": 0.00014598344573617022, "loss": 1.1895, "step": 233},
    {"epoch": 0.8110918544194108, "grad_norm": 0.12890625, "learning_rate": 0.0001454522582146695, "loss": 1.1114, "step": 234},
    {"epoch": 0.8145580589254766, "grad_norm": 0.1728515625, "learning_rate": 0.0001449194493220553, "loss": 1.1989, "step": 235},
    {"epoch": 0.8180242634315424, "grad_norm": 0.1435546875, "learning_rate": 0.0001443850380646649, "loss": 1.2013, "step": 236},
    {"epoch": 0.8214904679376083, "grad_norm": 0.1669921875, "learning_rate": 0.00014384904350599496, "loss": 1.1654, "step": 237},
    {"epoch": 0.8249566724436742, "grad_norm": 0.142578125, "learning_rate": 0.0001433114847660217, "loss": 1.1622, "step": 238},
    {"epoch": 0.82842287694974, "grad_norm": 0.1376953125, "learning_rate": 0.0001427723810205187, "loss": 1.1871, "step": 239},
    {"epoch": 0.8318890814558059, "grad_norm": 0.173828125, "learning_rate": 0.00014223175150037296, "loss": 1.2285, "step": 240},
    {"epoch": 0.8353552859618717, "grad_norm": 0.169921875, "learning_rate": 0.00014168961549089874, "loss": 1.1787, "step": 241},
    {"epoch": 0.8388214904679376, "grad_norm": 0.1484375, "learning_rate": 0.00014114599233114986, "loss": 1.1703, "step": 242},
    {"epoch": 0.8422876949740035, "grad_norm": 0.13671875, "learning_rate": 0.00014060090141322968, "loss": 1.1674, "step": 243},
    {"epoch": 0.8457538994800693, "grad_norm": 0.173828125, "learning_rate": 0.00014005436218159927, "loss": 1.2099, "step": 244},
    {"epoch": 0.8492201039861352, "grad_norm": 0.169921875, "learning_rate": 0.00013950639413238394, "loss": 1.1779, "step": 245},
    {"epoch": 0.8526863084922011, "grad_norm": 0.13671875, "learning_rate": 0.00013895701681267784, "loss": 1.2203, "step": 246},
    {"epoch": 0.8561525129982669, "grad_norm": 0.14453125, "learning_rate": 0.0001384062498198464, "loss": 1.1241, "step": 247},
    {"epoch": 0.8596187175043327, "grad_norm": 0.140625, "learning_rate": 0.00013785411280082746, "loss": 1.2283, "step": 248},
    {"epoch": 0.8630849220103987, "grad_norm": 0.3984375, "learning_rate": 0.0001373006254514304, "loss": 1.2642, "step": 249},
    {"epoch": 0.8665511265164645, "grad_norm": 0.1298828125, "learning_rate": 0.00013674580751563356, "loss": 1.1703, "step": 250},
    {"epoch": 0.8700173310225303, "grad_norm": 0.146484375, "learning_rate": 0.00013618967878487983, "loss": 1.2154, "step": 251},
    {"epoch": 0.8734835355285961, "grad_norm": 0.275390625, "learning_rate": 0.00013563225909737076, "loss": 1.1762, "step": 252},
    {"epoch": 0.8769497400346621, "grad_norm": 0.1904296875, "learning_rate": 0.00013507356833735888, "loss": 1.2806, "step": 253},
    {"epoch": 0.8804159445407279, "grad_norm": 0.150390625, "learning_rate": 0.00013451362643443832, "loss": 1.2032, "step": 254},
    {"epoch": 0.8838821490467937, "grad_norm": 0.15625, "learning_rate": 0.00013395245336283396, "loss": 1.2122, "step": 255},
    {"epoch": 0.8873483535528596, "grad_norm": 0.16015625, "learning_rate": 0.0001333900691406889, "loss": 1.1839, "step": 256},
    {"epoch": 0.8908145580589255, "grad_norm": 0.1416015625, "learning_rate": 0.00013282649382935027, "loss": 1.184, "step": 257},
    {"epoch": 0.8942807625649913, "grad_norm": 0.130859375, "learning_rate": 0.0001322617475326538, "loss": 1.1703, "step": 258},
    {"epoch": 0.8977469670710572, "grad_norm": 0.205078125, "learning_rate": 0.0001316958503962065, "loss": 1.2546, "step": 259},
    {"epoch": 0.901213171577123, "grad_norm": 0.2177734375, "learning_rate": 0.00013112882260666805, "loss": 1.2206, "step": 260},
    {"epoch": 0.9046793760831889, "grad_norm": 0.125, "learning_rate": 0.00013056068439103085, "loss": 1.16, "step": 261},
    {"epoch": 0.9081455805892548, "grad_norm": 0.1591796875, "learning_rate": 0.00012999145601589823, "loss": 1.2333, "step": 262},
    {"epoch": 0.9116117850953206, "grad_norm": 0.2109375, "learning_rate": 0.00012942115778676177, "loss": 1.1691, "step": 263},
    {"epoch": 0.9150779896013865, "grad_norm": 0.19140625, "learning_rate": 0.00012884981004727676, "loss": 1.2484, "step": 264},
    {"epoch": 0.9185441941074524, "grad_norm": 0.126953125, "learning_rate": 0.00012827743317853665, "loss": 1.1829, "step": 265},
    {"epoch": 0.9220103986135182, "grad_norm": 0.19921875, "learning_rate": 0.00012770404759834594, "loss": 1.1925, "step": 266},
    {"epoch": 0.925476603119584, "grad_norm": 0.1455078125, "learning_rate": 0.00012712967376049176, "loss": 1.2528, "step": 267},
    {"epoch": 0.92894280762565, "grad_norm": 0.17578125, "learning_rate": 0.00012655433215401438, "loss": 1.2717, "step": 268},
    {"epoch": 0.9324090121317158, "grad_norm": 0.154296875, "learning_rate": 0.0001259780433024763, "loss": 1.2076, "step": 269},
    {"epoch": 0.9358752166377816, "grad_norm": 0.46484375, "learning_rate": 0.00012540082776323007, "loss": 1.2072, "step": 270},
    {"epoch": 0.9393414211438474, "grad_norm": 0.1357421875, "learning_rate": 0.00012482270612668508, "loss": 1.1718, "step": 271},
    {"epoch": 0.9428076256499134, "grad_norm": 0.134765625, "learning_rate": 0.0001242436990155728, "loss": 1.1794, "step": 272},
    {"epoch": 0.9462738301559792, "grad_norm": 0.1494140625, "learning_rate": 0.00012366382708421154, "loss": 1.2162, "step": 273},
    {"epoch": 0.949740034662045, "grad_norm": 0.1806640625, "learning_rate": 0.00012308311101776932, "loss": 1.22, "step": 274},
    {"epoch": 0.9532062391681109, "grad_norm": 0.13671875, "learning_rate": 0.0001225015715315261, "loss": 1.1998, "step": 275},
    {"epoch": 0.9566724436741768, "grad_norm": 0.6015625, "learning_rate": 0.00012191922937013489, "loss": 1.1541, "step": 276},
    {"epoch": 0.9601386481802426, "grad_norm": 0.1650390625, "learning_rate": 0.00012133610530688168, "loss": 1.2099, "step": 277},
    {"epoch": 0.9636048526863085, "grad_norm": 0.1474609375, "learning_rate": 0.00012075222014294447, "loss": 1.2157, "step": 278},
    {"epoch": 0.9670710571923743, "grad_norm": 0.1298828125, "learning_rate": 0.00012016759470665112, "loss": 1.1958, "step": 279},
    {"epoch": 0.9705372616984402, "grad_norm": 0.1767578125, "learning_rate": 0.00011958224985273648, "loss": 1.1857, "step": 280},
    {"epoch": 0.9740034662045061, "grad_norm": 0.1376953125, "learning_rate": 0.00011899620646159855, "loss": 1.1818, "step": 281},
    {"epoch": 0.9774696707105719, "grad_norm": 0.130859375, "learning_rate": 0.00011840948543855335, "loss": 1.1787, "step": 282},
    {"epoch": 0.9809358752166378, "grad_norm": 0.1279296875, "learning_rate": 0.00011782210771308948, "loss": 1.1672, "step": 283},
    {"epoch": 0.9844020797227037, "grad_norm": 0.142578125, "learning_rate": 0.00011723409423812134, "loss": 1.1523, "step": 284},
    {"epoch": 0.9878682842287695, "grad_norm": 0.1474609375, "learning_rate": 0.00011664546598924184, "loss": 1.1124, "step": 285},
    {"epoch": 0.9913344887348353, "grad_norm": 0.1416015625, "learning_rate": 0.00011605624396397398, "loss": 1.2084, "step": 286},
    {"epoch": 0.9948006932409013, "grad_norm": 0.1533203125, "learning_rate": 0.00011546644918102196, "loss": 1.1845, "step": 287},
    {"epoch": 0.9982668977469671, "grad_norm": 0.13671875, "learning_rate": 0.00011487610267952142, "loss": 1.1879, "step": 288}
  ],
  "logging_steps": 1,
  "max_steps": 576,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 288,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.8196759500528026e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}