{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 15.0,
  "eval_steps": 500,
  "global_step": 1665,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.009009009009009009,
      "grad_norm": 608.0,
      "learning_rate": 1.1976047904191619e-06,
      "loss": 58.5641,
      "step": 1
    },
    {
      "epoch": 0.04504504504504504,
      "grad_norm": 536.0,
      "learning_rate": 5.9880239520958085e-06,
      "loss": 54.74,
      "step": 5
    },
    {
      "epoch": 0.09009009009009009,
      "grad_norm": 488.0,
      "learning_rate": 1.1976047904191617e-05,
      "loss": 52.9637,
      "step": 10
    },
    {
      "epoch": 0.13513513513513514,
      "grad_norm": 392.0,
      "learning_rate": 1.7964071856287426e-05,
      "loss": 40.7219,
      "step": 15
    },
    {
      "epoch": 0.18018018018018017,
      "grad_norm": 46.5,
      "learning_rate": 2.3952095808383234e-05,
      "loss": 28.1416,
      "step": 20
    },
    {
      "epoch": 0.22522522522522523,
      "grad_norm": 40.25,
      "learning_rate": 2.994011976047904e-05,
      "loss": 24.2461,
      "step": 25
    },
    {
      "epoch": 0.2702702702702703,
      "grad_norm": 24.125,
      "learning_rate": 3.592814371257485e-05,
      "loss": 22.2451,
      "step": 30
    },
    {
      "epoch": 0.3153153153153153,
      "grad_norm": 13.5625,
      "learning_rate": 4.191616766467066e-05,
      "loss": 20.2199,
      "step": 35
    },
    {
      "epoch": 0.36036036036036034,
      "grad_norm": 7.5625,
      "learning_rate": 4.790419161676647e-05,
      "loss": 19.4029,
      "step": 40
    },
    {
      "epoch": 0.40540540540540543,
      "grad_norm": 9.1875,
      "learning_rate": 5.389221556886228e-05,
      "loss": 18.5345,
      "step": 45
    },
    {
      "epoch": 0.45045045045045046,
      "grad_norm": 15.75,
      "learning_rate": 5.988023952095808e-05,
      "loss": 17.2917,
      "step": 50
    },
    {
      "epoch": 0.4954954954954955,
      "grad_norm": 37.25,
      "learning_rate": 6.58682634730539e-05,
      "loss": 15.8479,
      "step": 55
    },
    {
      "epoch": 0.5405405405405406,
      "grad_norm": 59.75,
      "learning_rate": 7.18562874251497e-05,
      "loss": 10.5192,
      "step": 60
    },
    {
      "epoch": 0.5855855855855856,
      "grad_norm": 13.375,
      "learning_rate": 7.784431137724552e-05,
      "loss": 3.399,
      "step": 65
    },
    {
      "epoch": 0.6306306306306306,
      "grad_norm": 2.84375,
      "learning_rate": 8.383233532934131e-05,
      "loss": 2.1324,
      "step": 70
    },
    {
      "epoch": 0.6756756756756757,
      "grad_norm": 2.125,
      "learning_rate": 8.982035928143712e-05,
      "loss": 1.8224,
      "step": 75
    },
    {
      "epoch": 0.7207207207207207,
      "grad_norm": 3.703125,
      "learning_rate": 9.580838323353294e-05,
      "loss": 1.6034,
      "step": 80
    },
    {
      "epoch": 0.7657657657657657,
      "grad_norm": 2.75,
      "learning_rate": 0.00010179640718562875,
      "loss": 1.4811,
      "step": 85
    },
    {
      "epoch": 0.8108108108108109,
      "grad_norm": 7.53125,
      "learning_rate": 0.00010778443113772456,
      "loss": 1.4187,
      "step": 90
    },
    {
      "epoch": 0.8558558558558559,
      "grad_norm": 2.234375,
      "learning_rate": 0.00011377245508982037,
      "loss": 1.3517,
      "step": 95
    },
    {
      "epoch": 0.9009009009009009,
      "grad_norm": 8.375,
      "learning_rate": 0.00011976047904191617,
      "loss": 1.2567,
      "step": 100
    },
    {
      "epoch": 0.9459459459459459,
      "grad_norm": 5.90625,
      "learning_rate": 0.00012574850299401196,
      "loss": 1.2634,
      "step": 105
    },
    {
      "epoch": 0.990990990990991,
      "grad_norm": 4.1875,
      "learning_rate": 0.0001317365269461078,
      "loss": 1.2272,
      "step": 110
    },
    {
      "epoch": 1.0,
      "eval_loss": 2.3900420665740967,
      "eval_runtime": 1.0174,
      "eval_samples_per_second": 4.914,
      "eval_steps_per_second": 1.966,
      "step": 111
    },
    {
      "epoch": 1.0360360360360361,
      "grad_norm": 3.234375,
      "learning_rate": 0.00013772455089820359,
      "loss": 1.1598,
      "step": 115
    },
    {
      "epoch": 1.0810810810810811,
      "grad_norm": 2.3125,
      "learning_rate": 0.0001437125748502994,
      "loss": 1.1474,
      "step": 120
    },
    {
      "epoch": 1.1261261261261262,
      "grad_norm": 6.375,
      "learning_rate": 0.0001497005988023952,
      "loss": 1.1051,
      "step": 125
    },
    {
      "epoch": 1.1711711711711712,
      "grad_norm": 6.625,
      "learning_rate": 0.00015568862275449103,
      "loss": 1.0869,
      "step": 130
    },
    {
      "epoch": 1.2162162162162162,
      "grad_norm": 1.0703125,
      "learning_rate": 0.00016167664670658683,
      "loss": 1.0864,
      "step": 135
    },
    {
      "epoch": 1.2612612612612613,
      "grad_norm": 6.1875,
      "learning_rate": 0.00016766467065868263,
      "loss": 1.0419,
      "step": 140
    },
    {
      "epoch": 1.3063063063063063,
      "grad_norm": 2.84375,
      "learning_rate": 0.00017365269461077845,
      "loss": 1.0647,
      "step": 145
    },
    {
      "epoch": 1.3513513513513513,
      "grad_norm": 24.0,
      "learning_rate": 0.00017964071856287425,
      "loss": 1.0682,
      "step": 150
    },
    {
      "epoch": 1.3963963963963963,
      "grad_norm": 5.3125,
      "learning_rate": 0.00018562874251497007,
      "loss": 1.0375,
      "step": 155
    },
    {
      "epoch": 1.4414414414414414,
      "grad_norm": 2.5625,
      "learning_rate": 0.00019161676646706587,
      "loss": 1.0111,
      "step": 160
    },
    {
      "epoch": 1.4864864864864864,
      "grad_norm": 4.25,
      "learning_rate": 0.0001976047904191617,
      "loss": 0.9605,
      "step": 165
    },
    {
      "epoch": 1.5315315315315314,
      "grad_norm": 6.15625,
      "learning_rate": 0.00019999802081131313,
      "loss": 0.9843,
      "step": 170
    },
    {
      "epoch": 1.5765765765765765,
      "grad_norm": 2.484375,
      "learning_rate": 0.00019998592605305064,
      "loss": 1.0225,
      "step": 175
    },
    {
      "epoch": 1.6216216216216215,
      "grad_norm": 2.3125,
      "learning_rate": 0.00019996283741405458,
      "loss": 0.9908,
      "step": 180
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 28.375,
      "learning_rate": 0.00019992875743301967,
      "loss": 1.0038,
      "step": 185
    },
    {
      "epoch": 1.7117117117117115,
      "grad_norm": 2.390625,
      "learning_rate": 0.00019988368985718576,
      "loss": 0.9688,
      "step": 190
    },
    {
      "epoch": 1.7567567567567568,
      "grad_norm": 1.75,
      "learning_rate": 0.00019982763964192585,
      "loss": 0.9638,
      "step": 195
    },
    {
      "epoch": 1.8018018018018018,
      "grad_norm": 3.125,
      "learning_rate": 0.00019976061295020106,
      "loss": 0.9878,
      "step": 200
    },
    {
      "epoch": 1.8468468468468469,
      "grad_norm": 1.921875,
      "learning_rate": 0.00019968261715188324,
      "loss": 0.9394,
      "step": 205
    },
    {
      "epoch": 1.8918918918918919,
      "grad_norm": 1.546875,
      "learning_rate": 0.00019959366082294438,
      "loss": 0.9285,
      "step": 210
    },
    {
      "epoch": 1.936936936936937,
      "grad_norm": 3.53125,
      "learning_rate": 0.00019949375374451377,
      "loss": 0.929,
      "step": 215
    },
    {
      "epoch": 1.981981981981982,
      "grad_norm": 1.625,
      "learning_rate": 0.00019938290690180247,
      "loss": 0.9374,
      "step": 220
    },
    {
      "epoch": 2.0,
      "eval_loss": 2.192767381668091,
      "eval_runtime": 1.0169,
      "eval_samples_per_second": 4.917,
      "eval_steps_per_second": 1.967,
      "step": 222
    },
    {
      "epoch": 2.027027027027027,
      "grad_norm": 2.25,
      "learning_rate": 0.0001992611324828956,
      "loss": 0.8849,
      "step": 225
    },
    {
      "epoch": 2.0720720720720722,
      "grad_norm": 1.8125,
      "learning_rate": 0.00019912844387741195,
      "loss": 0.864,
      "step": 230
    },
    {
      "epoch": 2.1171171171171173,
      "grad_norm": 1.09375,
      "learning_rate": 0.00019898485567503187,
      "loss": 0.8497,
      "step": 235
    },
    {
      "epoch": 2.1621621621621623,
      "grad_norm": 1.9609375,
      "learning_rate": 0.00019883038366389313,
      "loss": 0.8844,
      "step": 240
    },
    {
      "epoch": 2.2072072072072073,
      "grad_norm": 1.0234375,
      "learning_rate": 0.0001986650448288548,
      "loss": 0.8509,
      "step": 245
    },
    {
      "epoch": 2.2522522522522523,
      "grad_norm": 3.078125,
      "learning_rate": 0.00019848885734962982,
      "loss": 0.8718,
      "step": 250
    },
    {
      "epoch": 2.2972972972972974,
      "grad_norm": 3.390625,
      "learning_rate": 0.00019830184059878606,
      "loss": 0.881,
      "step": 255
    },
    {
      "epoch": 2.3423423423423424,
      "grad_norm": 2.75,
      "learning_rate": 0.0001981040151396161,
      "loss": 0.854,
      "step": 260
    },
    {
      "epoch": 2.3873873873873874,
      "grad_norm": 1.25,
      "learning_rate": 0.0001978954027238763,
      "loss": 0.8856,
      "step": 265
    },
    {
      "epoch": 2.4324324324324325,
      "grad_norm": 1.640625,
      "learning_rate": 0.00019767602628939507,
      "loss": 0.8797,
      "step": 270
    },
    {
      "epoch": 2.4774774774774775,
      "grad_norm": 2.28125,
      "learning_rate": 0.00019744590995755088,
      "loss": 0.8829,
      "step": 275
    },
    {
      "epoch": 2.5225225225225225,
      "grad_norm": 1.265625,
      "learning_rate": 0.00019720507903061974,
      "loss": 0.8319,
      "step": 280
    },
    {
      "epoch": 2.5675675675675675,
      "grad_norm": 1.6328125,
      "learning_rate": 0.00019695355998899345,
      "loss": 0.8489,
      "step": 285
    },
    {
      "epoch": 2.6126126126126126,
      "grad_norm": 1.1328125,
      "learning_rate": 0.00019669138048826766,
      "loss": 0.8409,
      "step": 290
    },
    {
      "epoch": 2.6576576576576576,
      "grad_norm": 1.1640625,
      "learning_rate": 0.00019641856935620117,
      "loss": 0.8454,
      "step": 295
    },
    {
      "epoch": 2.7027027027027026,
      "grad_norm": 0.625,
      "learning_rate": 0.00019613515658954624,
      "loss": 0.8501,
      "step": 300
    },
    {
      "epoch": 2.7477477477477477,
      "grad_norm": 1.0859375,
      "learning_rate": 0.0001958411733507502,
      "loss": 0.8223,
      "step": 305
    },
    {
      "epoch": 2.7927927927927927,
      "grad_norm": 0.875,
      "learning_rate": 0.00019553665196452903,
      "loss": 0.8396,
      "step": 310
    },
    {
      "epoch": 2.8378378378378377,
      "grad_norm": 1.328125,
      "learning_rate": 0.0001952216259143132,
      "loss": 0.836,
      "step": 315
    },
    {
      "epoch": 2.8828828828828827,
      "grad_norm": 2.09375,
      "learning_rate": 0.0001948961298385659,
      "loss": 0.8453,
      "step": 320
    },
    {
      "epoch": 2.9279279279279278,
      "grad_norm": 0.62109375,
      "learning_rate": 0.00019456019952697462,
      "loss": 0.824,
      "step": 325
    },
    {
      "epoch": 2.972972972972973,
      "grad_norm": 1.7578125,
      "learning_rate": 0.0001942138719165156,
      "loss": 0.8471,
      "step": 330
    },
    {
      "epoch": 3.0,
      "eval_loss": 2.168168544769287,
      "eval_runtime": 1.0188,
      "eval_samples_per_second": 4.908,
      "eval_steps_per_second": 1.963,
      "step": 333
    },
    {
      "epoch": 3.018018018018018,
      "grad_norm": 1.5078125,
      "learning_rate": 0.00019385718508739262,
      "loss": 0.8242,
      "step": 335
    },
    {
      "epoch": 3.063063063063063,
      "grad_norm": 1.4375,
      "learning_rate": 0.00019349017825885,
      "loss": 0.7439,
      "step": 340
    },
    {
      "epoch": 3.108108108108108,
      "grad_norm": 0.67578125,
      "learning_rate": 0.00019311289178486018,
      "loss": 0.7545,
      "step": 345
    },
    {
      "epoch": 3.153153153153153,
      "grad_norm": 1.71875,
      "learning_rate": 0.00019272536714968658,
      "loss": 0.7531,
      "step": 350
    },
    {
      "epoch": 3.1981981981981984,
      "grad_norm": 2.15625,
      "learning_rate": 0.0001923276469633223,
      "loss": 0.7731,
      "step": 355
    },
    {
      "epoch": 3.2432432432432434,
      "grad_norm": 1.453125,
      "learning_rate": 0.000191919774956805,
      "loss": 0.7558,
      "step": 360
    },
    {
      "epoch": 3.2882882882882885,
      "grad_norm": 1.71875,
      "learning_rate": 0.0001915017959774084,
      "loss": 0.782,
      "step": 365
    },
    {
      "epoch": 3.3333333333333335,
      "grad_norm": 0.7265625,
      "learning_rate": 0.00019107375598371112,
      "loss": 0.7795,
      "step": 370
    },
    {
      "epoch": 3.3783783783783785,
      "grad_norm": 1.71875,
      "learning_rate": 0.0001906357020405435,
      "loss": 0.7638,
      "step": 375
    },
    {
      "epoch": 3.4234234234234235,
      "grad_norm": 1.7109375,
      "learning_rate": 0.00019018768231381238,
      "loss": 0.7511,
      "step": 380
    },
    {
      "epoch": 3.4684684684684686,
      "grad_norm": 0.6015625,
      "learning_rate": 0.0001897297460652053,
      "loss": 0.7591,
      "step": 385
    },
    {
      "epoch": 3.5135135135135136,
      "grad_norm": 7.625,
      "learning_rate": 0.0001892619436467738,
      "loss": 0.7433,
      "step": 390
    },
    {
      "epoch": 3.5585585585585586,
      "grad_norm": 1.171875,
      "learning_rate": 0.00018878432649539696,
      "loss": 0.7673,
      "step": 395
    },
    {
      "epoch": 3.6036036036036037,
      "grad_norm": 0.734375,
      "learning_rate": 0.0001882969471271259,
      "loss": 0.7633,
      "step": 400
    },
    {
      "epoch": 3.6486486486486487,
      "grad_norm": 0.93359375,
      "learning_rate": 0.00018779985913140924,
      "loss": 0.7751,
      "step": 405
    },
    {
      "epoch": 3.6936936936936937,
      "grad_norm": 0.9140625,
      "learning_rate": 0.00018729311716520074,
      "loss": 0.7628,
      "step": 410
    },
    {
      "epoch": 3.7387387387387387,
      "grad_norm": 1.09375,
      "learning_rate": 0.00018677677694694958,
      "loss": 0.7553,
      "step": 415
    },
    {
      "epoch": 3.7837837837837838,
      "grad_norm": 0.7578125,
      "learning_rate": 0.00018625089525047385,
      "loss": 0.7636,
      "step": 420
    },
    {
      "epoch": 3.828828828828829,
      "grad_norm": 0.76953125,
      "learning_rate": 0.00018571552989871806,
      "loss": 0.7764,
      "step": 425
    },
    {
      "epoch": 3.873873873873874,
      "grad_norm": 0.84765625,
      "learning_rate": 0.00018517073975739514,
      "loss": 0.7715,
      "step": 430
    },
    {
      "epoch": 3.918918918918919,
      "grad_norm": 0.80859375,
      "learning_rate": 0.0001846165847285141,
      "loss": 0.77,
      "step": 435
    },
    {
      "epoch": 3.963963963963964,
      "grad_norm": 3.0,
      "learning_rate": 0.0001840531257437934,
      "loss": 0.7873,
      "step": 440
    },
    {
      "epoch": 4.0,
      "eval_loss": 2.2036337852478027,
      "eval_runtime": 1.0177,
      "eval_samples_per_second": 4.913,
      "eval_steps_per_second": 1.965,
      "step": 444
    },
    {
      "epoch": 4.009009009009009,
      "grad_norm": 2.0625,
      "learning_rate": 0.00018348042475796122,
      "loss": 0.7489,
      "step": 445
    },
    {
      "epoch": 4.054054054054054,
      "grad_norm": 0.6484375,
      "learning_rate": 0.00018289854474194347,
      "loss": 0.6615,
      "step": 450
    },
    {
      "epoch": 4.099099099099099,
      "grad_norm": 1.2421875,
      "learning_rate": 0.00018230754967593963,
      "loss": 0.6595,
      "step": 455
    },
    {
      "epoch": 4.1441441441441444,
      "grad_norm": 0.7578125,
      "learning_rate": 0.00018170750454238793,
      "loss": 0.6718,
      "step": 460
    },
    {
      "epoch": 4.1891891891891895,
      "grad_norm": 0.89453125,
      "learning_rate": 0.00018109847531882047,
      "loss": 0.6528,
      "step": 465
    },
    {
      "epoch": 4.2342342342342345,
      "grad_norm": 1.15625,
      "learning_rate": 0.0001804805289706083,
      "loss": 0.6505,
      "step": 470
    },
    {
      "epoch": 4.2792792792792795,
      "grad_norm": 1.25,
      "learning_rate": 0.0001798537334435986,
      "loss": 0.6708,
      "step": 475
    },
    {
      "epoch": 4.324324324324325,
      "grad_norm": 0.89453125,
      "learning_rate": 0.0001792181576566437,
      "loss": 0.6702,
      "step": 480
    },
    {
      "epoch": 4.36936936936937,
      "grad_norm": 0.734375,
      "learning_rate": 0.0001785738714940231,
      "loss": 0.6755,
      "step": 485
    },
    {
      "epoch": 4.414414414414415,
      "grad_norm": 1.46875,
      "learning_rate": 0.00017792094579775926,
      "loss": 0.6825,
      "step": 490
    },
    {
      "epoch": 4.45945945945946,
      "grad_norm": 1.0390625,
      "learning_rate": 0.00017725945235982856,
      "loss": 0.6864,
      "step": 495
    },
    {
      "epoch": 4.504504504504505,
      "grad_norm": 0.62890625,
      "learning_rate": 0.0001765894639142671,
      "loss": 0.6758,
      "step": 500
    },
    {
      "epoch": 4.54954954954955,
      "grad_norm": 0.71875,
      "learning_rate": 0.0001759110541291736,
      "loss": 0.6823,
      "step": 505
    },
    {
      "epoch": 4.594594594594595,
      "grad_norm": 0.6875,
      "learning_rate": 0.000175224297598609,
      "loss": 0.6882,
      "step": 510
    },
    {
      "epoch": 4.63963963963964,
      "grad_norm": 0.65234375,
      "learning_rate": 0.00017452926983439475,
      "loss": 0.6922,
      "step": 515
    },
    {
      "epoch": 4.684684684684685,
      "grad_norm": 0.90234375,
      "learning_rate": 0.0001738260472578097,
      "loss": 0.6787,
      "step": 520
    },
    {
      "epoch": 4.72972972972973,
      "grad_norm": 0.84765625,
      "learning_rate": 0.00017311470719118765,
      "loss": 0.7028,
      "step": 525
    },
    {
      "epoch": 4.774774774774775,
      "grad_norm": 0.80859375,
      "learning_rate": 0.00017239532784941495,
      "loss": 0.6851,
      "step": 530
    },
    {
      "epoch": 4.81981981981982,
      "grad_norm": 1.15625,
      "learning_rate": 0.0001716679883313308,
      "loss": 0.6756,
      "step": 535
    },
    {
      "epoch": 4.864864864864865,
      "grad_norm": 1.34375,
      "learning_rate": 0.00017093276861102988,
      "loss": 0.6697,
      "step": 540
    },
    {
      "epoch": 4.90990990990991,
      "grad_norm": 1.4296875,
      "learning_rate": 0.00017018974952906884,
      "loss": 0.689,
      "step": 545
    },
    {
      "epoch": 4.954954954954955,
      "grad_norm": 0.68359375,
      "learning_rate": 0.0001694390127835775,
      "loss": 0.6894,
      "step": 550
    },
    {
      "epoch": 5.0,
      "grad_norm": 1.390625,
      "learning_rate": 0.00016868064092127587,
      "loss": 0.685,
      "step": 555
    },
    {
      "epoch": 5.0,
      "eval_loss": 2.297661304473877,
      "eval_runtime": 1.0166,
      "eval_samples_per_second": 4.918,
      "eval_steps_per_second": 1.967,
      "step": 555
    },
    {
      "epoch": 5.045045045045045,
      "grad_norm": 1.328125,
      "learning_rate": 0.00016791471732839776,
      "loss": 0.5743,
      "step": 560
    },
    {
      "epoch": 5.09009009009009,
      "grad_norm": 0.7734375,
      "learning_rate": 0.00016714132622152206,
      "loss": 0.5415,
      "step": 565
    },
    {
      "epoch": 5.135135135135135,
      "grad_norm": 0.78515625,
      "learning_rate": 0.00016636055263831285,
      "loss": 0.5752,
      "step": 570
    },
    {
      "epoch": 5.18018018018018,
      "grad_norm": 0.91015625,
      "learning_rate": 0.00016557248242816903,
      "loss": 0.5778,
      "step": 575
    },
    {
      "epoch": 5.225225225225225,
      "grad_norm": 1.015625,
      "learning_rate": 0.00016477720224278492,
      "loss": 0.5924,
      "step": 580
    },
    {
      "epoch": 5.27027027027027,
      "grad_norm": 0.8046875,
      "learning_rate": 0.00016397479952662248,
      "loss": 0.5683,
      "step": 585
    },
    {
      "epoch": 5.315315315315315,
      "grad_norm": 0.734375,
      "learning_rate": 0.0001631653625072965,
      "loss": 0.595,
      "step": 590
    },
    {
      "epoch": 5.36036036036036,
      "grad_norm": 0.92578125,
      "learning_rate": 0.00016234898018587337,
      "loss": 0.5862,
      "step": 595
    },
    {
      "epoch": 5.405405405405405,
      "grad_norm": 1.3671875,
      "learning_rate": 0.00016152574232708534,
      "loss": 0.5806,
      "step": 600
    },
    {
      "epoch": 5.45045045045045,
      "grad_norm": 1.578125,
      "learning_rate": 0.00016069573944946025,
      "loss": 0.6002,
      "step": 605
    },
    {
      "epoch": 5.495495495495495,
      "grad_norm": 1.1875,
      "learning_rate": 0.00015985906281536875,
      "loss": 0.6061,
      "step": 610
    },
    {
      "epoch": 5.54054054054054,
      "grad_norm": 1.9765625,
      "learning_rate": 0.00015901580442098968,
      "loss": 0.6029,
      "step": 615
    },
    {
      "epoch": 5.585585585585585,
      "grad_norm": 0.78515625,
      "learning_rate": 0.00015816605698619452,
      "loss": 0.5985,
      "step": 620
    },
    {
      "epoch": 5.63063063063063,
      "grad_norm": 0.73046875,
      "learning_rate": 0.0001573099139443525,
      "loss": 0.5863,
      "step": 625
    },
    {
      "epoch": 5.675675675675675,
      "grad_norm": 0.84375,
      "learning_rate": 0.00015644746943205734,
      "loss": 0.5927,
      "step": 630
    },
    {
      "epoch": 5.7207207207207205,
      "grad_norm": 0.796875,
      "learning_rate": 0.00015557881827877633,
      "loss": 0.6072,
      "step": 635
    },
    {
      "epoch": 5.7657657657657655,
      "grad_norm": 0.76171875,
      "learning_rate": 0.0001547040559964236,
      "loss": 0.6032,
      "step": 640
    },
    {
      "epoch": 5.8108108108108105,
      "grad_norm": 0.7109375,
      "learning_rate": 0.00015382327876885797,
      "loss": 0.6135,
      "step": 645
    },
    {
      "epoch": 5.8558558558558556,
      "grad_norm": 0.7265625,
      "learning_rate": 0.00015293658344130734,
      "loss": 0.6058,
      "step": 650
    },
    {
      "epoch": 5.900900900900901,
      "grad_norm": 0.69140625,
      "learning_rate": 0.00015204406750972,
      "loss": 0.5948,
      "step": 655
    },
    {
      "epoch": 5.945945945945946,
      "grad_norm": 0.65234375,
      "learning_rate": 0.00015114582911004466,
      "loss": 0.6108,
      "step": 660
    },
    {
      "epoch": 5.990990990990991,
      "grad_norm": 0.79296875,
      "learning_rate": 0.0001502419670074398,
      "loss": 0.6223,
      "step": 665
    },
    {
      "epoch": 6.0,
      "eval_loss": 2.4441299438476562,
      "eval_runtime": 1.0183,
      "eval_samples_per_second": 4.91,
      "eval_steps_per_second": 1.964,
      "step": 666
    },
    {
      "epoch": 6.036036036036036,
      "grad_norm": 1.109375,
      "learning_rate": 0.00014933258058541415,
      "loss": 0.5455,
      "step": 670
    },
    {
      "epoch": 6.081081081081081,
      "grad_norm": 0.703125,
      "learning_rate": 0.000148417769834899,
      "loss": 0.5079,
      "step": 675
    },
    {
      "epoch": 6.126126126126126,
      "grad_norm": 0.9375,
      "learning_rate": 0.0001474976353432539,
      "loss": 0.4906,
      "step": 680
    },
    {
      "epoch": 6.171171171171171,
      "grad_norm": 0.96875,
      "learning_rate": 0.00014657227828320635,
      "loss": 0.5065,
      "step": 685
    },
    {
      "epoch": 6.216216216216216,
      "grad_norm": 0.75,
      "learning_rate": 0.0001456418004017278,
      "loss": 0.4978,
      "step": 690
    },
    {
      "epoch": 6.261261261261261,
      "grad_norm": 0.75390625,
      "learning_rate": 0.00014470630400884575,
      "loss": 0.4966,
      "step": 695
    },
    {
      "epoch": 6.306306306306306,
      "grad_norm": 0.7578125,
      "learning_rate": 0.00014376589196639467,
      "loss": 0.4981,
      "step": 700
    },
    {
      "epoch": 6.351351351351352,
      "grad_norm": 0.85546875,
      "learning_rate": 0.00014282066767670572,
      "loss": 0.5238,
      "step": 705
    },
    {
      "epoch": 6.396396396396397,
      "grad_norm": 0.66015625,
      "learning_rate": 0.0001418707350712372,
      "loss": 0.5052,
      "step": 710
    },
    {
      "epoch": 6.441441441441442,
      "grad_norm": 0.87890625,
      "learning_rate": 0.00014091619859914694,
      "loss": 0.5045,
      "step": 715
    },
    {
      "epoch": 6.486486486486487,
      "grad_norm": 0.8515625,
      "learning_rate": 0.0001399571632158076,
      "loss": 0.5169,
      "step": 720
    },
    {
      "epoch": 6.531531531531532,
      "grad_norm": 0.703125,
      "learning_rate": 0.00013899373437126637,
      "loss": 0.5208,
      "step": 725
    },
    {
      "epoch": 6.576576576576577,
      "grad_norm": 0.66015625,
      "learning_rate": 0.0001380260179986504,
      "loss": 0.5192,
      "step": 730
    },
    {
      "epoch": 6.621621621621622,
      "grad_norm": 0.77734375,
      "learning_rate": 0.00013705412050251883,
      "loss": 0.526,
      "step": 735
    },
    {
      "epoch": 6.666666666666667,
      "grad_norm": 0.84375,
      "learning_rate": 0.00013607814874716325,
      "loss": 0.541,
      "step": 740
    },
    {
      "epoch": 6.711711711711712,
      "grad_norm": 0.875,
      "learning_rate": 0.0001350982100448575,
      "loss": 0.5119,
      "step": 745
    },
    {
      "epoch": 6.756756756756757,
      "grad_norm": 0.7578125,
      "learning_rate": 0.00013411441214405829,
      "loss": 0.5192,
      "step": 750
    },
    {
      "epoch": 6.801801801801802,
      "grad_norm": 1.0703125,
      "learning_rate": 0.00013312686321755761,
      "loss": 0.5353,
      "step": 755
    },
    {
      "epoch": 6.846846846846847,
      "grad_norm": 1.0078125,
      "learning_rate": 0.00013213567185058888,
      "loss": 0.5286,
      "step": 760
    },
    {
      "epoch": 6.891891891891892,
      "grad_norm": 0.88671875,
      "learning_rate": 0.0001311409470288874,
      "loss": 0.5286,
      "step": 765
    },
    {
      "epoch": 6.936936936936937,
      "grad_norm": 0.796875,
      "learning_rate": 0.00013014279812670703,
      "loss": 0.5353,
      "step": 770
    },
    {
      "epoch": 6.981981981981982,
      "grad_norm": 0.6796875,
      "learning_rate": 0.00012914133489479385,
      "loss": 0.5378,
      "step": 775
    },
    {
      "epoch": 7.0,
      "eval_loss": 2.6714916229248047,
      "eval_runtime": 1.0156,
      "eval_samples_per_second": 4.923,
      "eval_steps_per_second": 1.969,
      "step": 777
    },
    {
      "epoch": 7.027027027027027,
      "grad_norm": 0.74609375,
      "learning_rate": 0.0001281366674483187,
      "loss": 0.4628,
      "step": 780
    },
    {
      "epoch": 7.072072072072072,
      "grad_norm": 0.75,
      "learning_rate": 0.00012712890625476953,
      "loss": 0.4205,
      "step": 785
    },
    {
      "epoch": 7.117117117117117,
      "grad_norm": 0.84765625,
      "learning_rate": 0.0001261181621218051,
      "loss": 0.4286,
      "step": 790
    },
    {
      "epoch": 7.162162162162162,
      "grad_norm": 0.8515625,
      "learning_rate": 0.00012510454618507106,
      "loss": 0.4393,
      "step": 795
    },
    {
      "epoch": 7.207207207207207,
      "grad_norm": 0.765625,
      "learning_rate": 0.0001240881698959802,
      "loss": 0.4276,
      "step": 800
    },
    {
      "epoch": 7.252252252252252,
      "grad_norm": 0.72265625,
      "learning_rate": 0.00012306914500945788,
      "loss": 0.4322,
      "step": 805
    },
    {
      "epoch": 7.297297297297297,
      "grad_norm": 0.87890625,
      "learning_rate": 0.00012204758357165409,
      "loss": 0.442,
      "step": 810
    },
    {
      "epoch": 7.342342342342342,
      "grad_norm": 0.7578125,
      "learning_rate": 0.00012102359790762347,
      "loss": 0.4401,
      "step": 815
    },
    {
      "epoch": 7.387387387387387,
      "grad_norm": 0.73828125,
      "learning_rate": 0.00011999730060897476,
      "loss": 0.435,
      "step": 820
    },
    {
      "epoch": 7.4324324324324325,
      "grad_norm": 0.8671875,
      "learning_rate": 0.00011896880452149077,
      "loss": 0.4448,
      "step": 825
    },
    {
      "epoch": 7.4774774774774775,
      "grad_norm": 0.8203125,
      "learning_rate": 0.00011793822273272066,
      "loss": 0.4448,
      "step": 830
    },
    {
      "epoch": 7.5225225225225225,
      "grad_norm": 0.78125,
      "learning_rate": 0.00011690566855954523,
      "loss": 0.4414,
      "step": 835
    },
    {
      "epoch": 7.5675675675675675,
      "grad_norm": 0.8046875,
      "learning_rate": 0.0001158712555357175,
      "loss": 0.4461,
      "step": 840
    },
    {
      "epoch": 7.612612612612613,
      "grad_norm": 0.78515625,
      "learning_rate": 0.00011483509739937903,
      "loss": 0.4455,
      "step": 845
    },
    {
      "epoch": 7.657657657657658,
      "grad_norm": 0.84765625,
      "learning_rate": 0.000113797308080554,
      "loss": 0.4521,
      "step": 850
    },
    {
      "epoch": 7.702702702702703,
      "grad_norm": 0.81640625,
      "learning_rate": 0.00011275800168862214,
      "loss": 0.4446,
      "step": 855
    },
    {
      "epoch": 7.747747747747748,
      "grad_norm": 0.76171875,
      "learning_rate": 0.00011171729249977169,
      "loss": 0.4333,
      "step": 860
    },
    {
      "epoch": 7.792792792792793,
      "grad_norm": 0.7734375,
      "learning_rate": 0.00011067529494443463,
      "loss": 0.4494,
      "step": 865
    },
    {
      "epoch": 7.837837837837838,
      "grad_norm": 1.1796875,
      "learning_rate": 0.00010963212359470421,
      "loss": 0.4423,
      "step": 870
    },
    {
      "epoch": 7.882882882882883,
      "grad_norm": 1.15625,
      "learning_rate": 0.00010858789315173745,
      "loss": 0.4424,
      "step": 875
    },
    {
      "epoch": 7.927927927927928,
      "grad_norm": 0.76953125,
      "learning_rate": 0.00010754271843314325,
      "loss": 0.4491,
      "step": 880
    },
    {
      "epoch": 7.972972972972973,
      "grad_norm": 0.9296875,
      "learning_rate": 0.00010649671436035753,
      "loss": 0.458,
      "step": 885
    },
    {
      "epoch": 8.0,
      "eval_loss": 2.955517292022705,
      "eval_runtime": 1.015,
      "eval_samples_per_second": 4.926,
      "eval_steps_per_second": 1.97,
      "step": 888
    },
    {
      "epoch": 8.018018018018019,
      "grad_norm": 0.90625,
      "learning_rate": 0.0001054499959460074,
      "loss": 0.4181,
      "step": 890
    },
    {
      "epoch": 8.063063063063064,
      "grad_norm": 0.87109375,
      "learning_rate": 0.00010440267828126478,
      "loss": 0.3664,
      "step": 895
    },
    {
      "epoch": 8.108108108108109,
      "grad_norm": 1.0546875,
      "learning_rate": 0.00010335487652319182,
      "loss": 0.3723,
      "step": 900
    },
    {
      "epoch": 8.153153153153154,
      "grad_norm": 0.734375,
      "learning_rate": 0.00010230670588207873,
      "loss": 0.3588,
      "step": 905
    },
    {
      "epoch": 8.198198198198199,
      "grad_norm": 0.859375,
      "learning_rate": 0.00010125828160877604,
      "loss": 0.3619,
      "step": 910
    },
    {
      "epoch": 8.243243243243244,
      "grad_norm": 0.765625,
      "learning_rate": 0.00010020971898202218,
      "loss": 0.3651,
      "step": 915
    },
    {
      "epoch": 8.288288288288289,
      "grad_norm": 0.90625,
      "learning_rate": 9.916113329576818e-05,
      "loss": 0.3742,
      "step": 920
    },
    {
      "epoch": 8.333333333333334,
      "grad_norm": 0.8359375,
      "learning_rate": 9.811263984650056e-05,
      "loss": 0.3752,
      "step": 925
    },
    {
      "epoch": 8.378378378378379,
      "grad_norm": 1.015625,
      "learning_rate": 9.706435392056394e-05,
      "loss": 0.3746,
      "step": 930
    },
    {
      "epoch": 8.423423423423424,
      "grad_norm": 0.8046875,
      "learning_rate": 9.601639078148489e-05,
      "loss": 0.3744,
      "step": 935
    },
    {
      "epoch": 8.468468468468469,
      "grad_norm": 0.82421875,
      "learning_rate": 9.496886565729823e-05,
      "loss": 0.3765,
      "step": 940
    },
    {
      "epoch": 8.513513513513514,
      "grad_norm": 0.73828125,
      "learning_rate": 9.392189372787706e-05,
      "loss": 0.371,
      "step": 945
    },
    {
      "epoch": 8.558558558558559,
      "grad_norm": 0.796875,
      "learning_rate": 9.287559011226843e-05,
      "loss": 0.3747,
      "step": 950
    },
    {
      "epoch": 8.603603603603604,
      "grad_norm": 0.80859375,
      "learning_rate": 9.183006985603545e-05,
      "loss": 0.372,
      "step": 955
    },
    {
      "epoch": 8.64864864864865,
      "grad_norm": 0.7578125,
      "learning_rate": 9.078544791860739e-05,
      "loss": 0.3764,
      "step": 960
    },
    {
      "epoch": 8.693693693693694,
      "grad_norm": 0.81640625,
      "learning_rate": 8.974183916063968e-05,
      "loss": 0.375,
      "step": 965
    },
    {
      "epoch": 8.73873873873874,
      "grad_norm": 0.75,
      "learning_rate": 8.869935833138414e-05,
      "loss": 0.3836,
      "step": 970
    },
    {
      "epoch": 8.783783783783784,
      "grad_norm": 0.75390625,
      "learning_rate": 8.765812005607217e-05,
      "loss": 0.3804,
      "step": 975
    },
    {
      "epoch": 8.82882882882883,
      "grad_norm": 0.78515625,
      "learning_rate": 8.6618238823311e-05,
      "loss": 0.3857,
      "step": 980
    },
    {
      "epoch": 8.873873873873874,
      "grad_norm": 0.81640625,
      "learning_rate": 8.557982897249513e-05,
      "loss": 0.383,
      "step": 985
    },
    {
      "epoch": 8.91891891891892,
      "grad_norm": 0.8671875,
      "learning_rate": 8.45430046812344e-05,
      "loss": 0.3831,
      "step": 990
    },
    {
      "epoch": 8.963963963963964,
      "grad_norm": 0.83203125,
      "learning_rate": 8.35078799527996e-05,
      "loss": 0.3843,
      "step": 995
    },
    {
      "epoch": 9.0,
      "eval_loss": 3.43645977973938,
      "eval_runtime": 1.0159,
      "eval_samples_per_second": 4.922,
      "eval_steps_per_second": 1.969,
      "step": 999
    },
    {
      "epoch": 9.00900900900901,
      "grad_norm": 0.734375,
      "learning_rate": 8.247456860358725e-05,
      "loss": 0.3691,
      "step": 1000
    },
    {
      "epoch": 9.054054054054054,
      "grad_norm": 1.0546875,
      "learning_rate": 8.144318425060509e-05,
      "loss": 0.32,
      "step": 1005
    },
    {
      "epoch": 9.0990990990991,
      "grad_norm": 0.7890625,
      "learning_rate": 8.04138402989794e-05,
      "loss": 0.3192,
      "step": 1010
    },
    {
      "epoch": 9.144144144144144,
      "grad_norm": 0.88671875,
      "learning_rate": 7.938664992948549e-05,
      "loss": 0.3166,
      "step": 1015
    },
    {
      "epoch": 9.18918918918919,
      "grad_norm": 0.72265625,
      "learning_rate": 7.83617260861032e-05,
      "loss": 0.3123,
      "step": 1020
    },
    {
      "epoch": 9.234234234234235,
      "grad_norm": 0.87890625,
      "learning_rate": 7.733918146359815e-05,
      "loss": 0.3192,
      "step": 1025
    },
    {
      "epoch": 9.27927927927928,
      "grad_norm": 0.8046875,
      "learning_rate": 7.631912849513025e-05,
      "loss": 0.3127,
      "step": 1030
    },
    {
      "epoch": 9.324324324324325,
      "grad_norm": 0.765625,
      "learning_rate": 7.530167933989161e-05,
      "loss": 0.3149,
      "step": 1035
    },
    {
      "epoch": 9.36936936936937,
      "grad_norm": 0.7890625,
      "learning_rate": 7.428694587077371e-05,
      "loss": 0.318,
      "step": 1040
    },
    {
      "epoch": 9.414414414414415,
      "grad_norm": 0.8125,
      "learning_rate": 7.32750396620669e-05,
      "loss": 0.3136,
      "step": 1045
    },
    {
      "epoch": 9.45945945945946,
      "grad_norm": 0.859375,
      "learning_rate": 7.226607197719202e-05,
      "loss": 0.3193,
      "step": 1050
    },
    {
      "epoch": 9.504504504504505,
      "grad_norm": 0.75390625,
      "learning_rate": 7.126015375646666e-05,
      "loss": 0.319,
      "step": 1055
    },
    {
      "epoch": 9.54954954954955,
      "grad_norm": 0.75,
      "learning_rate": 7.025739560490675e-05,
      "loss": 0.3246,
      "step": 1060
    },
    {
      "epoch": 9.594594594594595,
      "grad_norm": 0.79296875,
      "learning_rate": 6.925790778006518e-05,
      "loss": 0.3223,
      "step": 1065
    },
    {
      "epoch": 9.63963963963964,
      "grad_norm": 0.77734375,
      "learning_rate": 6.826180017990828e-05,
      "loss": 0.3238,
      "step": 1070
    },
    {
      "epoch": 9.684684684684685,
      "grad_norm": 0.80078125,
      "learning_rate": 6.726918233073231e-05,
      "loss": 0.321,
      "step": 1075
    },
    {
      "epoch": 9.72972972972973,
      "grad_norm": 0.8203125,
      "learning_rate": 6.62801633751204e-05,
      "loss": 0.3161,
      "step": 1080
    },
    {
      "epoch": 9.774774774774775,
      "grad_norm": 0.83203125,
      "learning_rate": 6.52948520599419e-05,
      "loss": 0.3221,
      "step": 1085
    },
    {
      "epoch": 9.81981981981982,
      "grad_norm": 0.7734375,
      "learning_rate": 6.431335672439531e-05,
      "loss": 0.3251,
      "step": 1090
    },
    {
      "epoch": 9.864864864864865,
      "grad_norm": 0.7890625,
      "learning_rate": 6.333578528809574e-05,
      "loss": 0.3246,
      "step": 1095
    },
    {
      "epoch": 9.90990990990991,
      "grad_norm": 0.79296875,
      "learning_rate": 6.236224523920879e-05,
      "loss": 0.3238,
      "step": 1100
    },
    {
      "epoch": 9.954954954954955,
      "grad_norm": 0.765625,
      "learning_rate": 6.139284362263185e-05,
      "loss": 0.3194,
      "step": 1105
    },
    {
      "epoch": 10.0,
      "grad_norm": 0.7109375,
      "learning_rate": 6.042768702822381e-05,
      "loss": 0.3241,
      "step": 1110
    },
    {
      "epoch": 10.0,
      "eval_loss": 3.8823204040527344,
      "eval_runtime": 1.0143,
      "eval_samples_per_second": 4.929,
      "eval_steps_per_second": 1.972,
      "step": 1110
    },
    {
      "epoch": 10.045045045045045,
      "grad_norm": 0.7890625,
      "learning_rate": 5.9466881579085275e-05,
      "loss": 0.2776,
      "step": 1115
    },
    {
      "epoch": 10.09009009009009,
      "grad_norm": 0.7890625,
      "learning_rate": 5.8510532919889804e-05,
      "loss": 0.2767,
      "step": 1120
    },
    {
      "epoch": 10.135135135135135,
      "grad_norm": 0.69140625,
      "learning_rate": 5.7558746205267756e-05,
      "loss": 0.2734,
      "step": 1125
    },
    {
      "epoch": 10.18018018018018,
      "grad_norm": 0.8203125,
      "learning_rate": 5.6611626088244194e-05,
      "loss": 0.2792,
      "step": 1130
    },
    {
      "epoch": 10.225225225225225,
      "grad_norm": 0.734375,
      "learning_rate": 5.56692767087318e-05,
      "loss": 0.2753,
      "step": 1135
    },
    {
      "epoch": 10.27027027027027,
      "grad_norm": 0.70703125,
      "learning_rate": 5.4731801682080206e-05,
      "loss": 0.2763,
      "step": 1140
    },
    {
      "epoch": 10.315315315315315,
      "grad_norm": 0.7265625,
      "learning_rate": 5.3799304087683146e-05,
      "loss": 0.2772,
      "step": 1145
    },
    {
      "epoch": 10.36036036036036,
      "grad_norm": 0.79296875,
      "learning_rate": 5.28718864576442e-05,
      "loss": 0.2763,
      "step": 1150
    },
    {
      "epoch": 10.405405405405405,
      "grad_norm": 0.75,
      "learning_rate": 5.194965076550323e-05,
      "loss": 0.2766,
      "step": 1155
    },
    {
      "epoch": 10.45045045045045,
      "grad_norm": 0.84375,
      "learning_rate": 5.1032698415023963e-05,
      "loss": 0.2802,
      "step": 1160
    },
    {
      "epoch": 10.495495495495495,
      "grad_norm": 0.8203125,
      "learning_rate": 5.0121130229043786e-05,
      "loss": 0.276,
      "step": 1165
    },
    {
      "epoch": 10.54054054054054,
      "grad_norm": 0.79296875,
      "learning_rate": 4.921504643838839e-05,
      "loss": 0.2801,
      "step": 1170
    },
    {
      "epoch": 10.585585585585585,
      "grad_norm": 0.7109375,
      "learning_rate": 4.8314546670850594e-05,
      "loss": 0.2775,
      "step": 1175
    },
    {
      "epoch": 10.63063063063063,
      "grad_norm": 0.7421875,
      "learning_rate": 4.741972994023601e-05,
      "loss": 0.2806,
      "step": 1180
    },
    {
      "epoch": 10.675675675675675,
      "grad_norm": 0.7421875,
      "learning_rate": 4.6530694635476e-05,
      "loss": 0.2775,
      "step": 1185
    },
    {
      "epoch": 10.72072072072072,
      "grad_norm": 0.7421875,
      "learning_rate": 4.5647538509809416e-05,
      "loss": 0.2802,
      "step": 1190
    },
    {
      "epoch": 10.765765765765765,
      "grad_norm": 0.73828125,
      "learning_rate": 4.477035867003405e-05,
      "loss": 0.2751,
      "step": 1195
    },
    {
      "epoch": 10.81081081081081,
      "grad_norm": 0.7734375,
      "learning_rate": 4.38992515658297e-05,
      "loss": 0.2802,
      "step": 1200
    },
    {
      "epoch": 10.855855855855856,
      "grad_norm": 0.79296875,
      "learning_rate": 4.303431297915277e-05,
      "loss": 0.2748,
      "step": 1205
    },
    {
      "epoch": 10.9009009009009,
      "grad_norm": 0.77734375,
      "learning_rate": 4.2175638013704655e-05,
      "loss": 0.2808,
      "step": 1210
    },
    {
      "epoch": 10.945945945945946,
      "grad_norm": 0.7421875,
      "learning_rate": 4.1323321084474896e-05,
      "loss": 0.2795,
      "step": 1215
    },
    {
      "epoch": 10.99099099099099,
      "grad_norm": 0.76953125,
      "learning_rate": 4.0477455907359715e-05,
      "loss": 0.2825,
      "step": 1220
    },
    {
      "epoch": 11.0,
      "eval_loss": 4.404350757598877,
      "eval_runtime": 1.0163,
      "eval_samples_per_second": 4.92,
      "eval_steps_per_second": 1.968,
      "step": 1221
    },
    {
      "epoch": 11.036036036036036,
      "grad_norm": 0.609375,
      "learning_rate": 3.963813548885751e-05,
      "loss": 0.2565,
      "step": 1225
    },
    {
      "epoch": 11.08108108108108,
      "grad_norm": 0.7109375,
      "learning_rate": 3.880545211584251e-05,
      "loss": 0.2458,
      "step": 1230
    },
    {
      "epoch": 11.126126126126126,
      "grad_norm": 0.6796875,
      "learning_rate": 3.7979497345417234e-05,
      "loss": 0.257,
      "step": 1235
    },
    {
      "epoch": 11.17117117117117,
      "grad_norm": 0.6640625,
      "learning_rate": 3.716036199484574e-05,
      "loss": 0.2449,
      "step": 1240
    },
    {
      "epoch": 11.216216216216216,
      "grad_norm": 0.83203125,
      "learning_rate": 3.634813613156753e-05,
      "loss": 0.2529,
      "step": 1245
    },
    {
      "epoch": 11.26126126126126,
      "grad_norm": 0.7421875,
      "learning_rate": 3.554290906329438e-05,
      "loss": 0.2519,
      "step": 1250
    },
    {
      "epoch": 11.306306306306306,
      "grad_norm": 0.734375,
      "learning_rate": 3.474476932819062e-05,
      "loss": 0.2551,
      "step": 1255
    },
    {
      "epoch": 11.35135135135135,
      "grad_norm": 0.69140625,
      "learning_rate": 3.395380468513805e-05,
      "loss": 0.2492,
      "step": 1260
    },
    {
      "epoch": 11.396396396396396,
      "grad_norm": 0.703125,
      "learning_rate": 3.317010210408617e-05,
      "loss": 0.2501,
      "step": 1265
    },
    {
      "epoch": 11.441441441441441,
      "grad_norm": 0.734375,
      "learning_rate": 3.2393747756489845e-05,
      "loss": 0.2493,
      "step": 1270
    },
    {
      "epoch": 11.486486486486486,
      "grad_norm": 0.70703125,
      "learning_rate": 3.162482700583402e-05,
      "loss": 0.2475,
      "step": 1275
    },
    {
      "epoch": 11.531531531531531,
      "grad_norm": 0.7578125,
      "learning_rate": 3.0863424398248064e-05,
      "loss": 0.2524,
      "step": 1280
    },
    {
      "epoch": 11.576576576576576,
      "grad_norm": 0.6953125,
      "learning_rate": 3.0109623653209163e-05,
      "loss": 0.2489,
      "step": 1285
    },
    {
      "epoch": 11.621621621621621,
      "grad_norm": 0.72265625,
      "learning_rate": 2.936350765433713e-05,
      "loss": 0.2507,
      "step": 1290
    },
    {
      "epoch": 11.666666666666666,
      "grad_norm": 0.68359375,
      "learning_rate": 2.862515844028111e-05,
      "loss": 0.2478,
      "step": 1295
    },
    {
      "epoch": 11.711711711711711,
      "grad_norm": 0.7109375,
      "learning_rate": 2.789465719569906e-05,
      "loss": 0.2521,
      "step": 1300
    },
    {
      "epoch": 11.756756756756756,
      "grad_norm": 0.6875,
      "learning_rate": 2.717208424233091e-05,
      "loss": 0.2525,
      "step": 1305
    },
    {
      "epoch": 11.801801801801801,
      "grad_norm": 0.7421875,
      "learning_rate": 2.6457519030167143e-05,
      "loss": 0.2566,
      "step": 1310
    },
    {
      "epoch": 11.846846846846846,
      "grad_norm": 0.7109375,
      "learning_rate": 2.57510401287128e-05,
      "loss": 0.2495,
      "step": 1315
    },
    {
      "epoch": 11.891891891891891,
      "grad_norm": 0.6796875,
      "learning_rate": 2.5052725218348394e-05,
      "loss": 0.2526,
      "step": 1320
    },
    {
      "epoch": 11.936936936936936,
      "grad_norm": 0.71484375,
      "learning_rate": 2.4362651081788667e-05,
      "loss": 0.249,
      "step": 1325
    },
    {
      "epoch": 11.981981981981981,
      "grad_norm": 0.77734375,
      "learning_rate": 2.3680893595640043e-05,
      "loss": 0.2549,
      "step": 1330
    },
    {
      "epoch": 12.0,
      "eval_loss": 4.838156700134277,
      "eval_runtime": 1.0155,
      "eval_samples_per_second": 4.923,
      "eval_steps_per_second": 1.969,
      "step": 1332
    },
    {
      "epoch": 12.027027027027026,
      "grad_norm": 0.625,
      "learning_rate": 2.3007527722057488e-05,
      "loss": 0.2452,
      "step": 1335
    },
    {
      "epoch": 12.072072072072071,
      "grad_norm": 0.6484375,
      "learning_rate": 2.234262750050241e-05,
      "loss": 0.2436,
      "step": 1340
    },
    {
      "epoch": 12.117117117117116,
      "grad_norm": 0.6171875,
      "learning_rate": 2.168626603960138e-05,
      "loss": 0.2396,
      "step": 1345
    },
    {
      "epoch": 12.162162162162161,
      "grad_norm": 0.671875,
      "learning_rate": 2.1038515509107736e-05,
      "loss": 0.2344,
      "step": 1350
    },
    {
      "epoch": 12.207207207207206,
      "grad_norm": 0.71875,
      "learning_rate": 2.0399447131966132e-05,
      "loss": 0.2359,
      "step": 1355
    },
    {
      "epoch": 12.252252252252251,
      "grad_norm": 0.65234375,
      "learning_rate": 1.976913117648128e-05,
      "loss": 0.2374,
      "step": 1360
    },
    {
      "epoch": 12.297297297297296,
      "grad_norm": 0.6015625,
      "learning_rate": 1.9147636948591653e-05,
      "loss": 0.2406,
      "step": 1365
    },
    {
      "epoch": 12.342342342342342,
      "grad_norm": 0.65234375,
      "learning_rate": 1.8535032784249028e-05,
      "loss": 0.2392,
      "step": 1370
    },
    {
      "epoch": 12.387387387387387,
      "grad_norm": 0.703125,
      "learning_rate": 1.7931386041904506e-05,
      "loss": 0.2347,
      "step": 1375
    },
    {
      "epoch": 12.432432432432432,
      "grad_norm": 0.66796875,
      "learning_rate": 1.733676309510245e-05,
      "loss": 0.241,
      "step": 1380
    },
    {
      "epoch": 12.477477477477478,
      "grad_norm": 0.71875,
      "learning_rate": 1.6751229325182195e-05,
      "loss": 0.2392,
      "step": 1385
    },
    {
      "epoch": 12.522522522522522,
      "grad_norm": 0.65234375,
      "learning_rate": 1.6174849114089064e-05,
      "loss": 0.2389,
      "step": 1390
    },
    {
      "epoch": 12.567567567567568,
      "grad_norm": 0.703125,
      "learning_rate": 1.5607685837295516e-05,
      "loss": 0.2397,
      "step": 1395
    },
    {
      "epoch": 12.612612612612612,
      "grad_norm": 0.6640625,
      "learning_rate": 1.5049801856832557e-05,
      "loss": 0.2343,
      "step": 1400
    },
    {
      "epoch": 12.657657657657658,
      "grad_norm": 0.67578125,
      "learning_rate": 1.4501258514432836e-05,
      "loss": 0.2364,
      "step": 1405
    },
    {
      "epoch": 12.702702702702704,
      "grad_norm": 0.69140625,
      "learning_rate": 1.3962116124785863e-05,
      "loss": 0.2416,
      "step": 1410
    },
    {
      "epoch": 12.747747747747749,
      "grad_norm": 0.6796875,
      "learning_rate": 1.3432433968906044e-05,
      "loss": 0.238,
      "step": 1415
    },
    {
      "epoch": 12.792792792792794,
      "grad_norm": 0.625,
      "learning_rate": 1.2912270287614736e-05,
      "loss": 0.2355,
      "step": 1420
    },
    {
      "epoch": 12.837837837837839,
      "grad_norm": 0.65625,
      "learning_rate": 1.240168227513614e-05,
      "loss": 0.2388,
      "step": 1425
    },
    {
      "epoch": 12.882882882882884,
      "grad_norm": 0.6640625,
      "learning_rate": 1.1900726072808666e-05,
      "loss": 0.2351,
      "step": 1430
    },
    {
      "epoch": 12.927927927927929,
      "grad_norm": 0.65234375,
      "learning_rate": 1.1409456762911985e-05,
      "loss": 0.2393,
      "step": 1435
    },
    {
      "epoch": 12.972972972972974,
      "grad_norm": 0.65234375,
      "learning_rate": 1.0927928362610462e-05,
      "loss": 0.2408,
      "step": 1440
    },
    {
      "epoch": 13.0,
      "eval_loss": 5.061110496520996,
      "eval_runtime": 1.0149,
      "eval_samples_per_second": 4.927,
      "eval_steps_per_second": 1.971,
      "step": 1443
    },
    {
      "epoch": 13.018018018018019,
      "grad_norm": 0.68359375,
      "learning_rate": 1.0456193818013726e-05,
      "loss": 0.2395,
      "step": 1445
    },
    {
      "epoch": 13.063063063063064,
      "grad_norm": 0.66015625,
      "learning_rate": 9.99430499835503e-06,
      "loss": 0.2342,
      "step": 1450
    },
    {
      "epoch": 13.108108108108109,
      "grad_norm": 0.75390625,
      "learning_rate": 9.542312690288036e-06,
      "loss": 0.2342,
      "step": 1455
    },
    {
      "epoch": 13.153153153153154,
      "grad_norm": 0.6015625,
      "learning_rate": 9.100266592302542e-06,
      "loss": 0.2332,
      "step": 1460
    },
    {
      "epoch": 13.198198198198199,
      "grad_norm": 0.671875,
      "learning_rate": 8.668215309259997e-06,
      "loss": 0.2327,
      "step": 1465
    },
    {
      "epoch": 13.243243243243244,
      "grad_norm": 0.62890625,
      "learning_rate": 8.246206347049079e-06,
      "loss": 0.2353,
      "step": 1470
    },
    {
      "epoch": 13.288288288288289,
      "grad_norm": 0.66796875,
      "learning_rate": 7.834286107362343e-06,
      "loss": 0.2331,
      "step": 1475
    },
    {
      "epoch": 13.333333333333334,
      "grad_norm": 0.67578125,
      "learning_rate": 7.432499882594091e-06,
      "loss": 0.2323,
      "step": 1480
    },
    {
      "epoch": 13.378378378378379,
      "grad_norm": 0.68359375,
      "learning_rate": 7.040891850860287e-06,
      "loss": 0.2354,
      "step": 1485
    },
    {
      "epoch": 13.423423423423424,
      "grad_norm": 0.6328125,
      "learning_rate": 6.659505071140959e-06,
      "loss": 0.2345,
      "step": 1490
    },
    {
      "epoch": 13.468468468468469,
      "grad_norm": 0.62109375,
      "learning_rate": 6.2883814785457575e-06,
      "loss": 0.2376,
      "step": 1495
    },
    {
      "epoch": 13.513513513513514,
      "grad_norm": 0.64453125,
      "learning_rate": 5.927561879702903e-06,
      "loss": 0.2361,
      "step": 1500
    },
    {
      "epoch": 13.558558558558559,
      "grad_norm": 0.72265625,
      "learning_rate": 5.577085948272431e-06,
      "loss": 0.2331,
      "step": 1505
    },
    {
      "epoch": 13.603603603603604,
      "grad_norm": 0.609375,
      "learning_rate": 5.236992220583814e-06,
      "loss": 0.2312,
      "step": 1510
    },
    {
      "epoch": 13.64864864864865,
      "grad_norm": 0.6953125,
      "learning_rate": 4.90731809139876e-06,
      "loss": 0.2347,
      "step": 1515
    },
    {
      "epoch": 13.693693693693694,
      "grad_norm": 0.62890625,
      "learning_rate": 4.588099809799551e-06,
      "loss": 0.2359,
      "step": 1520
    },
    {
      "epoch": 13.73873873873874,
      "grad_norm": 0.62890625,
      "learning_rate": 4.279372475203181e-06,
      "loss": 0.2321,
      "step": 1525
    },
    {
      "epoch": 13.783783783783784,
      "grad_norm": 0.65625,
      "learning_rate": 3.981170033502158e-06,
      "loss": 0.2326,
      "step": 1530
    },
    {
      "epoch": 13.82882882882883,
      "grad_norm": 0.6328125,
      "learning_rate": 3.6935252733319213e-06,
      "loss": 0.2341,
      "step": 1535
    },
    {
      "epoch": 13.873873873873874,
      "grad_norm": 0.6171875,
      "learning_rate": 3.4164698224656066e-06,
      "loss": 0.2308,
      "step": 1540
    },
    {
      "epoch": 13.91891891891892,
      "grad_norm": 0.66796875,
      "learning_rate": 3.1500341443364556e-06,
      "loss": 0.2338,
      "step": 1545
    },
    {
      "epoch": 13.963963963963964,
      "grad_norm": 0.66796875,
      "learning_rate": 2.8942475346882435e-06,
      "loss": 0.2361,
      "step": 1550
    },
    {
      "epoch": 14.0,
      "eval_loss": 5.106085777282715,
      "eval_runtime": 1.0149,
      "eval_samples_per_second": 4.927,
      "eval_steps_per_second": 1.971,
      "step": 1554
    },
    {
      "epoch": 14.00900900900901,
      "grad_norm": 0.61328125,
      "learning_rate": 2.649138118354011e-06,
      "loss": 0.2352,
      "step": 1555
    },
    {
      "epoch": 14.054054054054054,
      "grad_norm": 0.6796875,
      "learning_rate": 2.4147328461637144e-06,
      "loss": 0.2335,
      "step": 1560
    },
    {
      "epoch": 14.0990990990991,
      "grad_norm": 0.6484375,
      "learning_rate": 2.1910574919808304e-06,
      "loss": 0.2308,
      "step": 1565
    },
    {
      "epoch": 14.144144144144144,
      "grad_norm": 0.62890625,
      "learning_rate": 1.978136649868345e-06,
      "loss": 0.2345,
      "step": 1570
    },
    {
      "epoch": 14.18918918918919,
      "grad_norm": 0.66015625,
      "learning_rate": 1.775993731384662e-06,
      "loss": 0.235,
      "step": 1575
    },
    {
      "epoch": 14.234234234234235,
      "grad_norm": 0.6796875,
      "learning_rate": 1.584650963009271e-06,
      "loss": 0.2337,
      "step": 1580
    },
    {
      "epoch": 14.27927927927928,
      "grad_norm": 0.68359375,
      "learning_rate": 1.4041293836989377e-06,
      "loss": 0.2319,
      "step": 1585
    },
    {
      "epoch": 14.324324324324325,
      "grad_norm": 0.64453125,
      "learning_rate": 1.234448842574365e-06,
      "loss": 0.2332,
      "step": 1590
    },
    {
      "epoch": 14.36936936936937,
      "grad_norm": 0.6328125,
      "learning_rate": 1.075627996737627e-06,
      "loss": 0.23,
      "step": 1595
    },
    {
      "epoch": 14.414414414414415,
      "grad_norm": 0.6875,
      "learning_rate": 9.276843092208553e-07,
      "loss": 0.2353,
      "step": 1600
    },
    {
      "epoch": 14.45945945945946,
      "grad_norm": 0.640625,
      "learning_rate": 7.90634047066019e-07,
      "loss": 0.2339,
      "step": 1605
    },
    {
      "epoch": 14.504504504504505,
      "grad_norm": 0.6796875,
      "learning_rate": 6.644922795363218e-07,
      "loss": 0.2381,
      "step": 1610
    },
    {
      "epoch": 14.54954954954955,
      "grad_norm": 0.66015625,
      "learning_rate": 5.492728764592725e-07,
      "loss": 0.2335,
      "step": 1615
    },
    {
      "epoch": 14.594594594594595,
      "grad_norm": 0.65234375,
      "learning_rate": 4.4498850670164906e-07,
      "loss": 0.2343,
      "step": 1620
    },
    {
      "epoch": 14.63963963963964,
      "grad_norm": 0.6015625,
      "learning_rate": 3.5165063677645714e-07,
      "loss": 0.2369,
      "step": 1625
    },
    {
      "epoch": 14.684684684684685,
      "grad_norm": 0.65234375,
      "learning_rate": 2.6926952958221674e-07,
      "loss": 0.2367,
      "step": 1630
    },
    {
      "epoch": 14.72972972972973,
      "grad_norm": 0.6328125,
      "learning_rate": 1.9785424327440906e-07,
      "loss": 0.2302,
      "step": 1635
    },
    {
      "epoch": 14.774774774774775,
      "grad_norm": 0.6640625,
      "learning_rate": 1.374126302696066e-07,
      "loss": 0.237,
      "step": 1640
    },
    {
      "epoch": 14.81981981981982,
      "grad_norm": 0.65625,
      "learning_rate": 8.795133638197506e-08,
      "loss": 0.2298,
      "step": 1645
    },
    {
      "epoch": 14.864864864864865,
      "grad_norm": 0.7109375,
      "learning_rate": 4.94758000925799e-08,
      "loss": 0.2372,
      "step": 1650
    },
    {
      "epoch": 14.90990990990991,
      "grad_norm": 0.62890625,
      "learning_rate": 2.1990251951398144e-08,
      "loss": 0.2312,
      "step": 1655
    },
    {
      "epoch": 14.954954954954955,
      "grad_norm": 0.62890625,
      "learning_rate": 5.497714112157048e-09,
      "loss": 0.2311,
      "step": 1660
    },
    {
      "epoch": 15.0,
      "grad_norm": 0.62109375,
      "learning_rate": 0.0,
      "loss": 0.2319,
      "step": 1665
    },
    {
      "epoch": 15.0,
      "eval_loss": 5.111132621765137,
      "eval_runtime": 1.0137,
      "eval_samples_per_second": 4.932,
      "eval_steps_per_second": 1.973,
      "step": 1665
    },
    {
      "epoch": 15.0,
      "step": 1665,
      "total_flos": 2.545573832974926e+18,
      "train_loss": 1.4823458194016694,
      "train_runtime": 13248.7618,
      "train_samples_per_second": 2.007,
      "train_steps_per_second": 0.126
    }
  ],
  "logging_steps": 5,
  "max_steps": 1665,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 15,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.545573832974926e+18,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}