|
{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 10.650389391707009, |
|
"eval_steps": 100, |
|
"global_step": 25300, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.04, |
|
"grad_norm": 2.6430506706237793, |
|
"learning_rate": 9.5e-06, |
|
"loss": 5.6141, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"eval_cer": 0.9770320871299519, |
|
"eval_loss": 2.355087995529175, |
|
"eval_runtime": 329.8418, |
|
"eval_samples_per_second": 28.735, |
|
"eval_steps_per_second": 3.593, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"grad_norm": 2.838773012161255, |
|
"learning_rate": 1.9500000000000003e-05, |
|
"loss": 2.1498, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"eval_cer": 0.6385162879824802, |
|
"eval_loss": 1.961960792541504, |
|
"eval_runtime": 334.0577, |
|
"eval_samples_per_second": 28.372, |
|
"eval_steps_per_second": 3.547, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"grad_norm": 3.848980665206909, |
|
"learning_rate": 2.95e-05, |
|
"loss": 2.6461, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"eval_cer": 0.5852479371162645, |
|
"eval_loss": 2.6564223766326904, |
|
"eval_runtime": 350.7705, |
|
"eval_samples_per_second": 27.021, |
|
"eval_steps_per_second": 3.378, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"grad_norm": 3.9299519062042236, |
|
"learning_rate": 3.9500000000000005e-05, |
|
"loss": 2.359, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"eval_cer": 0.5907913261116108, |
|
"eval_loss": 2.6195201873779297, |
|
"eval_runtime": 341.3159, |
|
"eval_samples_per_second": 27.769, |
|
"eval_steps_per_second": 3.472, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"grad_norm": 4.236645698547363, |
|
"learning_rate": 4.9500000000000004e-05, |
|
"loss": 4.2363, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"eval_cer": 0.624139650385202, |
|
"eval_loss": 2.457566261291504, |
|
"eval_runtime": 356.3471, |
|
"eval_samples_per_second": 26.598, |
|
"eval_steps_per_second": 3.325, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"grad_norm": 13.769584655761719, |
|
"learning_rate": 4.9932862190812725e-05, |
|
"loss": 2.6933, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"eval_cer": 0.6483369833013961, |
|
"eval_loss": 3.4720070362091064, |
|
"eval_runtime": 345.5879, |
|
"eval_samples_per_second": 27.426, |
|
"eval_steps_per_second": 3.429, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"grad_norm": 4.292990207672119, |
|
"learning_rate": 4.986289752650177e-05, |
|
"loss": 4.1075, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"eval_cer": 0.6181098705564898, |
|
"eval_loss": 3.3849761486053467, |
|
"eval_runtime": 353.3198, |
|
"eval_samples_per_second": 26.826, |
|
"eval_steps_per_second": 3.354, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"grad_norm": 4.32611083984375, |
|
"learning_rate": 4.97922261484099e-05, |
|
"loss": 2.6907, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"eval_cer": 0.6191510891243988, |
|
"eval_loss": 2.5412750244140625, |
|
"eval_runtime": 343.4967, |
|
"eval_samples_per_second": 27.593, |
|
"eval_steps_per_second": 3.45, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"grad_norm": 3.173403263092041, |
|
"learning_rate": 4.972155477031802e-05, |
|
"loss": 4.2294, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"eval_cer": 0.5840918423213797, |
|
"eval_loss": 2.6817102432250977, |
|
"eval_runtime": 354.5341, |
|
"eval_samples_per_second": 26.734, |
|
"eval_steps_per_second": 3.342, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"grad_norm": 4.8158440589904785, |
|
"learning_rate": 4.965088339222615e-05, |
|
"loss": 2.2949, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"eval_cer": 0.5736649915920379, |
|
"eval_loss": 1.9513429403305054, |
|
"eval_runtime": 345.1224, |
|
"eval_samples_per_second": 27.463, |
|
"eval_steps_per_second": 3.434, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"grad_norm": 3.0650887489318848, |
|
"learning_rate": 4.958021201413428e-05, |
|
"loss": 2.6115, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"eval_cer": 0.5645799929607759, |
|
"eval_loss": 2.8409903049468994, |
|
"eval_runtime": 352.0824, |
|
"eval_samples_per_second": 26.92, |
|
"eval_steps_per_second": 3.366, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"grad_norm": 3.0540664196014404, |
|
"learning_rate": 4.950954063604241e-05, |
|
"loss": 2.3453, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"eval_cer": 0.598292499315631, |
|
"eval_loss": 1.8713030815124512, |
|
"eval_runtime": 344.1368, |
|
"eval_samples_per_second": 27.541, |
|
"eval_steps_per_second": 3.443, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"grad_norm": 4.366485118865967, |
|
"learning_rate": 4.9438869257950535e-05, |
|
"loss": 3.2578, |
|
"step": 1300 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"eval_cer": 0.5734816784638849, |
|
"eval_loss": 2.000699758529663, |
|
"eval_runtime": 350.6902, |
|
"eval_samples_per_second": 27.027, |
|
"eval_steps_per_second": 3.379, |
|
"step": 1300 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"grad_norm": 81.67564392089844, |
|
"learning_rate": 4.936819787985866e-05, |
|
"loss": 1.9657, |
|
"step": 1400 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"eval_cer": 0.6325427241797349, |
|
"eval_loss": 3.403754472732544, |
|
"eval_runtime": 340.0489, |
|
"eval_samples_per_second": 27.872, |
|
"eval_steps_per_second": 3.485, |
|
"step": 1400 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"grad_norm": 3.837653875350952, |
|
"learning_rate": 4.929752650176679e-05, |
|
"loss": 2.6865, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"eval_cer": 0.5574918853388604, |
|
"eval_loss": 3.5201330184936523, |
|
"eval_runtime": 351.432, |
|
"eval_samples_per_second": 26.97, |
|
"eval_steps_per_second": 3.372, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"grad_norm": 2.9047458171844482, |
|
"learning_rate": 4.922685512367491e-05, |
|
"loss": 2.4105, |
|
"step": 1600 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"eval_cer": 0.5886624496499941, |
|
"eval_loss": 3.5269789695739746, |
|
"eval_runtime": 347.023, |
|
"eval_samples_per_second": 27.312, |
|
"eval_steps_per_second": 3.415, |
|
"step": 1600 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"grad_norm": 2.9270007610321045, |
|
"learning_rate": 4.915618374558304e-05, |
|
"loss": 2.418, |
|
"step": 1700 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"eval_cer": 0.624877791247898, |
|
"eval_loss": 2.498126983642578, |
|
"eval_runtime": 355.9419, |
|
"eval_samples_per_second": 26.628, |
|
"eval_steps_per_second": 3.329, |
|
"step": 1700 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"grad_norm": 3.458078622817993, |
|
"learning_rate": 4.908551236749117e-05, |
|
"loss": 2.6118, |
|
"step": 1800 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"eval_cer": 0.5718074185600877, |
|
"eval_loss": 3.407224655151367, |
|
"eval_runtime": 343.8365, |
|
"eval_samples_per_second": 27.565, |
|
"eval_steps_per_second": 3.446, |
|
"step": 1800 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"grad_norm": 1.9933278560638428, |
|
"learning_rate": 4.90148409893993e-05, |
|
"loss": 3.2046, |
|
"step": 1900 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"eval_cer": 0.5539844941535333, |
|
"eval_loss": 1.928993821144104, |
|
"eval_runtime": 357.2509, |
|
"eval_samples_per_second": 26.53, |
|
"eval_steps_per_second": 3.317, |
|
"step": 1900 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"grad_norm": 4.917328357696533, |
|
"learning_rate": 4.8944169611307425e-05, |
|
"loss": 3.1904, |
|
"step": 2000 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"eval_cer": 0.5568001838019632, |
|
"eval_loss": 2.729193925857544, |
|
"eval_runtime": 341.3463, |
|
"eval_samples_per_second": 27.767, |
|
"eval_steps_per_second": 3.472, |
|
"step": 2000 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"grad_norm": 2.842353343963623, |
|
"learning_rate": 4.8873498233215547e-05, |
|
"loss": 2.1649, |
|
"step": 2100 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"eval_cer": 0.544757733369833, |
|
"eval_loss": 2.421653985977173, |
|
"eval_runtime": 358.7261, |
|
"eval_samples_per_second": 26.421, |
|
"eval_steps_per_second": 3.303, |
|
"step": 2100 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"grad_norm": 4.818225383758545, |
|
"learning_rate": 4.880282685512368e-05, |
|
"loss": 2.2775, |
|
"step": 2200 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"eval_cer": 0.5533832270931915, |
|
"eval_loss": 2.188101291656494, |
|
"eval_runtime": 354.4215, |
|
"eval_samples_per_second": 26.742, |
|
"eval_steps_per_second": 3.343, |
|
"step": 2200 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"grad_norm": 2.9436004161834717, |
|
"learning_rate": 4.87321554770318e-05, |
|
"loss": 1.6308, |
|
"step": 2300 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"eval_cer": 0.5162586523796489, |
|
"eval_loss": 3.3528623580932617, |
|
"eval_runtime": 360.9925, |
|
"eval_samples_per_second": 26.255, |
|
"eval_steps_per_second": 3.283, |
|
"step": 2300 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"grad_norm": 7.935622692108154, |
|
"learning_rate": 4.866148409893993e-05, |
|
"loss": 2.4462, |
|
"step": 2400 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"eval_cer": 0.575390579171718, |
|
"eval_loss": 2.0690765380859375, |
|
"eval_runtime": 343.7001, |
|
"eval_samples_per_second": 27.576, |
|
"eval_steps_per_second": 3.448, |
|
"step": 2400 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"grad_norm": 2.40964937210083, |
|
"learning_rate": 4.859081272084806e-05, |
|
"loss": 2.3368, |
|
"step": 2500 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"eval_cer": 0.5851746118650033, |
|
"eval_loss": 1.9030027389526367, |
|
"eval_runtime": 352.7743, |
|
"eval_samples_per_second": 26.867, |
|
"eval_steps_per_second": 3.359, |
|
"step": 2500 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"grad_norm": 3.7618064880371094, |
|
"learning_rate": 4.852084805653711e-05, |
|
"loss": 2.4157, |
|
"step": 2600 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"eval_cer": 0.5566339798991045, |
|
"eval_loss": 3.213930368423462, |
|
"eval_runtime": 341.3273, |
|
"eval_samples_per_second": 27.768, |
|
"eval_steps_per_second": 3.472, |
|
"step": 2600 |
|
}, |
|
{ |
|
"epoch": 1.14, |
|
"grad_norm": 9.61824893951416, |
|
"learning_rate": 4.845017667844523e-05, |
|
"loss": 2.7299, |
|
"step": 2700 |
|
}, |
|
{ |
|
"epoch": 1.14, |
|
"eval_cer": 0.5819702983848891, |
|
"eval_loss": 1.7893775701522827, |
|
"eval_runtime": 353.5497, |
|
"eval_samples_per_second": 26.808, |
|
"eval_steps_per_second": 3.352, |
|
"step": 2700 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"grad_norm": 2.6824772357940674, |
|
"learning_rate": 4.8379505300353364e-05, |
|
"loss": 2.934, |
|
"step": 2800 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"eval_cer": 0.5119813460560791, |
|
"eval_loss": 2.804206609725952, |
|
"eval_runtime": 340.4666, |
|
"eval_samples_per_second": 27.838, |
|
"eval_steps_per_second": 3.481, |
|
"step": 2800 |
|
}, |
|
{ |
|
"epoch": 1.22, |
|
"grad_norm": 4.691165924072266, |
|
"learning_rate": 4.8308833922261485e-05, |
|
"loss": 2.0012, |
|
"step": 2900 |
|
}, |
|
{ |
|
"epoch": 1.22, |
|
"eval_cer": 0.5138047006374409, |
|
"eval_loss": 3.412604331970215, |
|
"eval_runtime": 349.7896, |
|
"eval_samples_per_second": 27.096, |
|
"eval_steps_per_second": 3.388, |
|
"step": 2900 |
|
}, |
|
{ |
|
"epoch": 1.26, |
|
"grad_norm": 2.8883514404296875, |
|
"learning_rate": 4.823816254416961e-05, |
|
"loss": 3.5496, |
|
"step": 3000 |
|
}, |
|
{ |
|
"epoch": 1.26, |
|
"eval_cer": 0.5417074029173673, |
|
"eval_loss": 1.7423540353775024, |
|
"eval_runtime": 339.3199, |
|
"eval_samples_per_second": 27.932, |
|
"eval_steps_per_second": 3.492, |
|
"step": 3000 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"grad_norm": 3.9499669075012207, |
|
"learning_rate": 4.816749116607774e-05, |
|
"loss": 2.4353, |
|
"step": 3100 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"eval_cer": 0.5479229400492746, |
|
"eval_loss": 3.4527170658111572, |
|
"eval_runtime": 354.1425, |
|
"eval_samples_per_second": 26.763, |
|
"eval_steps_per_second": 3.346, |
|
"step": 3100 |
|
}, |
|
{ |
|
"epoch": 1.35, |
|
"grad_norm": 2.9536569118499756, |
|
"learning_rate": 4.809681978798587e-05, |
|
"loss": 2.9787, |
|
"step": 3200 |
|
}, |
|
{ |
|
"epoch": 1.35, |
|
"eval_cer": 0.5677500879903015, |
|
"eval_loss": 2.6674227714538574, |
|
"eval_runtime": 344.0522, |
|
"eval_samples_per_second": 27.548, |
|
"eval_steps_per_second": 3.444, |
|
"step": 3200 |
|
}, |
|
{ |
|
"epoch": 1.39, |
|
"grad_norm": 3.094930648803711, |
|
"learning_rate": 4.8026148409894e-05, |
|
"loss": 2.2166, |
|
"step": 3300 |
|
}, |
|
{ |
|
"epoch": 1.39, |
|
"eval_cer": 0.5857514371749247, |
|
"eval_loss": 3.2282423973083496, |
|
"eval_runtime": 358.5567, |
|
"eval_samples_per_second": 26.434, |
|
"eval_steps_per_second": 3.305, |
|
"step": 3300 |
|
}, |
|
{ |
|
"epoch": 1.43, |
|
"grad_norm": 5.626856803894043, |
|
"learning_rate": 4.795547703180212e-05, |
|
"loss": 2.6222, |
|
"step": 3400 |
|
}, |
|
{ |
|
"epoch": 1.43, |
|
"eval_cer": 0.5495043213014743, |
|
"eval_loss": 1.7686785459518433, |
|
"eval_runtime": 342.362, |
|
"eval_samples_per_second": 27.684, |
|
"eval_steps_per_second": 3.461, |
|
"step": 3400 |
|
}, |
|
{ |
|
"epoch": 1.47, |
|
"grad_norm": 5.24758768081665, |
|
"learning_rate": 4.7884805653710253e-05, |
|
"loss": 2.078, |
|
"step": 3500 |
|
}, |
|
{ |
|
"epoch": 1.47, |
|
"eval_cer": 0.5179402448085723, |
|
"eval_loss": 2.244272470474243, |
|
"eval_runtime": 354.1685, |
|
"eval_samples_per_second": 26.761, |
|
"eval_steps_per_second": 3.346, |
|
"step": 3500 |
|
}, |
|
{ |
|
"epoch": 1.52, |
|
"grad_norm": 6.822329044342041, |
|
"learning_rate": 4.7814134275618375e-05, |
|
"loss": 2.7694, |
|
"step": 3600 |
|
}, |
|
{ |
|
"epoch": 1.52, |
|
"eval_cer": 0.5457598451370693, |
|
"eval_loss": 2.137202501296997, |
|
"eval_runtime": 345.4818, |
|
"eval_samples_per_second": 27.434, |
|
"eval_steps_per_second": 3.43, |
|
"step": 3600 |
|
}, |
|
{ |
|
"epoch": 1.56, |
|
"grad_norm": 2.648332357406616, |
|
"learning_rate": 4.77434628975265e-05, |
|
"loss": 1.74, |
|
"step": 3700 |
|
}, |
|
{ |
|
"epoch": 1.56, |
|
"eval_cer": 0.5568441789527199, |
|
"eval_loss": 3.1145806312561035, |
|
"eval_runtime": 362.0538, |
|
"eval_samples_per_second": 26.178, |
|
"eval_steps_per_second": 3.273, |
|
"step": 3700 |
|
}, |
|
{ |
|
"epoch": 1.6, |
|
"grad_norm": 17.834945678710938, |
|
"learning_rate": 4.767279151943463e-05, |
|
"loss": 1.7586, |
|
"step": 3800 |
|
}, |
|
{ |
|
"epoch": 1.6, |
|
"eval_cer": 0.5344653122678034, |
|
"eval_loss": 1.8897360563278198, |
|
"eval_runtime": 345.2891, |
|
"eval_samples_per_second": 27.449, |
|
"eval_steps_per_second": 3.432, |
|
"step": 3800 |
|
}, |
|
{ |
|
"epoch": 1.64, |
|
"grad_norm": 7.307125568389893, |
|
"learning_rate": 4.760212014134276e-05, |
|
"loss": 1.6227, |
|
"step": 3900 |
|
}, |
|
{ |
|
"epoch": 1.64, |
|
"eval_cer": 0.551083258378632, |
|
"eval_loss": 1.9913257360458374, |
|
"eval_runtime": 354.7374, |
|
"eval_samples_per_second": 26.718, |
|
"eval_steps_per_second": 3.34, |
|
"step": 3900 |
|
}, |
|
{ |
|
"epoch": 1.68, |
|
"grad_norm": 4.344304084777832, |
|
"learning_rate": 4.753144876325089e-05, |
|
"loss": 3.1757, |
|
"step": 4000 |
|
}, |
|
{ |
|
"epoch": 1.68, |
|
"eval_cer": 0.5003299636306754, |
|
"eval_loss": 1.8998807668685913, |
|
"eval_runtime": 348.4123, |
|
"eval_samples_per_second": 27.203, |
|
"eval_steps_per_second": 3.401, |
|
"step": 4000 |
|
}, |
|
{ |
|
"epoch": 1.73, |
|
"grad_norm": 2.8654837608337402, |
|
"learning_rate": 4.746077738515901e-05, |
|
"loss": 2.226, |
|
"step": 4100 |
|
}, |
|
{ |
|
"epoch": 1.73, |
|
"eval_cer": 0.5324904188338352, |
|
"eval_loss": 3.042088031768799, |
|
"eval_runtime": 354.1261, |
|
"eval_samples_per_second": 26.764, |
|
"eval_steps_per_second": 3.346, |
|
"step": 4100 |
|
}, |
|
{ |
|
"epoch": 1.77, |
|
"grad_norm": 7.595367908477783, |
|
"learning_rate": 4.7390106007067143e-05, |
|
"loss": 2.7925, |
|
"step": 4200 |
|
}, |
|
{ |
|
"epoch": 1.77, |
|
"eval_cer": 0.5470479253842243, |
|
"eval_loss": 3.0831844806671143, |
|
"eval_runtime": 344.2619, |
|
"eval_samples_per_second": 27.531, |
|
"eval_steps_per_second": 3.442, |
|
"step": 4200 |
|
}, |
|
{ |
|
"epoch": 1.81, |
|
"grad_norm": 7.425869941711426, |
|
"learning_rate": 4.7319434628975265e-05, |
|
"loss": 3.2591, |
|
"step": 4300 |
|
}, |
|
{ |
|
"epoch": 1.81, |
|
"eval_cer": 0.5522955691994837, |
|
"eval_loss": 1.7015308141708374, |
|
"eval_runtime": 353.5748, |
|
"eval_samples_per_second": 26.806, |
|
"eval_steps_per_second": 3.351, |
|
"step": 4300 |
|
}, |
|
{ |
|
"epoch": 1.85, |
|
"grad_norm": 2.405174493789673, |
|
"learning_rate": 4.724876325088339e-05, |
|
"loss": 2.373, |
|
"step": 4400 |
|
}, |
|
{ |
|
"epoch": 1.85, |
|
"eval_cer": 0.5220977865550819, |
|
"eval_loss": 2.095752477645874, |
|
"eval_runtime": 344.3236, |
|
"eval_samples_per_second": 27.526, |
|
"eval_steps_per_second": 3.442, |
|
"step": 4400 |
|
}, |
|
{ |
|
"epoch": 1.89, |
|
"grad_norm": 3.823211908340454, |
|
"learning_rate": 4.717809187279153e-05, |
|
"loss": 2.3919, |
|
"step": 4500 |
|
}, |
|
{ |
|
"epoch": 1.89, |
|
"eval_cer": 0.5331259043447656, |
|
"eval_loss": 2.41994571685791, |
|
"eval_runtime": 353.9416, |
|
"eval_samples_per_second": 26.778, |
|
"eval_steps_per_second": 3.348, |
|
"step": 4500 |
|
}, |
|
{ |
|
"epoch": 1.94, |
|
"grad_norm": 2.0702412128448486, |
|
"learning_rate": 4.710742049469965e-05, |
|
"loss": 2.4842, |
|
"step": 4600 |
|
}, |
|
{ |
|
"epoch": 1.94, |
|
"eval_cer": 0.5329523679167807, |
|
"eval_loss": 3.652872085571289, |
|
"eval_runtime": 339.8921, |
|
"eval_samples_per_second": 27.885, |
|
"eval_steps_per_second": 3.486, |
|
"step": 4600 |
|
}, |
|
{ |
|
"epoch": 1.98, |
|
"grad_norm": 3.5580544471740723, |
|
"learning_rate": 4.703745583038869e-05, |
|
"loss": 3.577, |
|
"step": 4700 |
|
}, |
|
{ |
|
"epoch": 1.98, |
|
"eval_cer": 0.5378284971256502, |
|
"eval_loss": 2.777487277984619, |
|
"eval_runtime": 356.1415, |
|
"eval_samples_per_second": 26.613, |
|
"eval_steps_per_second": 3.327, |
|
"step": 4700 |
|
}, |
|
{ |
|
"epoch": 2.02, |
|
"grad_norm": 2.3930351734161377, |
|
"learning_rate": 4.6966784452296826e-05, |
|
"loss": 2.733, |
|
"step": 4800 |
|
}, |
|
{ |
|
"epoch": 2.02, |
|
"eval_cer": 0.5305424113253294, |
|
"eval_loss": 3.2367630004882812, |
|
"eval_runtime": 346.0446, |
|
"eval_samples_per_second": 27.39, |
|
"eval_steps_per_second": 3.424, |
|
"step": 4800 |
|
}, |
|
{ |
|
"epoch": 2.06, |
|
"grad_norm": 4.198298931121826, |
|
"learning_rate": 4.6896113074204954e-05, |
|
"loss": 1.616, |
|
"step": 4900 |
|
}, |
|
{ |
|
"epoch": 2.06, |
|
"eval_cer": 0.523026573071057, |
|
"eval_loss": 2.150926351547241, |
|
"eval_runtime": 371.9418, |
|
"eval_samples_per_second": 25.482, |
|
"eval_steps_per_second": 3.186, |
|
"step": 4900 |
|
}, |
|
{ |
|
"epoch": 2.1, |
|
"grad_norm": 3.6696269512176514, |
|
"learning_rate": 4.6825441696113075e-05, |
|
"loss": 1.8252, |
|
"step": 5000 |
|
}, |
|
{ |
|
"epoch": 2.1, |
|
"eval_cer": 0.5257933792186461, |
|
"eval_loss": 3.1695752143859863, |
|
"eval_runtime": 351.2533, |
|
"eval_samples_per_second": 26.983, |
|
"eval_steps_per_second": 3.374, |
|
"step": 5000 |
|
}, |
|
{ |
|
"epoch": 2.15, |
|
"grad_norm": 5.002595901489258, |
|
"learning_rate": 4.67547703180212e-05, |
|
"loss": 2.0594, |
|
"step": 5100 |
|
}, |
|
{ |
|
"epoch": 2.15, |
|
"eval_cer": 0.5161682179030933, |
|
"eval_loss": 2.4667067527770996, |
|
"eval_runtime": 377.3711, |
|
"eval_samples_per_second": 25.116, |
|
"eval_steps_per_second": 3.14, |
|
"step": 5100 |
|
}, |
|
{ |
|
"epoch": 2.19, |
|
"grad_norm": 3.2300162315368652, |
|
"learning_rate": 4.668409893992933e-05, |
|
"loss": 1.8512, |
|
"step": 5200 |
|
}, |
|
{ |
|
"epoch": 2.19, |
|
"eval_cer": 0.5493185639982793, |
|
"eval_loss": 1.743632435798645, |
|
"eval_runtime": 361.1881, |
|
"eval_samples_per_second": 26.241, |
|
"eval_steps_per_second": 3.281, |
|
"step": 5200 |
|
}, |
|
{ |
|
"epoch": 2.23, |
|
"grad_norm": 2.109961986541748, |
|
"learning_rate": 4.661342756183746e-05, |
|
"loss": 2.4585, |
|
"step": 5300 |
|
}, |
|
{ |
|
"epoch": 2.23, |
|
"eval_cer": 0.5222273278323101, |
|
"eval_loss": 1.6703613996505737, |
|
"eval_runtime": 374.7053, |
|
"eval_samples_per_second": 25.295, |
|
"eval_steps_per_second": 3.162, |
|
"step": 5300 |
|
}, |
|
{ |
|
"epoch": 2.27, |
|
"grad_norm": 3.7831201553344727, |
|
"learning_rate": 4.654275618374558e-05, |
|
"loss": 1.6107, |
|
"step": 5400 |
|
}, |
|
{ |
|
"epoch": 2.27, |
|
"eval_cer": 0.5470601462594346, |
|
"eval_loss": 2.0870959758758545, |
|
"eval_runtime": 367.5169, |
|
"eval_samples_per_second": 25.789, |
|
"eval_steps_per_second": 3.224, |
|
"step": 5400 |
|
}, |
|
{ |
|
"epoch": 2.32, |
|
"grad_norm": 12.008744239807129, |
|
"learning_rate": 4.6472084805653715e-05, |
|
"loss": 2.3883, |
|
"step": 5500 |
|
}, |
|
{ |
|
"epoch": 2.32, |
|
"eval_cer": 0.5516771929138478, |
|
"eval_loss": 3.582747459411621, |
|
"eval_runtime": 376.938, |
|
"eval_samples_per_second": 25.145, |
|
"eval_steps_per_second": 3.144, |
|
"step": 5500 |
|
}, |
|
{ |
|
"epoch": 2.36, |
|
"grad_norm": 2.4807026386260986, |
|
"learning_rate": 4.6401413427561844e-05, |
|
"loss": 2.1991, |
|
"step": 5600 |
|
}, |
|
{ |
|
"epoch": 2.36, |
|
"eval_cer": 0.5373152203668218, |
|
"eval_loss": 3.5865066051483154, |
|
"eval_runtime": 360.1614, |
|
"eval_samples_per_second": 26.316, |
|
"eval_steps_per_second": 3.29, |
|
"step": 5600 |
|
}, |
|
{ |
|
"epoch": 2.4, |
|
"grad_norm": 6.418155670166016, |
|
"learning_rate": 4.6330742049469965e-05, |
|
"loss": 2.3905, |
|
"step": 5700 |
|
}, |
|
{ |
|
"epoch": 2.4, |
|
"eval_cer": 0.527748719252278, |
|
"eval_loss": 2.6406588554382324, |
|
"eval_runtime": 379.816, |
|
"eval_samples_per_second": 24.954, |
|
"eval_steps_per_second": 3.12, |
|
"step": 5700 |
|
}, |
|
{ |
|
"epoch": 2.44, |
|
"grad_norm": 2.4806301593780518, |
|
"learning_rate": 4.626007067137809e-05, |
|
"loss": 3.3218, |
|
"step": 5800 |
|
}, |
|
{ |
|
"epoch": 2.44, |
|
"eval_cer": 0.5217727112744907, |
|
"eval_loss": 1.8109639883041382, |
|
"eval_runtime": 351.8213, |
|
"eval_samples_per_second": 26.94, |
|
"eval_steps_per_second": 3.368, |
|
"step": 5800 |
|
}, |
|
{ |
|
"epoch": 2.48, |
|
"grad_norm": 2.4776511192321777, |
|
"learning_rate": 4.618939929328622e-05, |
|
"loss": 2.3876, |
|
"step": 5900 |
|
}, |
|
{ |
|
"epoch": 2.48, |
|
"eval_cer": 0.5089310156036134, |
|
"eval_loss": 3.134875774383545, |
|
"eval_runtime": 372.2436, |
|
"eval_samples_per_second": 25.462, |
|
"eval_steps_per_second": 3.183, |
|
"step": 5900 |
|
}, |
|
{ |
|
"epoch": 2.53, |
|
"grad_norm": 9.97956657409668, |
|
"learning_rate": 4.611872791519435e-05, |
|
"loss": 4.1527, |
|
"step": 6000 |
|
}, |
|
{ |
|
"epoch": 2.53, |
|
"eval_cer": 0.4981815337687224, |
|
"eval_loss": 1.7425427436828613, |
|
"eval_runtime": 363.658, |
|
"eval_samples_per_second": 26.063, |
|
"eval_steps_per_second": 3.259, |
|
"step": 6000 |
|
}, |
|
{ |
|
"epoch": 2.57, |
|
"grad_norm": 3.353976249694824, |
|
"learning_rate": 4.604805653710248e-05, |
|
"loss": 1.7398, |
|
"step": 6100 |
|
}, |
|
{ |
|
"epoch": 2.57, |
|
"eval_cer": 0.5255465175394001, |
|
"eval_loss": 2.6437551975250244, |
|
"eval_runtime": 380.4559, |
|
"eval_samples_per_second": 24.912, |
|
"eval_steps_per_second": 3.115, |
|
"step": 6100 |
|
}, |
|
{ |
|
"epoch": 2.61, |
|
"grad_norm": 2.671917676925659, |
|
"learning_rate": 4.5978091872791526e-05, |
|
"loss": 2.2359, |
|
"step": 6200 |
|
}, |
|
{ |
|
"epoch": 2.61, |
|
"eval_cer": 0.537452094169176, |
|
"eval_loss": 3.4509007930755615, |
|
"eval_runtime": 364.3511, |
|
"eval_samples_per_second": 26.013, |
|
"eval_steps_per_second": 3.252, |
|
"step": 6200 |
|
}, |
|
{ |
|
"epoch": 2.65, |
|
"grad_norm": 10.607972145080566, |
|
"learning_rate": 4.590742049469965e-05, |
|
"loss": 3.6419, |
|
"step": 6300 |
|
}, |
|
{ |
|
"epoch": 2.65, |
|
"eval_cer": 0.5374105431934614, |
|
"eval_loss": 2.2622523307800293, |
|
"eval_runtime": 370.8325, |
|
"eval_samples_per_second": 25.559, |
|
"eval_steps_per_second": 3.196, |
|
"step": 6300 |
|
}, |
|
{ |
|
"epoch": 2.69, |
|
"grad_norm": 4.678300857543945, |
|
"learning_rate": 4.5836749116607775e-05, |
|
"loss": 1.7309, |
|
"step": 6400 |
|
}, |
|
{ |
|
"epoch": 2.69, |
|
"eval_cer": 0.5418393883696375, |
|
"eval_loss": 1.8007577657699585, |
|
"eval_runtime": 357.8741, |
|
"eval_samples_per_second": 26.484, |
|
"eval_steps_per_second": 3.311, |
|
"step": 6400 |
|
}, |
|
{ |
|
"epoch": 2.74, |
|
"grad_norm": 3.6259689331054688, |
|
"learning_rate": 4.57660777385159e-05, |
|
"loss": 2.784, |
|
"step": 6500 |
|
}, |
|
{ |
|
"epoch": 2.74, |
|
"eval_cer": 0.5228799225685347, |
|
"eval_loss": 2.9312586784362793, |
|
"eval_runtime": 371.6433, |
|
"eval_samples_per_second": 25.503, |
|
"eval_steps_per_second": 3.189, |
|
"step": 6500 |
|
}, |
|
{ |
|
"epoch": 2.78, |
|
"grad_norm": 1.8296568393707275, |
|
"learning_rate": 4.569540636042403e-05, |
|
"loss": 2.4269, |
|
"step": 6600 |
|
}, |
|
{ |
|
"epoch": 2.78, |
|
"eval_cer": 0.5211445582886864, |
|
"eval_loss": 3.522822618484497, |
|
"eval_runtime": 359.0335, |
|
"eval_samples_per_second": 26.399, |
|
"eval_steps_per_second": 3.301, |
|
"step": 6600 |
|
}, |
|
{ |
|
"epoch": 2.82, |
|
"grad_norm": 23.777395248413086, |
|
"learning_rate": 4.562473498233216e-05, |
|
"loss": 1.5294, |
|
"step": 6700 |
|
}, |
|
{ |
|
"epoch": 2.82, |
|
"eval_cer": 0.5097864768683275, |
|
"eval_loss": 2.758403778076172, |
|
"eval_runtime": 375.8984, |
|
"eval_samples_per_second": 25.214, |
|
"eval_steps_per_second": 3.152, |
|
"step": 6700 |
|
}, |
|
{ |
|
"epoch": 2.86, |
|
"grad_norm": 5.520616054534912, |
|
"learning_rate": 4.555406360424028e-05, |
|
"loss": 1.6177, |
|
"step": 6800 |
|
}, |
|
{ |
|
"epoch": 2.86, |
|
"eval_cer": 0.5417905048687967, |
|
"eval_loss": 2.8678946495056152, |
|
"eval_runtime": 360.269, |
|
"eval_samples_per_second": 26.308, |
|
"eval_steps_per_second": 3.289, |
|
"step": 6800 |
|
}, |
|
{ |
|
"epoch": 2.9, |
|
"grad_norm": 12.704540252685547, |
|
"learning_rate": 4.5483392226148416e-05, |
|
"loss": 2.6892, |
|
"step": 6900 |
|
}, |
|
{ |
|
"epoch": 2.9, |
|
"eval_cer": 0.5306426225020531, |
|
"eval_loss": 2.7854502201080322, |
|
"eval_runtime": 372.5558, |
|
"eval_samples_per_second": 25.44, |
|
"eval_steps_per_second": 3.181, |
|
"step": 6900 |
|
}, |
|
{ |
|
"epoch": 2.95, |
|
"grad_norm": 3.1302738189697266, |
|
"learning_rate": 4.541272084805654e-05, |
|
"loss": 3.1467, |
|
"step": 7000 |
|
}, |
|
{ |
|
"epoch": 2.95, |
|
"eval_cer": 0.5148948027061906, |
|
"eval_loss": 2.9890220165252686, |
|
"eval_runtime": 359.2287, |
|
"eval_samples_per_second": 26.384, |
|
"eval_steps_per_second": 3.299, |
|
"step": 7000 |
|
}, |
|
{ |
|
"epoch": 2.99, |
|
"grad_norm": 1.750848412513733, |
|
"learning_rate": 4.5342049469964665e-05, |
|
"loss": 2.1825, |
|
"step": 7100 |
|
}, |
|
{ |
|
"epoch": 2.99, |
|
"eval_cer": 0.5565606546478432, |
|
"eval_loss": 3.195390224456787, |
|
"eval_runtime": 366.6828, |
|
"eval_samples_per_second": 25.848, |
|
"eval_steps_per_second": 3.232, |
|
"step": 7100 |
|
}, |
|
{ |
|
"epoch": 3.03, |
|
"grad_norm": 237.01815795898438, |
|
"learning_rate": 4.527137809187279e-05, |
|
"loss": 3.0545, |
|
"step": 7200 |
|
}, |
|
{ |
|
"epoch": 3.03, |
|
"eval_cer": 0.5361493488717688, |
|
"eval_loss": 2.276334524154663, |
|
"eval_runtime": 373.4974, |
|
"eval_samples_per_second": 25.376, |
|
"eval_steps_per_second": 3.173, |
|
"step": 7200 |
|
}, |
|
{ |
|
"epoch": 3.07, |
|
"grad_norm": 2.9628891944885254, |
|
"learning_rate": 4.520070671378092e-05, |
|
"loss": 1.9898, |
|
"step": 7300 |
|
}, |
|
{ |
|
"epoch": 3.07, |
|
"eval_cer": 0.5218631457510461, |
|
"eval_loss": 1.94766104221344, |
|
"eval_runtime": 364.5953, |
|
"eval_samples_per_second": 25.996, |
|
"eval_steps_per_second": 3.25, |
|
"step": 7300 |
|
}, |
|
{ |
|
"epoch": 3.12, |
|
"grad_norm": 19.71111488342285, |
|
"learning_rate": 4.513003533568905e-05, |
|
"loss": 1.7744, |
|
"step": 7400 |
|
}, |
|
{ |
|
"epoch": 3.12, |
|
"eval_cer": 0.5048052481326503, |
|
"eval_loss": 2.699909210205078, |
|
"eval_runtime": 377.4252, |
|
"eval_samples_per_second": 25.112, |
|
"eval_steps_per_second": 3.14, |
|
"step": 7400 |
|
}, |
|
{ |
|
"epoch": 3.16, |
|
"grad_norm": 3.2357943058013916, |
|
"learning_rate": 4.505936395759717e-05, |
|
"loss": 1.8872, |
|
"step": 7500 |
|
}, |
|
{ |
|
"epoch": 3.16, |
|
"eval_cer": 0.5123968558132259, |
|
"eval_loss": 2.0029726028442383, |
|
"eval_runtime": 372.4224, |
|
"eval_samples_per_second": 25.45, |
|
"eval_steps_per_second": 3.182, |
|
"step": 7500 |
|
}, |
|
{ |
|
"epoch": 3.2, |
|
"grad_norm": 2.642812967300415, |
|
"learning_rate": 4.4988692579505305e-05, |
|
"loss": 3.1774, |
|
"step": 7600 |
|
}, |
|
{ |
|
"epoch": 3.2, |
|
"eval_cer": 0.5154300770403973, |
|
"eval_loss": 2.36077618598938, |
|
"eval_runtime": 375.4381, |
|
"eval_samples_per_second": 25.245, |
|
"eval_steps_per_second": 3.156, |
|
"step": 7600 |
|
}, |
|
{ |
|
"epoch": 3.24, |
|
"grad_norm": 4.245635509490967, |
|
"learning_rate": 4.491802120141343e-05, |
|
"loss": 1.7844, |
|
"step": 7700 |
|
}, |
|
{ |
|
"epoch": 3.24, |
|
"eval_cer": 0.5169381330413358, |
|
"eval_loss": 2.6845099925994873, |
|
"eval_runtime": 355.3776, |
|
"eval_samples_per_second": 26.67, |
|
"eval_steps_per_second": 3.334, |
|
"step": 7700 |
|
}, |
|
{ |
|
"epoch": 3.28, |
|
"grad_norm": 2.799055576324463, |
|
"learning_rate": 4.4847349823321555e-05, |
|
"loss": 3.4812, |
|
"step": 7800 |
|
}, |
|
{ |
|
"epoch": 3.28, |
|
"eval_cer": 0.49670036369324627, |
|
"eval_loss": 3.4256324768066406, |
|
"eval_runtime": 374.5672, |
|
"eval_samples_per_second": 25.304, |
|
"eval_steps_per_second": 3.164, |
|
"step": 7800 |
|
}, |
|
{ |
|
"epoch": 3.33, |
|
"grad_norm": 2.1845884323120117, |
|
"learning_rate": 4.477667844522968e-05, |
|
"loss": 3.0965, |
|
"step": 7900 |
|
}, |
|
{ |
|
"epoch": 3.33, |
|
"eval_cer": 0.5227894880919792, |
|
"eval_loss": 1.759318232536316, |
|
"eval_runtime": 366.7135, |
|
"eval_samples_per_second": 25.846, |
|
"eval_steps_per_second": 3.231, |
|
"step": 7900 |
|
}, |
|
{ |
|
"epoch": 3.37, |
|
"grad_norm": 4.01229190826416, |
|
"learning_rate": 4.470600706713781e-05, |
|
"loss": 2.008, |
|
"step": 8000 |
|
}, |
|
{ |
|
"epoch": 3.37, |
|
"eval_cer": 0.5001319854522701, |
|
"eval_loss": 1.97179114818573, |
|
"eval_runtime": 377.7336, |
|
"eval_samples_per_second": 25.092, |
|
"eval_steps_per_second": 3.137, |
|
"step": 8000 |
|
}, |
|
{ |
|
"epoch": 3.41, |
|
"grad_norm": 1.3265995979309082, |
|
"learning_rate": 4.463533568904594e-05, |
|
"loss": 1.7854, |
|
"step": 8100 |
|
}, |
|
{ |
|
"epoch": 3.41, |
|
"eval_cer": 0.48717785772945915, |
|
"eval_loss": 1.6064122915267944, |
|
"eval_runtime": 358.3216, |
|
"eval_samples_per_second": 26.451, |
|
"eval_steps_per_second": 3.307, |
|
"step": 8100 |
|
}, |
|
{ |
|
"epoch": 3.45, |
|
"grad_norm": 3.6079511642456055, |
|
"learning_rate": 4.456466431095407e-05, |
|
"loss": 1.7713, |
|
"step": 8200 |
|
}, |
|
{ |
|
"epoch": 3.45, |
|
"eval_cer": 0.495703140276094, |
|
"eval_loss": 2.7158584594726562, |
|
"eval_runtime": 373.6599, |
|
"eval_samples_per_second": 25.365, |
|
"eval_steps_per_second": 3.171, |
|
"step": 8200 |
|
}, |
|
{ |
|
"epoch": 3.49, |
|
"grad_norm": 3.969160318374634, |
|
"learning_rate": 4.4493992932862195e-05, |
|
"loss": 3.3072, |
|
"step": 8300 |
|
}, |
|
{ |
|
"epoch": 3.49, |
|
"eval_cer": 0.49535606742012434, |
|
"eval_loss": 1.9461767673492432, |
|
"eval_runtime": 357.0354, |
|
"eval_samples_per_second": 26.546, |
|
"eval_steps_per_second": 3.319, |
|
"step": 8300 |
|
}, |
|
{ |
|
"epoch": 3.54, |
|
"grad_norm": 1.3938627243041992, |
|
"learning_rate": 4.442332155477032e-05, |
|
"loss": 2.1757, |
|
"step": 8400 |
|
}, |
|
{ |
|
"epoch": 3.54, |
|
"eval_cer": 0.5164395213327597, |
|
"eval_loss": 1.6015843152999878, |
|
"eval_runtime": 375.0822, |
|
"eval_samples_per_second": 25.269, |
|
"eval_steps_per_second": 3.159, |
|
"step": 8400 |
|
}, |
|
{ |
|
"epoch": 3.58, |
|
"grad_norm": 7.331507682800293, |
|
"learning_rate": 4.4352650176678445e-05, |
|
"loss": 2.3463, |
|
"step": 8500 |
|
}, |
|
{ |
|
"epoch": 3.58, |
|
"eval_cer": 0.4924132806695084, |
|
"eval_loss": 3.185905933380127, |
|
"eval_runtime": 357.5938, |
|
"eval_samples_per_second": 26.505, |
|
"eval_steps_per_second": 3.314, |
|
"step": 8500 |
|
}, |
|
{ |
|
"epoch": 3.62, |
|
"grad_norm": 2.3522162437438965, |
|
"learning_rate": 4.428197879858658e-05, |
|
"loss": 1.9984, |
|
"step": 8600 |
|
}, |
|
{ |
|
"epoch": 3.62, |
|
"eval_cer": 0.5185977278948809, |
|
"eval_loss": 2.115999221801758, |
|
"eval_runtime": 377.0919, |
|
"eval_samples_per_second": 25.134, |
|
"eval_steps_per_second": 3.142, |
|
"step": 8600 |
|
}, |
|
{ |
|
"epoch": 3.66, |
|
"grad_norm": 5.214267730712891, |
|
"learning_rate": 4.42113074204947e-05, |
|
"loss": 1.5957, |
|
"step": 8700 |
|
}, |
|
{ |
|
"epoch": 3.66, |
|
"eval_cer": 0.5082393140667162, |
|
"eval_loss": 1.6540412902832031, |
|
"eval_runtime": 357.3421, |
|
"eval_samples_per_second": 26.524, |
|
"eval_steps_per_second": 3.316, |
|
"step": 8700 |
|
}, |
|
{ |
|
"epoch": 3.7, |
|
"grad_norm": 14.154474258422852, |
|
"learning_rate": 4.414063604240283e-05, |
|
"loss": 1.6646, |
|
"step": 8800 |
|
}, |
|
{ |
|
"epoch": 3.7, |
|
"eval_cer": 0.49412175902389427, |
|
"eval_loss": 2.7898120880126953, |
|
"eval_runtime": 372.8281, |
|
"eval_samples_per_second": 25.422, |
|
"eval_steps_per_second": 3.178, |
|
"step": 8800 |
|
}, |
|
{ |
|
"epoch": 3.75, |
|
"grad_norm": 6.580379486083984, |
|
"learning_rate": 4.406996466431096e-05, |
|
"loss": 1.9858, |
|
"step": 8900 |
|
}, |
|
{ |
|
"epoch": 3.75, |
|
"eval_cer": 0.5368801572093387, |
|
"eval_loss": 3.0827128887176514, |
|
"eval_runtime": 359.3692, |
|
"eval_samples_per_second": 26.374, |
|
"eval_steps_per_second": 3.297, |
|
"step": 8900 |
|
}, |
|
{ |
|
"epoch": 3.79, |
|
"grad_norm": 4.16035270690918, |
|
"learning_rate": 4.3999293286219085e-05, |
|
"loss": 2.312, |
|
"step": 9000 |
|
}, |
|
{ |
|
"epoch": 3.79, |
|
"eval_cer": 0.503619823237261, |
|
"eval_loss": 1.6672168970108032, |
|
"eval_runtime": 375.3507, |
|
"eval_samples_per_second": 25.251, |
|
"eval_steps_per_second": 3.157, |
|
"step": 9000 |
|
}, |
|
{ |
|
"epoch": 3.83, |
|
"grad_norm": 1.9299957752227783, |
|
"learning_rate": 4.392862190812721e-05, |
|
"loss": 2.0043, |
|
"step": 9100 |
|
}, |
|
{ |
|
"epoch": 3.83, |
|
"eval_cer": 0.4974531696061945, |
|
"eval_loss": 3.044623613357544, |
|
"eval_runtime": 365.1986, |
|
"eval_samples_per_second": 25.953, |
|
"eval_steps_per_second": 3.245, |
|
"step": 9100 |
|
}, |
|
{ |
|
"epoch": 3.87, |
|
"grad_norm": 1.68573796749115, |
|
"learning_rate": 4.3857950530035335e-05, |
|
"loss": 2.5474, |
|
"step": 9200 |
|
}, |
|
{ |
|
"epoch": 3.87, |
|
"eval_cer": 0.5048394665832389, |
|
"eval_loss": 2.7017831802368164, |
|
"eval_runtime": 374.6095, |
|
"eval_samples_per_second": 25.301, |
|
"eval_steps_per_second": 3.163, |
|
"step": 9200 |
|
}, |
|
{ |
|
"epoch": 3.91, |
|
"grad_norm": 4.48902702331543, |
|
"learning_rate": 4.378727915194347e-05, |
|
"loss": 2.2683, |
|
"step": 9300 |
|
}, |
|
{ |
|
"epoch": 3.91, |
|
"eval_cer": 0.49138917132689375, |
|
"eval_loss": 1.5836848020553589, |
|
"eval_runtime": 360.7206, |
|
"eval_samples_per_second": 26.275, |
|
"eval_steps_per_second": 3.285, |
|
"step": 9300 |
|
}, |
|
{ |
|
"epoch": 3.96, |
|
"grad_norm": 2.788163661956787, |
|
"learning_rate": 4.371731448763251e-05, |
|
"loss": 2.4278, |
|
"step": 9400 |
|
}, |
|
{ |
|
"epoch": 3.96, |
|
"eval_cer": 0.5205579562785968, |
|
"eval_loss": 2.4102938175201416, |
|
"eval_runtime": 376.1786, |
|
"eval_samples_per_second": 25.195, |
|
"eval_steps_per_second": 3.15, |
|
"step": 9400 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"grad_norm": 2.6063647270202637, |
|
"learning_rate": 4.364664310954063e-05, |
|
"loss": 1.5116, |
|
"step": 9500 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"eval_cer": 0.4881310859958547, |
|
"eval_loss": 1.728480339050293, |
|
"eval_runtime": 352.8942, |
|
"eval_samples_per_second": 26.858, |
|
"eval_steps_per_second": 3.358, |
|
"step": 9500 |
|
}, |
|
{ |
|
"epoch": 4.04, |
|
"grad_norm": 5.240590572357178, |
|
"learning_rate": 4.357597173144877e-05, |
|
"loss": 2.2426, |
|
"step": 9600 |
|
}, |
|
{ |
|
"epoch": 4.04, |
|
"eval_cer": 0.49515320089163506, |
|
"eval_loss": 2.496321201324463, |
|
"eval_runtime": 373.8495, |
|
"eval_samples_per_second": 25.352, |
|
"eval_steps_per_second": 3.17, |
|
"step": 9600 |
|
}, |
|
{ |
|
"epoch": 4.08, |
|
"grad_norm": 2.557533025741577, |
|
"learning_rate": 4.3505300353356896e-05, |
|
"loss": 1.8649, |
|
"step": 9700 |
|
}, |
|
{ |
|
"epoch": 4.08, |
|
"eval_cer": 0.5054896171444214, |
|
"eval_loss": 3.00071120262146, |
|
"eval_runtime": 362.1744, |
|
"eval_samples_per_second": 26.17, |
|
"eval_steps_per_second": 3.272, |
|
"step": 9700 |
|
}, |
|
{ |
|
"epoch": 4.13, |
|
"grad_norm": 27.368144989013672, |
|
"learning_rate": 4.343462897526502e-05, |
|
"loss": 1.6976, |
|
"step": 9800 |
|
}, |
|
{ |
|
"epoch": 4.13, |
|
"eval_cer": 0.4800359782566188, |
|
"eval_loss": 2.5932295322418213, |
|
"eval_runtime": 371.4743, |
|
"eval_samples_per_second": 25.515, |
|
"eval_steps_per_second": 3.19, |
|
"step": 9800 |
|
}, |
|
{ |
|
"epoch": 4.17, |
|
"grad_norm": 4.671775817871094, |
|
"learning_rate": 4.336395759717315e-05, |
|
"loss": 1.7712, |
|
"step": 9900 |
|
}, |
|
{ |
|
"epoch": 4.17, |
|
"eval_cer": 0.4871509718039967, |
|
"eval_loss": 2.37919282913208, |
|
"eval_runtime": 357.2849, |
|
"eval_samples_per_second": 26.528, |
|
"eval_steps_per_second": 3.317, |
|
"step": 9900 |
|
}, |
|
{ |
|
"epoch": 4.21, |
|
"grad_norm": 2.77481746673584, |
|
"learning_rate": 4.329328621908127e-05, |
|
"loss": 1.45, |
|
"step": 10000 |
|
}, |
|
{ |
|
"epoch": 4.21, |
|
"eval_cer": 0.49421952602557584, |
|
"eval_loss": 1.6775341033935547, |
|
"eval_runtime": 376.5454, |
|
"eval_samples_per_second": 25.171, |
|
"eval_steps_per_second": 3.147, |
|
"step": 10000 |
|
}, |
|
{ |
|
"epoch": 4.25, |
|
"grad_norm": 7.3151140213012695, |
|
"learning_rate": 4.32226148409894e-05, |
|
"loss": 1.5163, |
|
"step": 10100 |
|
}, |
|
{ |
|
"epoch": 4.25, |
|
"eval_cer": 0.49458370810683977, |
|
"eval_loss": 2.463066577911377, |
|
"eval_runtime": 349.9183, |
|
"eval_samples_per_second": 27.086, |
|
"eval_steps_per_second": 3.387, |
|
"step": 10100 |
|
}, |
|
{ |
|
"epoch": 4.29, |
|
"grad_norm": 3.947378635406494, |
|
"learning_rate": 4.315194346289753e-05, |
|
"loss": 1.4527, |
|
"step": 10200 |
|
}, |
|
{ |
|
"epoch": 4.29, |
|
"eval_cer": 0.49508965234054203, |
|
"eval_loss": 3.2406046390533447, |
|
"eval_runtime": 377.1597, |
|
"eval_samples_per_second": 25.13, |
|
"eval_steps_per_second": 3.142, |
|
"step": 10200 |
|
}, |
|
{ |
|
"epoch": 4.34, |
|
"grad_norm": 7.427024841308594, |
|
"learning_rate": 4.308127208480566e-05, |
|
"loss": 1.4446, |
|
"step": 10300 |
|
}, |
|
{ |
|
"epoch": 4.34, |
|
"eval_cer": 0.48428639865472606, |
|
"eval_loss": 2.7247695922851562, |
|
"eval_runtime": 356.4628, |
|
"eval_samples_per_second": 26.589, |
|
"eval_steps_per_second": 3.324, |
|
"step": 10300 |
|
}, |
|
{ |
|
"epoch": 4.38, |
|
"grad_norm": 3.1798312664031982, |
|
"learning_rate": 4.3010600706713785e-05, |
|
"loss": 2.4877, |
|
"step": 10400 |
|
}, |
|
{ |
|
"epoch": 4.38, |
|
"eval_cer": 0.48304475773336986, |
|
"eval_loss": 2.2158772945404053, |
|
"eval_runtime": 376.4543, |
|
"eval_samples_per_second": 25.177, |
|
"eval_steps_per_second": 3.148, |
|
"step": 10400 |
|
}, |
|
{ |
|
"epoch": 4.42, |
|
"grad_norm": 2.0509414672851562, |
|
"learning_rate": 4.293992932862191e-05, |
|
"loss": 1.9214, |
|
"step": 10500 |
|
}, |
|
{ |
|
"epoch": 4.42, |
|
"eval_cer": 0.4835262602166517, |
|
"eval_loss": 1.906830906867981, |
|
"eval_runtime": 357.384, |
|
"eval_samples_per_second": 26.52, |
|
"eval_steps_per_second": 3.316, |
|
"step": 10500 |
|
}, |
|
{ |
|
"epoch": 4.46, |
|
"grad_norm": 3.71893048286438, |
|
"learning_rate": 4.286925795053004e-05, |
|
"loss": 1.4426, |
|
"step": 10600 |
|
}, |
|
{ |
|
"epoch": 4.46, |
|
"eval_cer": 0.48283211450471236, |
|
"eval_loss": 2.5203747749328613, |
|
"eval_runtime": 378.5225, |
|
"eval_samples_per_second": 25.039, |
|
"eval_steps_per_second": 3.131, |
|
"step": 10600 |
|
}, |
|
{ |
|
"epoch": 4.5, |
|
"grad_norm": 3.5400538444519043, |
|
"learning_rate": 4.279858657243816e-05, |
|
"loss": 2.5204, |
|
"step": 10700 |
|
}, |
|
{ |
|
"epoch": 4.5, |
|
"eval_cer": 0.496866567596105, |
|
"eval_loss": 3.1510612964630127, |
|
"eval_runtime": 360.9029, |
|
"eval_samples_per_second": 26.262, |
|
"eval_steps_per_second": 3.283, |
|
"step": 10700 |
|
}, |
|
{ |
|
"epoch": 4.55, |
|
"grad_norm": 3.434525966644287, |
|
"learning_rate": 4.272791519434629e-05, |
|
"loss": 1.605, |
|
"step": 10800 |
|
}, |
|
{ |
|
"epoch": 4.55, |
|
"eval_cer": 0.4771103007312972, |
|
"eval_loss": 3.0746238231658936, |
|
"eval_runtime": 370.845, |
|
"eval_samples_per_second": 25.558, |
|
"eval_steps_per_second": 3.195, |
|
"step": 10800 |
|
}, |
|
{ |
|
"epoch": 4.59, |
|
"grad_norm": 6.7493815422058105, |
|
"learning_rate": 4.265724381625442e-05, |
|
"loss": 2.153, |
|
"step": 10900 |
|
}, |
|
{ |
|
"epoch": 4.59, |
|
"eval_cer": 0.4859728794337335, |
|
"eval_loss": 2.0545401573181152, |
|
"eval_runtime": 361.9049, |
|
"eval_samples_per_second": 26.189, |
|
"eval_steps_per_second": 3.274, |
|
"step": 10900 |
|
}, |
|
{ |
|
"epoch": 4.63, |
|
"grad_norm": 1.7070709466934204, |
|
"learning_rate": 4.258657243816255e-05, |
|
"loss": 1.8732, |
|
"step": 11000 |
|
}, |
|
{ |
|
"epoch": 4.63, |
|
"eval_cer": 0.4698926518321536, |
|
"eval_loss": 2.4386019706726074, |
|
"eval_runtime": 373.6335, |
|
"eval_samples_per_second": 25.367, |
|
"eval_steps_per_second": 3.172, |
|
"step": 11000 |
|
}, |
|
{ |
|
"epoch": 4.67, |
|
"grad_norm": 11.575754165649414, |
|
"learning_rate": 4.2515901060070675e-05, |
|
"loss": 1.9211, |
|
"step": 11100 |
|
}, |
|
{ |
|
"epoch": 4.67, |
|
"eval_cer": 0.5030527746275077, |
|
"eval_loss": 2.1613056659698486, |
|
"eval_runtime": 353.1639, |
|
"eval_samples_per_second": 26.837, |
|
"eval_steps_per_second": 3.355, |
|
"step": 11100 |
|
}, |
|
{ |
|
"epoch": 4.71, |
|
"grad_norm": 5.623499393463135, |
|
"learning_rate": 4.24452296819788e-05, |
|
"loss": 2.6473, |
|
"step": 11200 |
|
}, |
|
{ |
|
"epoch": 4.71, |
|
"eval_cer": 0.5010998787689179, |
|
"eval_loss": 2.048527240753174, |
|
"eval_runtime": 372.3781, |
|
"eval_samples_per_second": 25.453, |
|
"eval_steps_per_second": 3.182, |
|
"step": 11200 |
|
}, |
|
{ |
|
"epoch": 4.76, |
|
"grad_norm": 4.054663181304932, |
|
"learning_rate": 4.237455830388693e-05, |
|
"loss": 1.4888, |
|
"step": 11300 |
|
}, |
|
{ |
|
"epoch": 4.76, |
|
"eval_cer": 0.48732939658206564, |
|
"eval_loss": 2.4912757873535156, |
|
"eval_runtime": 353.9893, |
|
"eval_samples_per_second": 26.775, |
|
"eval_steps_per_second": 3.348, |
|
"step": 11300 |
|
}, |
|
{ |
|
"epoch": 4.8, |
|
"grad_norm": 1.9226583242416382, |
|
"learning_rate": 4.230388692579505e-05, |
|
"loss": 1.4556, |
|
"step": 11400 |
|
}, |
|
{ |
|
"epoch": 4.8, |
|
"eval_cer": 0.4807105705682218, |
|
"eval_loss": 1.4841840267181396, |
|
"eval_runtime": 376.2491, |
|
"eval_samples_per_second": 25.191, |
|
"eval_steps_per_second": 3.15, |
|
"step": 11400 |
|
}, |
|
{ |
|
"epoch": 4.84, |
|
"grad_norm": 6.85697603225708, |
|
"learning_rate": 4.223321554770318e-05, |
|
"loss": 1.4391, |
|
"step": 11500 |
|
}, |
|
{ |
|
"epoch": 4.84, |
|
"eval_cer": 0.48008730593250165, |
|
"eval_loss": 3.111499547958374, |
|
"eval_runtime": 358.9952, |
|
"eval_samples_per_second": 26.401, |
|
"eval_steps_per_second": 3.301, |
|
"step": 11500 |
|
}, |
|
{ |
|
"epoch": 4.88, |
|
"grad_norm": 12.522397994995117, |
|
"learning_rate": 4.216254416961131e-05, |
|
"loss": 1.4244, |
|
"step": 11600 |
|
}, |
|
{ |
|
"epoch": 4.88, |
|
"eval_cer": 0.48068612881780143, |
|
"eval_loss": 2.598745822906494, |
|
"eval_runtime": 385.9814, |
|
"eval_samples_per_second": 24.556, |
|
"eval_steps_per_second": 3.07, |
|
"step": 11600 |
|
}, |
|
{ |
|
"epoch": 4.93, |
|
"grad_norm": 3.1015026569366455, |
|
"learning_rate": 4.209187279151944e-05, |
|
"loss": 3.7378, |
|
"step": 11700 |
|
}, |
|
{ |
|
"epoch": 4.93, |
|
"eval_cer": 0.47458791208791207, |
|
"eval_loss": 2.3908824920654297, |
|
"eval_runtime": 373.2148, |
|
"eval_samples_per_second": 25.396, |
|
"eval_steps_per_second": 3.175, |
|
"step": 11700 |
|
}, |
|
{ |
|
"epoch": 4.97, |
|
"grad_norm": 86.87032318115234, |
|
"learning_rate": 4.2021201413427565e-05, |
|
"loss": 2.8329, |
|
"step": 11800 |
|
}, |
|
{ |
|
"epoch": 4.97, |
|
"eval_cer": 0.4754898126784248, |
|
"eval_loss": 2.441450357437134, |
|
"eval_runtime": 446.0173, |
|
"eval_samples_per_second": 21.25, |
|
"eval_steps_per_second": 2.657, |
|
"step": 11800 |
|
}, |
|
{ |
|
"epoch": 5.01, |
|
"grad_norm": 2.7503468990325928, |
|
"learning_rate": 4.195053003533569e-05, |
|
"loss": 2.4912, |
|
"step": 11900 |
|
}, |
|
{ |
|
"epoch": 5.01, |
|
"eval_cer": 0.488764127331743, |
|
"eval_loss": 1.6247801780700684, |
|
"eval_runtime": 361.3079, |
|
"eval_samples_per_second": 26.232, |
|
"eval_steps_per_second": 3.28, |
|
"step": 11900 |
|
}, |
|
{ |
|
"epoch": 5.05, |
|
"grad_norm": 2.511701822280884, |
|
"learning_rate": 4.187985865724382e-05, |
|
"loss": 2.009, |
|
"step": 12000 |
|
}, |
|
{ |
|
"epoch": 5.05, |
|
"eval_cer": 0.46521938915177347, |
|
"eval_loss": 1.8090691566467285, |
|
"eval_runtime": 401.8599, |
|
"eval_samples_per_second": 23.585, |
|
"eval_steps_per_second": 2.949, |
|
"step": 12000 |
|
}, |
|
{ |
|
"epoch": 5.09, |
|
"grad_norm": 4.231322765350342, |
|
"learning_rate": 4.180918727915194e-05, |
|
"loss": 1.6484, |
|
"step": 12100 |
|
}, |
|
{ |
|
"epoch": 5.09, |
|
"eval_cer": 0.483240291736733, |
|
"eval_loss": 1.89494788646698, |
|
"eval_runtime": 367.7673, |
|
"eval_samples_per_second": 25.772, |
|
"eval_steps_per_second": 3.222, |
|
"step": 12100 |
|
}, |
|
{ |
|
"epoch": 5.14, |
|
"grad_norm": 3.436452865600586, |
|
"learning_rate": 4.173851590106007e-05, |
|
"loss": 1.4205, |
|
"step": 12200 |
|
}, |
|
{ |
|
"epoch": 5.14, |
|
"eval_cer": 0.48146582065621213, |
|
"eval_loss": 2.3205745220184326, |
|
"eval_runtime": 384.4394, |
|
"eval_samples_per_second": 24.654, |
|
"eval_steps_per_second": 3.082, |
|
"step": 12200 |
|
}, |
|
{ |
|
"epoch": 5.18, |
|
"grad_norm": 1.96918523311615, |
|
"learning_rate": 4.16678445229682e-05, |
|
"loss": 1.3964, |
|
"step": 12300 |
|
}, |
|
{ |
|
"epoch": 5.18, |
|
"eval_cer": 0.4692033944702984, |
|
"eval_loss": 2.9126245975494385, |
|
"eval_runtime": 358.8304, |
|
"eval_samples_per_second": 26.414, |
|
"eval_steps_per_second": 3.302, |
|
"step": 12300 |
|
}, |
|
{ |
|
"epoch": 5.22, |
|
"grad_norm": 6.869575500488281, |
|
"learning_rate": 4.159717314487633e-05, |
|
"loss": 2.0721, |
|
"step": 12400 |
|
}, |
|
{ |
|
"epoch": 5.22, |
|
"eval_cer": 0.4858066755308748, |
|
"eval_loss": 3.2426090240478516, |
|
"eval_runtime": 377.7703, |
|
"eval_samples_per_second": 25.089, |
|
"eval_steps_per_second": 3.137, |
|
"step": 12400 |
|
}, |
|
{ |
|
"epoch": 5.26, |
|
"grad_norm": 2.511401414871216, |
|
"learning_rate": 4.1526501766784455e-05, |
|
"loss": 1.747, |
|
"step": 12500 |
|
}, |
|
{ |
|
"epoch": 5.26, |
|
"eval_cer": 0.5091607680575652, |
|
"eval_loss": 3.057870864868164, |
|
"eval_runtime": 381.9144, |
|
"eval_samples_per_second": 24.817, |
|
"eval_steps_per_second": 3.103, |
|
"step": 12500 |
|
}, |
|
{ |
|
"epoch": 5.3, |
|
"grad_norm": 9.076600074768066, |
|
"learning_rate": 4.1455830388692577e-05, |
|
"loss": 2.0271, |
|
"step": 12600 |
|
}, |
|
{ |
|
"epoch": 5.3, |
|
"eval_cer": 0.48266591060185365, |
|
"eval_loss": 2.858898639678955, |
|
"eval_runtime": 405.4609, |
|
"eval_samples_per_second": 23.376, |
|
"eval_steps_per_second": 2.923, |
|
"step": 12600 |
|
}, |
|
{ |
|
"epoch": 5.35, |
|
"grad_norm": 1.6710706949234009, |
|
"learning_rate": 4.138515901060071e-05, |
|
"loss": 1.7331, |
|
"step": 12700 |
|
}, |
|
{ |
|
"epoch": 5.35, |
|
"eval_cer": 0.501048551093035, |
|
"eval_loss": 2.9208521842956543, |
|
"eval_runtime": 361.6906, |
|
"eval_samples_per_second": 26.205, |
|
"eval_steps_per_second": 3.276, |
|
"step": 12700 |
|
}, |
|
{ |
|
"epoch": 5.39, |
|
"grad_norm": 1.902030348777771, |
|
"learning_rate": 4.131448763250883e-05, |
|
"loss": 2.1405, |
|
"step": 12800 |
|
}, |
|
{ |
|
"epoch": 5.39, |
|
"eval_cer": 0.4769636502287748, |
|
"eval_loss": 2.750805139541626, |
|
"eval_runtime": 383.778, |
|
"eval_samples_per_second": 24.697, |
|
"eval_steps_per_second": 3.088, |
|
"step": 12800 |
|
}, |
|
{ |
|
"epoch": 5.43, |
|
"grad_norm": 5.2548041343688965, |
|
"learning_rate": 4.124381625441696e-05, |
|
"loss": 1.967, |
|
"step": 12900 |
|
}, |
|
{ |
|
"epoch": 5.43, |
|
"eval_cer": 0.4813216143287318, |
|
"eval_loss": 2.39349627494812, |
|
"eval_runtime": 366.8876, |
|
"eval_samples_per_second": 25.834, |
|
"eval_steps_per_second": 3.23, |
|
"step": 12900 |
|
}, |
|
{ |
|
"epoch": 5.47, |
|
"grad_norm": 3.4912497997283936, |
|
"learning_rate": 4.1173144876325096e-05, |
|
"loss": 2.5075, |
|
"step": 13000 |
|
}, |
|
{ |
|
"epoch": 5.47, |
|
"eval_cer": 0.48205731101638577, |
|
"eval_loss": 2.9853949546813965, |
|
"eval_runtime": 382.1207, |
|
"eval_samples_per_second": 24.804, |
|
"eval_steps_per_second": 3.101, |
|
"step": 13000 |
|
}, |
|
{ |
|
"epoch": 5.51, |
|
"grad_norm": 3.671482801437378, |
|
"learning_rate": 4.110247349823322e-05, |
|
"loss": 2.3712, |
|
"step": 13100 |
|
}, |
|
{ |
|
"epoch": 5.51, |
|
"eval_cer": 0.47388154550076256, |
|
"eval_loss": 2.6945626735687256, |
|
"eval_runtime": 374.115, |
|
"eval_samples_per_second": 25.334, |
|
"eval_steps_per_second": 3.167, |
|
"step": 13100 |
|
}, |
|
{ |
|
"epoch": 5.56, |
|
"grad_norm": 3.400202512741089, |
|
"learning_rate": 4.1031802120141345e-05, |
|
"loss": 2.035, |
|
"step": 13200 |
|
}, |
|
{ |
|
"epoch": 5.56, |
|
"eval_cer": 0.4721559579210825, |
|
"eval_loss": 2.723193645477295, |
|
"eval_runtime": 442.6463, |
|
"eval_samples_per_second": 21.412, |
|
"eval_steps_per_second": 2.677, |
|
"step": 13200 |
|
}, |
|
{ |
|
"epoch": 5.6, |
|
"grad_norm": 28.22166633605957, |
|
"learning_rate": 4.0961130742049467e-05, |
|
"loss": 2.583, |
|
"step": 13300 |
|
}, |
|
{ |
|
"epoch": 5.6, |
|
"eval_cer": 0.4679446443236479, |
|
"eval_loss": 2.848484516143799, |
|
"eval_runtime": 385.7533, |
|
"eval_samples_per_second": 24.57, |
|
"eval_steps_per_second": 3.072, |
|
"step": 13300 |
|
}, |
|
{ |
|
"epoch": 5.64, |
|
"grad_norm": 2.6984710693359375, |
|
"learning_rate": 4.08904593639576e-05, |
|
"loss": 1.4041, |
|
"step": 13400 |
|
}, |
|
{ |
|
"epoch": 5.64, |
|
"eval_cer": 0.46995620038324665, |
|
"eval_loss": 2.8926751613616943, |
|
"eval_runtime": 379.5665, |
|
"eval_samples_per_second": 24.971, |
|
"eval_steps_per_second": 3.122, |
|
"step": 13400 |
|
}, |
|
{ |
|
"epoch": 5.68, |
|
"grad_norm": 28.124893188476562, |
|
"learning_rate": 4.081978798586573e-05, |
|
"loss": 1.3813, |
|
"step": 13500 |
|
}, |
|
{ |
|
"epoch": 5.68, |
|
"eval_cer": 0.453462907199562, |
|
"eval_loss": 3.064070701599121, |
|
"eval_runtime": 364.1898, |
|
"eval_samples_per_second": 26.025, |
|
"eval_steps_per_second": 3.254, |
|
"step": 13500 |
|
}, |
|
{ |
|
"epoch": 5.73, |
|
"grad_norm": 4.778401851654053, |
|
"learning_rate": 4.074911660777385e-05, |
|
"loss": 1.9508, |
|
"step": 13600 |
|
}, |
|
{ |
|
"epoch": 5.73, |
|
"eval_cer": 0.4695724649016464, |
|
"eval_loss": 1.914655327796936, |
|
"eval_runtime": 378.9026, |
|
"eval_samples_per_second": 25.014, |
|
"eval_steps_per_second": 3.127, |
|
"step": 13600 |
|
}, |
|
{ |
|
"epoch": 5.77, |
|
"grad_norm": 4.1898345947265625, |
|
"learning_rate": 4.0678445229681986e-05, |
|
"loss": 2.1823, |
|
"step": 13700 |
|
}, |
|
{ |
|
"epoch": 5.77, |
|
"eval_cer": 0.47729850220953424, |
|
"eval_loss": 1.71451997756958, |
|
"eval_runtime": 365.8718, |
|
"eval_samples_per_second": 25.905, |
|
"eval_steps_per_second": 3.239, |
|
"step": 13700 |
|
}, |
|
{ |
|
"epoch": 5.81, |
|
"grad_norm": 1.8314547538757324, |
|
"learning_rate": 4.060777385159011e-05, |
|
"loss": 2.3976, |
|
"step": 13800 |
|
}, |
|
{ |
|
"epoch": 5.81, |
|
"eval_cer": 0.4735149192444566, |
|
"eval_loss": 2.7786667346954346, |
|
"eval_runtime": 377.8869, |
|
"eval_samples_per_second": 25.082, |
|
"eval_steps_per_second": 3.136, |
|
"step": 13800 |
|
}, |
|
{ |
|
"epoch": 5.85, |
|
"grad_norm": 3.4417295455932617, |
|
"learning_rate": 4.0537102473498235e-05, |
|
"loss": 2.0057, |
|
"step": 13900 |
|
}, |
|
{ |
|
"epoch": 5.85, |
|
"eval_cer": 0.47049147471745334, |
|
"eval_loss": 3.252101182937622, |
|
"eval_runtime": 365.7598, |
|
"eval_samples_per_second": 25.913, |
|
"eval_steps_per_second": 3.24, |
|
"step": 13900 |
|
}, |
|
{ |
|
"epoch": 5.89, |
|
"grad_norm": 4.99106502532959, |
|
"learning_rate": 4.0466431095406356e-05, |
|
"loss": 1.7663, |
|
"step": 14000 |
|
}, |
|
{ |
|
"epoch": 5.89, |
|
"eval_cer": 0.4712345039302335, |
|
"eval_loss": 2.6297221183776855, |
|
"eval_runtime": 401.5004, |
|
"eval_samples_per_second": 23.606, |
|
"eval_steps_per_second": 2.951, |
|
"step": 14000 |
|
}, |
|
{ |
|
"epoch": 5.94, |
|
"grad_norm": 4.448285102844238, |
|
"learning_rate": 4.039575971731449e-05, |
|
"loss": 1.842, |
|
"step": 14100 |
|
}, |
|
{ |
|
"epoch": 5.94, |
|
"eval_cer": 0.46505562942395684, |
|
"eval_loss": 2.5416791439056396, |
|
"eval_runtime": 383.7378, |
|
"eval_samples_per_second": 24.699, |
|
"eval_steps_per_second": 3.088, |
|
"step": 14100 |
|
}, |
|
{ |
|
"epoch": 5.98, |
|
"grad_norm": 2.196765184402466, |
|
"learning_rate": 4.032579505300353e-05, |
|
"loss": 2.0249, |
|
"step": 14200 |
|
}, |
|
{ |
|
"epoch": 5.98, |
|
"eval_cer": 0.4723050525986469, |
|
"eval_loss": 1.9896740913391113, |
|
"eval_runtime": 382.0154, |
|
"eval_samples_per_second": 24.811, |
|
"eval_steps_per_second": 3.102, |
|
"step": 14200 |
|
}, |
|
{ |
|
"epoch": 6.02, |
|
"grad_norm": 2.4070873260498047, |
|
"learning_rate": 4.025512367491166e-05, |
|
"loss": 2.6354, |
|
"step": 14300 |
|
}, |
|
{ |
|
"epoch": 6.02, |
|
"eval_cer": 0.46424660748504165, |
|
"eval_loss": 1.524404525756836, |
|
"eval_runtime": 361.2522, |
|
"eval_samples_per_second": 26.237, |
|
"eval_steps_per_second": 3.28, |
|
"step": 14300 |
|
}, |
|
{ |
|
"epoch": 6.06, |
|
"grad_norm": 2.6403534412384033, |
|
"learning_rate": 4.018445229681979e-05, |
|
"loss": 1.4007, |
|
"step": 14400 |
|
}, |
|
{ |
|
"epoch": 6.06, |
|
"eval_cer": 0.4687561104376051, |
|
"eval_loss": 2.6799304485321045, |
|
"eval_runtime": 377.8259, |
|
"eval_samples_per_second": 25.086, |
|
"eval_steps_per_second": 3.136, |
|
"step": 14400 |
|
}, |
|
{ |
|
"epoch": 6.1, |
|
"grad_norm": 16.431684494018555, |
|
"learning_rate": 4.011378091872792e-05, |
|
"loss": 1.3673, |
|
"step": 14500 |
|
}, |
|
{ |
|
"epoch": 6.1, |
|
"eval_cer": 0.4627141097336827, |
|
"eval_loss": 2.739257335662842, |
|
"eval_runtime": 365.917, |
|
"eval_samples_per_second": 25.902, |
|
"eval_steps_per_second": 3.238, |
|
"step": 14500 |
|
}, |
|
{ |
|
"epoch": 6.15, |
|
"grad_norm": 1.2545260190963745, |
|
"learning_rate": 4.0043109540636045e-05, |
|
"loss": 1.398, |
|
"step": 14600 |
|
}, |
|
{ |
|
"epoch": 6.15, |
|
"eval_cer": 0.46276054905948144, |
|
"eval_loss": 2.623035430908203, |
|
"eval_runtime": 380.5058, |
|
"eval_samples_per_second": 24.909, |
|
"eval_steps_per_second": 3.114, |
|
"step": 14600 |
|
}, |
|
{ |
|
"epoch": 6.19, |
|
"grad_norm": 4.757970333099365, |
|
"learning_rate": 3.9972438162544173e-05, |
|
"loss": 1.7177, |
|
"step": 14700 |
|
}, |
|
{ |
|
"epoch": 6.19, |
|
"eval_cer": 0.4671600641351531, |
|
"eval_loss": 3.1201841831207275, |
|
"eval_runtime": 364.6269, |
|
"eval_samples_per_second": 25.994, |
|
"eval_steps_per_second": 3.25, |
|
"step": 14700 |
|
}, |
|
{ |
|
"epoch": 6.23, |
|
"grad_norm": 2.604834794998169, |
|
"learning_rate": 3.99017667844523e-05, |
|
"loss": 1.4781, |
|
"step": 14800 |
|
}, |
|
{ |
|
"epoch": 6.23, |
|
"eval_cer": 0.4741137421297564, |
|
"eval_loss": 2.4959936141967773, |
|
"eval_runtime": 380.3259, |
|
"eval_samples_per_second": 24.921, |
|
"eval_steps_per_second": 3.116, |
|
"step": 14800 |
|
}, |
|
{ |
|
"epoch": 6.27, |
|
"grad_norm": 5.067722797393799, |
|
"learning_rate": 3.983109540636042e-05, |
|
"loss": 1.6941, |
|
"step": 14900 |
|
}, |
|
{ |
|
"epoch": 6.27, |
|
"eval_cer": 0.4551322787532752, |
|
"eval_loss": 2.24949049949646, |
|
"eval_runtime": 359.186, |
|
"eval_samples_per_second": 26.387, |
|
"eval_steps_per_second": 3.299, |
|
"step": 14900 |
|
}, |
|
{ |
|
"epoch": 6.31, |
|
"grad_norm": 4.323084354400635, |
|
"learning_rate": 3.976042402826856e-05, |
|
"loss": 1.4552, |
|
"step": 15000 |
|
}, |
|
{ |
|
"epoch": 6.31, |
|
"eval_cer": 0.459111395721716, |
|
"eval_loss": 2.0859220027923584, |
|
"eval_runtime": 380.4099, |
|
"eval_samples_per_second": 24.915, |
|
"eval_steps_per_second": 3.115, |
|
"step": 15000 |
|
}, |
|
{ |
|
"epoch": 6.36, |
|
"grad_norm": 3.9104325771331787, |
|
"learning_rate": 3.968975265017668e-05, |
|
"loss": 2.836, |
|
"step": 15100 |
|
}, |
|
{ |
|
"epoch": 6.36, |
|
"eval_cer": 0.47354180516991906, |
|
"eval_loss": 1.5567090511322021, |
|
"eval_runtime": 368.3958, |
|
"eval_samples_per_second": 25.728, |
|
"eval_steps_per_second": 3.217, |
|
"step": 15100 |
|
}, |
|
{ |
|
"epoch": 6.4, |
|
"grad_norm": 22.448034286499023, |
|
"learning_rate": 3.961908127208481e-05, |
|
"loss": 1.3858, |
|
"step": 15200 |
|
}, |
|
{ |
|
"epoch": 6.4, |
|
"eval_cer": 0.45766688827187046, |
|
"eval_loss": 1.999880313873291, |
|
"eval_runtime": 380.7492, |
|
"eval_samples_per_second": 24.893, |
|
"eval_steps_per_second": 3.112, |
|
"step": 15200 |
|
}, |
|
{ |
|
"epoch": 6.44, |
|
"grad_norm": 10.558205604553223, |
|
"learning_rate": 3.9548409893992935e-05, |
|
"loss": 1.3921, |
|
"step": 15300 |
|
}, |
|
{ |
|
"epoch": 6.44, |
|
"eval_cer": 0.47037904266551955, |
|
"eval_loss": 2.6411163806915283, |
|
"eval_runtime": 363.6122, |
|
"eval_samples_per_second": 26.066, |
|
"eval_steps_per_second": 3.259, |
|
"step": 15300 |
|
}, |
|
{ |
|
"epoch": 6.48, |
|
"grad_norm": 32.08698654174805, |
|
"learning_rate": 3.947773851590106e-05, |
|
"loss": 2.0638, |
|
"step": 15400 |
|
}, |
|
{ |
|
"epoch": 6.48, |
|
"eval_cer": 0.4723588244495718, |
|
"eval_loss": 2.9272561073303223, |
|
"eval_runtime": 386.8412, |
|
"eval_samples_per_second": 24.501, |
|
"eval_steps_per_second": 3.063, |
|
"step": 15400 |
|
}, |
|
{ |
|
"epoch": 6.52, |
|
"grad_norm": 1.8787578344345093, |
|
"learning_rate": 3.940706713780919e-05, |
|
"loss": 1.3608, |
|
"step": 15500 |
|
}, |
|
{ |
|
"epoch": 6.52, |
|
"eval_cer": 0.4626187869070431, |
|
"eval_loss": 3.0016987323760986, |
|
"eval_runtime": 353.2265, |
|
"eval_samples_per_second": 26.833, |
|
"eval_steps_per_second": 3.355, |
|
"step": 15500 |
|
}, |
|
{ |
|
"epoch": 6.57, |
|
"grad_norm": 5.287090301513672, |
|
"learning_rate": 3.933639575971731e-05, |
|
"loss": 1.6601, |
|
"step": 15600 |
|
}, |
|
{ |
|
"epoch": 6.57, |
|
"eval_cer": 0.4719262054671307, |
|
"eval_loss": 2.0440595149993896, |
|
"eval_runtime": 377.8824, |
|
"eval_samples_per_second": 25.082, |
|
"eval_steps_per_second": 3.136, |
|
"step": 15600 |
|
}, |
|
{ |
|
"epoch": 6.61, |
|
"grad_norm": 2.0426106452941895, |
|
"learning_rate": 3.926572438162545e-05, |
|
"loss": 1.9773, |
|
"step": 15700 |
|
}, |
|
{ |
|
"epoch": 6.61, |
|
"eval_cer": 0.4639239763794924, |
|
"eval_loss": 2.3498713970184326, |
|
"eval_runtime": 365.2548, |
|
"eval_samples_per_second": 25.949, |
|
"eval_steps_per_second": 3.244, |
|
"step": 15700 |
|
}, |
|
{ |
|
"epoch": 6.65, |
|
"grad_norm": 2.1095080375671387, |
|
"learning_rate": 3.919505300353357e-05, |
|
"loss": 2.2505, |
|
"step": 15800 |
|
}, |
|
{ |
|
"epoch": 6.65, |
|
"eval_cer": 0.4647232216182394, |
|
"eval_loss": 1.8471035957336426, |
|
"eval_runtime": 388.913, |
|
"eval_samples_per_second": 24.37, |
|
"eval_steps_per_second": 3.047, |
|
"step": 15800 |
|
}, |
|
{ |
|
"epoch": 6.69, |
|
"grad_norm": 3.6614439487457275, |
|
"learning_rate": 3.91243816254417e-05, |
|
"loss": 1.9341, |
|
"step": 15900 |
|
}, |
|
{ |
|
"epoch": 6.69, |
|
"eval_cer": 0.4613698134605608, |
|
"eval_loss": 2.8519198894500732, |
|
"eval_runtime": 363.022, |
|
"eval_samples_per_second": 26.109, |
|
"eval_steps_per_second": 3.264, |
|
"step": 15900 |
|
}, |
|
{ |
|
"epoch": 6.74, |
|
"grad_norm": 8.302891731262207, |
|
"learning_rate": 3.9053710247349825e-05, |
|
"loss": 1.4603, |
|
"step": 16000 |
|
}, |
|
{ |
|
"epoch": 6.74, |
|
"eval_cer": 0.47225128074772205, |
|
"eval_loss": 1.5901261568069458, |
|
"eval_runtime": 376.5314, |
|
"eval_samples_per_second": 25.172, |
|
"eval_steps_per_second": 3.147, |
|
"step": 16000 |
|
}, |
|
{ |
|
"epoch": 6.78, |
|
"grad_norm": 3.2967019081115723, |
|
"learning_rate": 3.898303886925795e-05, |
|
"loss": 1.5493, |
|
"step": 16100 |
|
}, |
|
{ |
|
"epoch": 6.78, |
|
"eval_cer": 0.47416751398068124, |
|
"eval_loss": 1.6550699472427368, |
|
"eval_runtime": 363.1067, |
|
"eval_samples_per_second": 26.103, |
|
"eval_steps_per_second": 3.264, |
|
"step": 16100 |
|
}, |
|
{ |
|
"epoch": 6.82, |
|
"grad_norm": 2.848189115524292, |
|
"learning_rate": 3.891236749116608e-05, |
|
"loss": 1.5642, |
|
"step": 16200 |
|
}, |
|
{ |
|
"epoch": 6.82, |
|
"eval_cer": 0.4807154589183059, |
|
"eval_loss": 2.1456046104431152, |
|
"eval_runtime": 376.8904, |
|
"eval_samples_per_second": 25.148, |
|
"eval_steps_per_second": 3.144, |
|
"step": 16200 |
|
}, |
|
{ |
|
"epoch": 6.86, |
|
"grad_norm": 4.148609638214111, |
|
"learning_rate": 3.88416961130742e-05, |
|
"loss": 1.379, |
|
"step": 16300 |
|
}, |
|
{ |
|
"epoch": 6.86, |
|
"eval_cer": 0.4555746744358844, |
|
"eval_loss": 1.7261931896209717, |
|
"eval_runtime": 360.5421, |
|
"eval_samples_per_second": 26.288, |
|
"eval_steps_per_second": 3.287, |
|
"step": 16300 |
|
}, |
|
{ |
|
"epoch": 6.9, |
|
"grad_norm": 8.180316925048828, |
|
"learning_rate": 3.877102473498234e-05, |
|
"loss": 1.6474, |
|
"step": 16400 |
|
}, |
|
{ |
|
"epoch": 6.9, |
|
"eval_cer": 0.4800946384576278, |
|
"eval_loss": 2.309068202972412, |
|
"eval_runtime": 377.3872, |
|
"eval_samples_per_second": 25.115, |
|
"eval_steps_per_second": 3.14, |
|
"step": 16400 |
|
}, |
|
{ |
|
"epoch": 6.95, |
|
"grad_norm": 1.7500585317611694, |
|
"learning_rate": 3.870035335689046e-05, |
|
"loss": 1.3755, |
|
"step": 16500 |
|
}, |
|
{ |
|
"epoch": 6.95, |
|
"eval_cer": 0.4641219545578976, |
|
"eval_loss": 1.5506832599639893, |
|
"eval_runtime": 359.5594, |
|
"eval_samples_per_second": 26.36, |
|
"eval_steps_per_second": 3.296, |
|
"step": 16500 |
|
}, |
|
{ |
|
"epoch": 6.99, |
|
"grad_norm": 1.8204373121261597, |
|
"learning_rate": 3.862968197879859e-05, |
|
"loss": 1.3562, |
|
"step": 16600 |
|
}, |
|
{ |
|
"epoch": 6.99, |
|
"eval_cer": 0.46081498572601776, |
|
"eval_loss": 2.0778937339782715, |
|
"eval_runtime": 381.9051, |
|
"eval_samples_per_second": 24.818, |
|
"eval_steps_per_second": 3.103, |
|
"step": 16600 |
|
}, |
|
{ |
|
"epoch": 7.03, |
|
"grad_norm": 2.9875051975250244, |
|
"learning_rate": 3.8559010600706715e-05, |
|
"loss": 1.5916, |
|
"step": 16700 |
|
}, |
|
{ |
|
"epoch": 7.03, |
|
"eval_cer": 0.4623352626021665, |
|
"eval_loss": 1.5354750156402588, |
|
"eval_runtime": 364.3123, |
|
"eval_samples_per_second": 26.016, |
|
"eval_steps_per_second": 3.253, |
|
"step": 16700 |
|
}, |
|
{ |
|
"epoch": 7.07, |
|
"grad_norm": 4.643332004547119, |
|
"learning_rate": 3.848833922261484e-05, |
|
"loss": 1.3365, |
|
"step": 16800 |
|
}, |
|
{ |
|
"epoch": 7.07, |
|
"eval_cer": 0.46146024793711626, |
|
"eval_loss": 1.8595181703567505, |
|
"eval_runtime": 374.6805, |
|
"eval_samples_per_second": 25.296, |
|
"eval_steps_per_second": 3.163, |
|
"step": 16800 |
|
}, |
|
{ |
|
"epoch": 7.11, |
|
"grad_norm": 2.433307409286499, |
|
"learning_rate": 3.841766784452297e-05, |
|
"loss": 1.6175, |
|
"step": 16900 |
|
}, |
|
{ |
|
"epoch": 7.11, |
|
"eval_cer": 0.468870986664581, |
|
"eval_loss": 1.7079046964645386, |
|
"eval_runtime": 364.7345, |
|
"eval_samples_per_second": 25.986, |
|
"eval_steps_per_second": 3.249, |
|
"step": 16900 |
|
}, |
|
{ |
|
"epoch": 7.16, |
|
"grad_norm": 4.391873359680176, |
|
"learning_rate": 3.834699646643109e-05, |
|
"loss": 1.3334, |
|
"step": 17000 |
|
}, |
|
{ |
|
"epoch": 7.16, |
|
"eval_cer": 0.4652682726526143, |
|
"eval_loss": 2.7629072666168213, |
|
"eval_runtime": 380.8752, |
|
"eval_samples_per_second": 24.885, |
|
"eval_steps_per_second": 3.111, |
|
"step": 17000 |
|
}, |
|
{ |
|
"epoch": 7.2, |
|
"grad_norm": 7.738865375518799, |
|
"learning_rate": 3.827632508833923e-05, |
|
"loss": 1.4858, |
|
"step": 17100 |
|
}, |
|
{ |
|
"epoch": 7.2, |
|
"eval_cer": 0.4701737319619882, |
|
"eval_loss": 2.189877986907959, |
|
"eval_runtime": 363.6078, |
|
"eval_samples_per_second": 26.067, |
|
"eval_steps_per_second": 3.259, |
|
"step": 17100 |
|
}, |
|
{ |
|
"epoch": 7.24, |
|
"grad_norm": null, |
|
"learning_rate": 3.820636042402827e-05, |
|
"loss": 1.5317, |
|
"step": 17200 |
|
}, |
|
{ |
|
"epoch": 7.24, |
|
"eval_cer": 0.47163290446208594, |
|
"eval_loss": 1.9506334066390991, |
|
"eval_runtime": 380.9688, |
|
"eval_samples_per_second": 24.879, |
|
"eval_steps_per_second": 3.11, |
|
"step": 17200 |
|
}, |
|
{ |
|
"epoch": 7.28, |
|
"grad_norm": 19.556819915771484, |
|
"learning_rate": 3.81356890459364e-05, |
|
"loss": 1.3515, |
|
"step": 17300 |
|
}, |
|
{ |
|
"epoch": 7.28, |
|
"eval_cer": 0.45843191506002895, |
|
"eval_loss": 2.2836623191833496, |
|
"eval_runtime": 361.9084, |
|
"eval_samples_per_second": 26.189, |
|
"eval_steps_per_second": 3.274, |
|
"step": 17300 |
|
}, |
|
{ |
|
"epoch": 7.32, |
|
"grad_norm": 3.9697859287261963, |
|
"learning_rate": 3.8065017667844525e-05, |
|
"loss": 1.3348, |
|
"step": 17400 |
|
}, |
|
{ |
|
"epoch": 7.32, |
|
"eval_cer": 0.460401920143913, |
|
"eval_loss": 2.626167058944702, |
|
"eval_runtime": 377.1422, |
|
"eval_samples_per_second": 25.131, |
|
"eval_steps_per_second": 3.142, |
|
"step": 17400 |
|
}, |
|
{ |
|
"epoch": 7.37, |
|
"grad_norm": 2.342355251312256, |
|
"learning_rate": 3.7994346289752653e-05, |
|
"loss": 2.0519, |
|
"step": 17500 |
|
}, |
|
{ |
|
"epoch": 7.37, |
|
"eval_cer": 0.4643712604121857, |
|
"eval_loss": 1.923600196838379, |
|
"eval_runtime": 360.7805, |
|
"eval_samples_per_second": 26.271, |
|
"eval_steps_per_second": 3.285, |
|
"step": 17500 |
|
}, |
|
{ |
|
"epoch": 7.41, |
|
"grad_norm": 1.692319393157959, |
|
"learning_rate": 3.7923674911660775e-05, |
|
"loss": 1.4731, |
|
"step": 17600 |
|
}, |
|
{ |
|
"epoch": 7.41, |
|
"eval_cer": 0.4658597630127879, |
|
"eval_loss": 1.808614730834961, |
|
"eval_runtime": 377.9004, |
|
"eval_samples_per_second": 25.081, |
|
"eval_steps_per_second": 3.136, |
|
"step": 17600 |
|
}, |
|
{ |
|
"epoch": 7.45, |
|
"grad_norm": 2.243326187133789, |
|
"learning_rate": 3.78530035335689e-05, |
|
"loss": 1.3005, |
|
"step": 17700 |
|
}, |
|
{ |
|
"epoch": 7.45, |
|
"eval_cer": 0.45969066520667945, |
|
"eval_loss": 1.8014905452728271, |
|
"eval_runtime": 369.7047, |
|
"eval_samples_per_second": 25.637, |
|
"eval_steps_per_second": 3.205, |
|
"step": 17700 |
|
}, |
|
{ |
|
"epoch": 7.49, |
|
"grad_norm": 3.0026488304138184, |
|
"learning_rate": 3.778233215547704e-05, |
|
"loss": 1.3286, |
|
"step": 17800 |
|
}, |
|
{ |
|
"epoch": 7.49, |
|
"eval_cer": 0.44898517852254505, |
|
"eval_loss": 1.3857167959213257, |
|
"eval_runtime": 387.1419, |
|
"eval_samples_per_second": 24.482, |
|
"eval_steps_per_second": 3.061, |
|
"step": 17800 |
|
}, |
|
{ |
|
"epoch": 7.54, |
|
"grad_norm": 4.501519203186035, |
|
"learning_rate": 3.771166077738516e-05, |
|
"loss": 1.3409, |
|
"step": 17900 |
|
}, |
|
{ |
|
"epoch": 7.54, |
|
"eval_cer": 0.4575813421453991, |
|
"eval_loss": 2.416992664337158, |
|
"eval_runtime": 359.7708, |
|
"eval_samples_per_second": 26.345, |
|
"eval_steps_per_second": 3.294, |
|
"step": 17900 |
|
}, |
|
{ |
|
"epoch": 7.58, |
|
"grad_norm": 10.005617141723633, |
|
"learning_rate": 3.764098939929329e-05, |
|
"loss": 2.1197, |
|
"step": 18000 |
|
}, |
|
{ |
|
"epoch": 7.58, |
|
"eval_cer": 0.4567307692307692, |
|
"eval_loss": 1.900498628616333, |
|
"eval_runtime": 386.3412, |
|
"eval_samples_per_second": 24.533, |
|
"eval_steps_per_second": 3.067, |
|
"step": 18000 |
|
}, |
|
{ |
|
"epoch": 7.62, |
|
"grad_norm": 3.4333086013793945, |
|
"learning_rate": 3.7570318021201415e-05, |
|
"loss": 1.3268, |
|
"step": 18100 |
|
}, |
|
{ |
|
"epoch": 7.62, |
|
"eval_cer": 0.4478413046028704, |
|
"eval_loss": 1.8988970518112183, |
|
"eval_runtime": 368.4314, |
|
"eval_samples_per_second": 25.725, |
|
"eval_steps_per_second": 3.216, |
|
"step": 18100 |
|
}, |
|
{ |
|
"epoch": 7.66, |
|
"grad_norm": 4.548975944519043, |
|
"learning_rate": 3.749964664310954e-05, |
|
"loss": 1.3146, |
|
"step": 18200 |
|
}, |
|
{ |
|
"epoch": 7.66, |
|
"eval_cer": 0.4571047280122013, |
|
"eval_loss": 2.1630055904388428, |
|
"eval_runtime": 388.4273, |
|
"eval_samples_per_second": 24.401, |
|
"eval_steps_per_second": 3.051, |
|
"step": 18200 |
|
}, |
|
{ |
|
"epoch": 7.7, |
|
"grad_norm": 1.7991045713424683, |
|
"learning_rate": 3.742897526501767e-05, |
|
"loss": 2.0035, |
|
"step": 18300 |
|
}, |
|
{ |
|
"epoch": 7.7, |
|
"eval_cer": 0.45147579289038364, |
|
"eval_loss": 2.4393255710601807, |
|
"eval_runtime": 378.3611, |
|
"eval_samples_per_second": 25.05, |
|
"eval_steps_per_second": 3.132, |
|
"step": 18300 |
|
}, |
|
{ |
|
"epoch": 7.75, |
|
"grad_norm": 2.6425940990448, |
|
"learning_rate": 3.73583038869258e-05, |
|
"loss": 1.3294, |
|
"step": 18400 |
|
}, |
|
{ |
|
"epoch": 7.75, |
|
"eval_cer": 0.4522603730788784, |
|
"eval_loss": 1.833477258682251, |
|
"eval_runtime": 381.8605, |
|
"eval_samples_per_second": 24.821, |
|
"eval_steps_per_second": 3.103, |
|
"step": 18400 |
|
}, |
|
{ |
|
"epoch": 7.79, |
|
"grad_norm": 4.755228042602539, |
|
"learning_rate": 3.728763250883393e-05, |
|
"loss": 1.417, |
|
"step": 18500 |
|
}, |
|
{ |
|
"epoch": 7.79, |
|
"eval_cer": 0.47154491416057254, |
|
"eval_loss": 1.8104172945022583, |
|
"eval_runtime": 382.6075, |
|
"eval_samples_per_second": 24.772, |
|
"eval_steps_per_second": 3.097, |
|
"step": 18500 |
|
}, |
|
{ |
|
"epoch": 7.83, |
|
"grad_norm": 2.874833822250366, |
|
"learning_rate": 3.721696113074205e-05, |
|
"loss": 1.3215, |
|
"step": 18600 |
|
}, |
|
{ |
|
"epoch": 7.83, |
|
"eval_cer": 0.45071076610222516, |
|
"eval_loss": 3.382107973098755, |
|
"eval_runtime": 367.7162, |
|
"eval_samples_per_second": 25.775, |
|
"eval_steps_per_second": 3.223, |
|
"step": 18600 |
|
}, |
|
{ |
|
"epoch": 7.87, |
|
"grad_norm": 2.091411590576172, |
|
"learning_rate": 3.714628975265018e-05, |
|
"loss": 1.7498, |
|
"step": 18700 |
|
}, |
|
{ |
|
"epoch": 7.87, |
|
"eval_cer": 0.44587129951898635, |
|
"eval_loss": 2.4873616695404053, |
|
"eval_runtime": 381.5796, |
|
"eval_samples_per_second": 24.839, |
|
"eval_steps_per_second": 3.106, |
|
"step": 18700 |
|
}, |
|
{ |
|
"epoch": 7.91, |
|
"grad_norm": 2.1210122108459473, |
|
"learning_rate": 3.7075618374558305e-05, |
|
"loss": 1.4463, |
|
"step": 18800 |
|
}, |
|
{ |
|
"epoch": 7.91, |
|
"eval_cer": 0.4508696374799578, |
|
"eval_loss": 2.1447994709014893, |
|
"eval_runtime": 360.7307, |
|
"eval_samples_per_second": 26.274, |
|
"eval_steps_per_second": 3.285, |
|
"step": 18800 |
|
}, |
|
{ |
|
"epoch": 7.96, |
|
"grad_norm": 9.81248950958252, |
|
"learning_rate": 3.700494699646643e-05, |
|
"loss": 1.327, |
|
"step": 18900 |
|
}, |
|
{ |
|
"epoch": 7.96, |
|
"eval_cer": 0.45252678815846076, |
|
"eval_loss": 2.999907970428467, |
|
"eval_runtime": 382.5678, |
|
"eval_samples_per_second": 24.775, |
|
"eval_steps_per_second": 3.097, |
|
"step": 18900 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"grad_norm": 2.9528186321258545, |
|
"learning_rate": 3.693427561837456e-05, |
|
"loss": 1.3228, |
|
"step": 19000 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"eval_cer": 0.4471960423917719, |
|
"eval_loss": 1.6979167461395264, |
|
"eval_runtime": 363.6541, |
|
"eval_samples_per_second": 26.063, |
|
"eval_steps_per_second": 3.259, |
|
"step": 19000 |
|
}, |
|
{ |
|
"epoch": 8.04, |
|
"grad_norm": 3.4124321937561035, |
|
"learning_rate": 3.686360424028269e-05, |
|
"loss": 1.2656, |
|
"step": 19100 |
|
}, |
|
{ |
|
"epoch": 8.04, |
|
"eval_cer": 0.4476115521489187, |
|
"eval_loss": 2.0440304279327393, |
|
"eval_runtime": 381.9207, |
|
"eval_samples_per_second": 24.817, |
|
"eval_steps_per_second": 3.103, |
|
"step": 19100 |
|
}, |
|
{ |
|
"epoch": 8.08, |
|
"grad_norm": 4.451693058013916, |
|
"learning_rate": 3.679293286219082e-05, |
|
"loss": 1.2961, |
|
"step": 19200 |
|
}, |
|
{ |
|
"epoch": 8.08, |
|
"eval_cer": 0.44471520472410153, |
|
"eval_loss": 1.5803910493850708, |
|
"eval_runtime": 361.691, |
|
"eval_samples_per_second": 26.205, |
|
"eval_steps_per_second": 3.276, |
|
"step": 19200 |
|
}, |
|
{ |
|
"epoch": 8.12, |
|
"grad_norm": 2.6323907375335693, |
|
"learning_rate": 3.672226148409894e-05, |
|
"loss": 1.2874, |
|
"step": 19300 |
|
}, |
|
{ |
|
"epoch": 8.12, |
|
"eval_cer": 0.4615849008642603, |
|
"eval_loss": 1.754168152809143, |
|
"eval_runtime": 385.1298, |
|
"eval_samples_per_second": 24.61, |
|
"eval_steps_per_second": 3.077, |
|
"step": 19300 |
|
}, |
|
{ |
|
"epoch": 8.17, |
|
"grad_norm": 2.5475683212280273, |
|
"learning_rate": 3.665159010600707e-05, |
|
"loss": 1.5227, |
|
"step": 19400 |
|
}, |
|
{ |
|
"epoch": 8.17, |
|
"eval_cer": 0.45406906260998786, |
|
"eval_loss": 1.954167127609253, |
|
"eval_runtime": 364.6983, |
|
"eval_samples_per_second": 25.989, |
|
"eval_steps_per_second": 3.249, |
|
"step": 19400 |
|
}, |
|
{ |
|
"epoch": 8.21, |
|
"grad_norm": 1.623938798904419, |
|
"learning_rate": 3.6580918727915195e-05, |
|
"loss": 1.2988, |
|
"step": 19500 |
|
}, |
|
{ |
|
"epoch": 8.21, |
|
"eval_cer": 0.4476164404990028, |
|
"eval_loss": 2.2084550857543945, |
|
"eval_runtime": 384.5947, |
|
"eval_samples_per_second": 24.644, |
|
"eval_steps_per_second": 3.081, |
|
"step": 19500 |
|
}, |
|
{ |
|
"epoch": 8.25, |
|
"grad_norm": 3.2966043949127197, |
|
"learning_rate": 3.6510954063604243e-05, |
|
"loss": 1.5739, |
|
"step": 19600 |
|
}, |
|
{ |
|
"epoch": 8.25, |
|
"eval_cer": 0.451500234640804, |
|
"eval_loss": 1.4389057159423828, |
|
"eval_runtime": 366.8675, |
|
"eval_samples_per_second": 25.835, |
|
"eval_steps_per_second": 3.23, |
|
"step": 19600 |
|
}, |
|
{ |
|
"epoch": 8.29, |
|
"grad_norm": 2.2112064361572266, |
|
"learning_rate": 3.6440282685512365e-05, |
|
"loss": 1.4271, |
|
"step": 19700 |
|
}, |
|
{ |
|
"epoch": 8.29, |
|
"eval_cer": 0.4538075358804896, |
|
"eval_loss": 2.505011796951294, |
|
"eval_runtime": 384.1903, |
|
"eval_samples_per_second": 24.67, |
|
"eval_steps_per_second": 3.084, |
|
"step": 19700 |
|
}, |
|
{ |
|
"epoch": 8.34, |
|
"grad_norm": 3.7650928497314453, |
|
"learning_rate": 3.63696113074205e-05, |
|
"loss": 1.665, |
|
"step": 19800 |
|
}, |
|
{ |
|
"epoch": 8.34, |
|
"eval_cer": 0.4431655977474483, |
|
"eval_loss": 2.0151588916778564, |
|
"eval_runtime": 364.0023, |
|
"eval_samples_per_second": 26.038, |
|
"eval_steps_per_second": 3.255, |
|
"step": 19800 |
|
}, |
|
{ |
|
"epoch": 8.38, |
|
"grad_norm": 1.848604679107666, |
|
"learning_rate": 3.629893992932862e-05, |
|
"loss": 2.2543, |
|
"step": 19900 |
|
}, |
|
{ |
|
"epoch": 8.38, |
|
"eval_cer": 0.4581728325055727, |
|
"eval_loss": 1.8045251369476318, |
|
"eval_runtime": 381.1374, |
|
"eval_samples_per_second": 24.868, |
|
"eval_steps_per_second": 3.109, |
|
"step": 19900 |
|
}, |
|
{ |
|
"epoch": 8.42, |
|
"grad_norm": 34.23902893066406, |
|
"learning_rate": 3.622826855123675e-05, |
|
"loss": 1.313, |
|
"step": 20000 |
|
}, |
|
{ |
|
"epoch": 8.42, |
|
"eval_cer": 0.4448887411520864, |
|
"eval_loss": 2.140085458755493, |
|
"eval_runtime": 364.9023, |
|
"eval_samples_per_second": 25.974, |
|
"eval_steps_per_second": 3.247, |
|
"step": 20000 |
|
}, |
|
{ |
|
"epoch": 8.46, |
|
"grad_norm": 1.8063780069351196, |
|
"learning_rate": 3.615759717314488e-05, |
|
"loss": 2.0001, |
|
"step": 20100 |
|
}, |
|
{ |
|
"epoch": 8.46, |
|
"eval_cer": 0.44904628289859605, |
|
"eval_loss": 3.0254170894622803, |
|
"eval_runtime": 380.5352, |
|
"eval_samples_per_second": 24.907, |
|
"eval_steps_per_second": 3.114, |
|
"step": 20100 |
|
}, |
|
{ |
|
"epoch": 8.5, |
|
"grad_norm": 4.191483974456787, |
|
"learning_rate": 3.6086925795053005e-05, |
|
"loss": 1.2751, |
|
"step": 20200 |
|
}, |
|
{ |
|
"epoch": 8.5, |
|
"eval_cer": 0.4562517109225294, |
|
"eval_loss": 4.0329742431640625, |
|
"eval_runtime": 362.926, |
|
"eval_samples_per_second": 26.116, |
|
"eval_steps_per_second": 3.265, |
|
"step": 20200 |
|
}, |
|
{ |
|
"epoch": 8.55, |
|
"grad_norm": 6.447911262512207, |
|
"learning_rate": 3.6016254416961133e-05, |
|
"loss": 2.11, |
|
"step": 20300 |
|
}, |
|
{ |
|
"epoch": 8.55, |
|
"eval_cer": 0.43633657267998904, |
|
"eval_loss": 3.3446898460388184, |
|
"eval_runtime": 385.9035, |
|
"eval_samples_per_second": 24.561, |
|
"eval_steps_per_second": 3.071, |
|
"step": 20300 |
|
}, |
|
{ |
|
"epoch": 8.59, |
|
"grad_norm": 2.172891139984131, |
|
"learning_rate": 3.5945583038869255e-05, |
|
"loss": 1.4999, |
|
"step": 20400 |
|
}, |
|
{ |
|
"epoch": 8.59, |
|
"eval_cer": 0.4393355754565719, |
|
"eval_loss": 3.0930521488189697, |
|
"eval_runtime": 363.4753, |
|
"eval_samples_per_second": 26.076, |
|
"eval_steps_per_second": 3.26, |
|
"step": 20400 |
|
}, |
|
{ |
|
"epoch": 8.63, |
|
"grad_norm": 3.379791498184204, |
|
"learning_rate": 3.587491166077739e-05, |
|
"loss": 1.4381, |
|
"step": 20500 |
|
}, |
|
{ |
|
"epoch": 8.63, |
|
"eval_cer": 0.4450402800046928, |
|
"eval_loss": 2.9096951484680176, |
|
"eval_runtime": 384.973, |
|
"eval_samples_per_second": 24.62, |
|
"eval_steps_per_second": 3.078, |
|
"step": 20500 |
|
}, |
|
{ |
|
"epoch": 8.67, |
|
"grad_norm": 2.5090067386627197, |
|
"learning_rate": 3.580424028268551e-05, |
|
"loss": 1.6658, |
|
"step": 20600 |
|
}, |
|
{ |
|
"epoch": 8.67, |
|
"eval_cer": 0.4514855695905518, |
|
"eval_loss": 2.3211405277252197, |
|
"eval_runtime": 367.2057, |
|
"eval_samples_per_second": 25.811, |
|
"eval_steps_per_second": 3.227, |
|
"step": 20600 |
|
}, |
|
{ |
|
"epoch": 8.71, |
|
"grad_norm": 2.2183985710144043, |
|
"learning_rate": 3.573356890459364e-05, |
|
"loss": 2.1266, |
|
"step": 20700 |
|
}, |
|
{ |
|
"epoch": 8.71, |
|
"eval_cer": 0.45651323765202767, |
|
"eval_loss": 1.70187246799469, |
|
"eval_runtime": 382.2617, |
|
"eval_samples_per_second": 24.795, |
|
"eval_steps_per_second": 3.1, |
|
"step": 20700 |
|
}, |
|
{ |
|
"epoch": 8.76, |
|
"grad_norm": 1.713640570640564, |
|
"learning_rate": 3.5662897526501774e-05, |
|
"loss": 1.3038, |
|
"step": 20800 |
|
}, |
|
{ |
|
"epoch": 8.76, |
|
"eval_cer": 0.44218548355559034, |
|
"eval_loss": 1.8959708213806152, |
|
"eval_runtime": 362.6249, |
|
"eval_samples_per_second": 26.137, |
|
"eval_steps_per_second": 3.268, |
|
"step": 20800 |
|
}, |
|
{ |
|
"epoch": 8.8, |
|
"grad_norm": 10.084565162658691, |
|
"learning_rate": 3.5592226148409895e-05, |
|
"loss": 1.3031, |
|
"step": 20900 |
|
}, |
|
{ |
|
"epoch": 8.8, |
|
"eval_cer": 0.44013237652027687, |
|
"eval_loss": 1.9289778470993042, |
|
"eval_runtime": 383.5806, |
|
"eval_samples_per_second": 24.709, |
|
"eval_steps_per_second": 3.089, |
|
"step": 20900 |
|
}, |
|
{ |
|
"epoch": 8.84, |
|
"grad_norm": 2.5064008235931396, |
|
"learning_rate": 3.552155477031802e-05, |
|
"loss": 2.0089, |
|
"step": 21000 |
|
}, |
|
{ |
|
"epoch": 8.84, |
|
"eval_cer": 0.4491318290250675, |
|
"eval_loss": 2.749382734298706, |
|
"eval_runtime": 372.5311, |
|
"eval_samples_per_second": 25.442, |
|
"eval_steps_per_second": 3.181, |
|
"step": 21000 |
|
}, |
|
{ |
|
"epoch": 8.88, |
|
"grad_norm": 1.8727614879608154, |
|
"learning_rate": 3.5450883392226145e-05, |
|
"loss": 2.5929, |
|
"step": 21100 |
|
}, |
|
{ |
|
"epoch": 8.88, |
|
"eval_cer": 0.4467780884595831, |
|
"eval_loss": 2.1603004932403564, |
|
"eval_runtime": 382.1598, |
|
"eval_samples_per_second": 24.801, |
|
"eval_steps_per_second": 3.101, |
|
"step": 21100 |
|
}, |
|
{ |
|
"epoch": 8.92, |
|
"grad_norm": 2.88053560256958, |
|
"learning_rate": 3.538021201413428e-05, |
|
"loss": 1.2652, |
|
"step": 21200 |
|
}, |
|
{ |
|
"epoch": 8.92, |
|
"eval_cer": 0.44302872394509407, |
|
"eval_loss": 2.7272582054138184, |
|
"eval_runtime": 364.629, |
|
"eval_samples_per_second": 25.994, |
|
"eval_steps_per_second": 3.25, |
|
"step": 21200 |
|
}, |
|
{ |
|
"epoch": 8.97, |
|
"grad_norm": 3.520254135131836, |
|
"learning_rate": 3.53095406360424e-05, |
|
"loss": 1.2819, |
|
"step": 21300 |
|
}, |
|
{ |
|
"epoch": 8.97, |
|
"eval_cer": 0.44700784091353485, |
|
"eval_loss": 1.840844988822937, |
|
"eval_runtime": 393.1165, |
|
"eval_samples_per_second": 24.11, |
|
"eval_steps_per_second": 3.014, |
|
"step": 21300 |
|
}, |
|
{ |
|
"epoch": 9.01, |
|
"grad_norm": 1.5368082523345947, |
|
"learning_rate": 3.523886925795053e-05, |
|
"loss": 1.2587, |
|
"step": 21400 |
|
}, |
|
{ |
|
"epoch": 9.01, |
|
"eval_cer": 0.43732890774705724, |
|
"eval_loss": 2.2236878871917725, |
|
"eval_runtime": 363.5106, |
|
"eval_samples_per_second": 26.074, |
|
"eval_steps_per_second": 3.26, |
|
"step": 21400 |
|
}, |
|
{ |
|
"epoch": 9.05, |
|
"grad_norm": 2.522552251815796, |
|
"learning_rate": 3.5168197879858664e-05, |
|
"loss": 1.2512, |
|
"step": 21500 |
|
}, |
|
{ |
|
"epoch": 9.05, |
|
"eval_cer": 0.44793418325446793, |
|
"eval_loss": 2.5103232860565186, |
|
"eval_runtime": 384.7009, |
|
"eval_samples_per_second": 24.637, |
|
"eval_steps_per_second": 3.08, |
|
"step": 21500 |
|
}, |
|
{ |
|
"epoch": 9.09, |
|
"grad_norm": 17.708566665649414, |
|
"learning_rate": 3.5097526501766785e-05, |
|
"loss": 1.2248, |
|
"step": 21600 |
|
}, |
|
{ |
|
"epoch": 9.09, |
|
"eval_cer": 0.4432878064995503, |
|
"eval_loss": 2.5621256828308105, |
|
"eval_runtime": 370.664, |
|
"eval_samples_per_second": 25.57, |
|
"eval_steps_per_second": 3.197, |
|
"step": 21600 |
|
}, |
|
{ |
|
"epoch": 9.13, |
|
"grad_norm": 2.740123987197876, |
|
"learning_rate": 3.502685512367491e-05, |
|
"loss": 1.3298, |
|
"step": 21700 |
|
}, |
|
{ |
|
"epoch": 9.13, |
|
"eval_cer": 0.441982617027101, |
|
"eval_loss": 2.49664568901062, |
|
"eval_runtime": 372.6231, |
|
"eval_samples_per_second": 25.436, |
|
"eval_steps_per_second": 3.18, |
|
"step": 21700 |
|
}, |
|
{ |
|
"epoch": 9.18, |
|
"grad_norm": 7.561341762542725, |
|
"learning_rate": 3.495689045936396e-05, |
|
"loss": 1.2998, |
|
"step": 21800 |
|
}, |
|
{ |
|
"epoch": 9.18, |
|
"eval_cer": 0.43526113566149155, |
|
"eval_loss": 3.151437282562256, |
|
"eval_runtime": 381.1035, |
|
"eval_samples_per_second": 24.87, |
|
"eval_steps_per_second": 3.109, |
|
"step": 21800 |
|
}, |
|
{ |
|
"epoch": 9.22, |
|
"grad_norm": 2.588192939758301, |
|
"learning_rate": 3.488621908127209e-05, |
|
"loss": 1.2493, |
|
"step": 21900 |
|
}, |
|
{ |
|
"epoch": 9.22, |
|
"eval_cer": 0.46399241328066954, |
|
"eval_loss": 1.8955408334732056, |
|
"eval_runtime": 369.5234, |
|
"eval_samples_per_second": 25.649, |
|
"eval_steps_per_second": 3.207, |
|
"step": 21900 |
|
}, |
|
{ |
|
"epoch": 9.26, |
|
"grad_norm": 2.3700144290924072, |
|
"learning_rate": 3.481554770318021e-05, |
|
"loss": 1.3379, |
|
"step": 22000 |
|
}, |
|
{ |
|
"epoch": 9.26, |
|
"eval_cer": 0.4379032888819366, |
|
"eval_loss": 1.7394232749938965, |
|
"eval_runtime": 383.407, |
|
"eval_samples_per_second": 24.72, |
|
"eval_steps_per_second": 3.091, |
|
"step": 22000 |
|
}, |
|
{ |
|
"epoch": 9.3, |
|
"grad_norm": 2.381225824356079, |
|
"learning_rate": 3.4744876325088346e-05, |
|
"loss": 2.2422, |
|
"step": 22100 |
|
}, |
|
{ |
|
"epoch": 9.3, |
|
"eval_cer": 0.43050965937976615, |
|
"eval_loss": 1.691564917564392, |
|
"eval_runtime": 362.7906, |
|
"eval_samples_per_second": 26.125, |
|
"eval_steps_per_second": 3.266, |
|
"step": 22100 |
|
}, |
|
{ |
|
"epoch": 9.35, |
|
"grad_norm": 4.20334005355835, |
|
"learning_rate": 3.467420494699647e-05, |
|
"loss": 1.239, |
|
"step": 22200 |
|
}, |
|
{ |
|
"epoch": 9.35, |
|
"eval_cer": 0.44948623440616325, |
|
"eval_loss": 3.0320708751678467, |
|
"eval_runtime": 386.6159, |
|
"eval_samples_per_second": 24.515, |
|
"eval_steps_per_second": 3.065, |
|
"step": 22200 |
|
}, |
|
{ |
|
"epoch": 9.39, |
|
"grad_norm": 12.748970985412598, |
|
"learning_rate": 3.4603533568904595e-05, |
|
"loss": 1.622, |
|
"step": 22300 |
|
}, |
|
{ |
|
"epoch": 9.39, |
|
"eval_cer": 0.4492564819522115, |
|
"eval_loss": 2.2297956943511963, |
|
"eval_runtime": 368.8399, |
|
"eval_samples_per_second": 25.697, |
|
"eval_steps_per_second": 3.213, |
|
"step": 22300 |
|
}, |
|
{ |
|
"epoch": 9.43, |
|
"grad_norm": 10.591804504394531, |
|
"learning_rate": 3.4532862190812723e-05, |
|
"loss": 1.4811, |
|
"step": 22400 |
|
}, |
|
{ |
|
"epoch": 9.43, |
|
"eval_cer": 0.44453922412107466, |
|
"eval_loss": 2.3979556560516357, |
|
"eval_runtime": 383.7637, |
|
"eval_samples_per_second": 24.697, |
|
"eval_steps_per_second": 3.088, |
|
"step": 22400 |
|
}, |
|
{ |
|
"epoch": 9.47, |
|
"grad_norm": 7.108492374420166, |
|
"learning_rate": 3.446219081272085e-05, |
|
"loss": 1.5169, |
|
"step": 22500 |
|
}, |
|
{ |
|
"epoch": 9.47, |
|
"eval_cer": 0.4425838840874428, |
|
"eval_loss": 1.763051152229309, |
|
"eval_runtime": 371.2126, |
|
"eval_samples_per_second": 25.533, |
|
"eval_steps_per_second": 3.192, |
|
"step": 22500 |
|
}, |
|
{ |
|
"epoch": 9.51, |
|
"grad_norm": 2.171220302581787, |
|
"learning_rate": 3.439151943462898e-05, |
|
"loss": 1.2667, |
|
"step": 22600 |
|
}, |
|
{ |
|
"epoch": 9.51, |
|
"eval_cer": 0.44492784795275897, |
|
"eval_loss": 1.6504524946212769, |
|
"eval_runtime": 384.6648, |
|
"eval_samples_per_second": 24.64, |
|
"eval_steps_per_second": 3.081, |
|
"step": 22600 |
|
}, |
|
{ |
|
"epoch": 9.56, |
|
"grad_norm": 4.868325233459473, |
|
"learning_rate": 3.43208480565371e-05, |
|
"loss": 1.2425, |
|
"step": 22700 |
|
}, |
|
{ |
|
"epoch": 9.56, |
|
"eval_cer": 0.4337261937350905, |
|
"eval_loss": 1.6726810932159424, |
|
"eval_runtime": 360.9955, |
|
"eval_samples_per_second": 26.255, |
|
"eval_steps_per_second": 3.283, |
|
"step": 22700 |
|
}, |
|
{ |
|
"epoch": 9.6, |
|
"grad_norm": 4.784325122833252, |
|
"learning_rate": 3.4250176678445236e-05, |
|
"loss": 1.2519, |
|
"step": 22800 |
|
}, |
|
{ |
|
"epoch": 9.6, |
|
"eval_cer": 0.4394284541081694, |
|
"eval_loss": 2.026627540588379, |
|
"eval_runtime": 383.883, |
|
"eval_samples_per_second": 24.69, |
|
"eval_steps_per_second": 3.087, |
|
"step": 22800 |
|
}, |
|
{ |
|
"epoch": 9.64, |
|
"grad_norm": 34.55485534667969, |
|
"learning_rate": 3.417950530035336e-05, |
|
"loss": 1.2678, |
|
"step": 22900 |
|
}, |
|
{ |
|
"epoch": 9.64, |
|
"eval_cer": 0.45279075906300104, |
|
"eval_loss": 1.6792824268341064, |
|
"eval_runtime": 362.9993, |
|
"eval_samples_per_second": 26.11, |
|
"eval_steps_per_second": 3.264, |
|
"step": 22900 |
|
}, |
|
{ |
|
"epoch": 9.68, |
|
"grad_norm": 8.905746459960938, |
|
"learning_rate": 3.4108833922261485e-05, |
|
"loss": 1.2559, |
|
"step": 23000 |
|
}, |
|
{ |
|
"epoch": 9.68, |
|
"eval_cer": 0.452297035704509, |
|
"eval_loss": 1.6737189292907715, |
|
"eval_runtime": 383.7761, |
|
"eval_samples_per_second": 24.697, |
|
"eval_steps_per_second": 3.088, |
|
"step": 23000 |
|
}, |
|
{ |
|
"epoch": 9.72, |
|
"grad_norm": 3.8121345043182373, |
|
"learning_rate": 3.4038162544169613e-05, |
|
"loss": 1.2646, |
|
"step": 23100 |
|
}, |
|
{ |
|
"epoch": 9.72, |
|
"eval_cer": 0.45129247976223064, |
|
"eval_loss": 1.4810179471969604, |
|
"eval_runtime": 361.7355, |
|
"eval_samples_per_second": 26.201, |
|
"eval_steps_per_second": 3.276, |
|
"step": 23100 |
|
}, |
|
{ |
|
"epoch": 9.77, |
|
"grad_norm": 1.6551660299301147, |
|
"learning_rate": 3.396749116607774e-05, |
|
"loss": 1.2812, |
|
"step": 23200 |
|
}, |
|
{ |
|
"epoch": 9.77, |
|
"eval_cer": 0.4401079347698565, |
|
"eval_loss": 1.4080007076263428, |
|
"eval_runtime": 387.5504, |
|
"eval_samples_per_second": 24.456, |
|
"eval_steps_per_second": 3.058, |
|
"step": 23200 |
|
}, |
|
{ |
|
"epoch": 9.81, |
|
"grad_norm": 1.3945401906967163, |
|
"learning_rate": 3.389681978798587e-05, |
|
"loss": 1.2539, |
|
"step": 23300 |
|
}, |
|
{ |
|
"epoch": 9.81, |
|
"eval_cer": 0.44230280395760824, |
|
"eval_loss": 1.8802071809768677, |
|
"eval_runtime": 376.3421, |
|
"eval_samples_per_second": 25.185, |
|
"eval_steps_per_second": 3.149, |
|
"step": 23300 |
|
}, |
|
{ |
|
"epoch": 9.85, |
|
"grad_norm": 19.793508529663086, |
|
"learning_rate": 3.382614840989399e-05, |
|
"loss": 1.9455, |
|
"step": 23400 |
|
}, |
|
{ |
|
"epoch": 9.85, |
|
"eval_cer": 0.45229947987955105, |
|
"eval_loss": 1.607908010482788, |
|
"eval_runtime": 388.4585, |
|
"eval_samples_per_second": 24.399, |
|
"eval_steps_per_second": 3.051, |
|
"step": 23400 |
|
}, |
|
{ |
|
"epoch": 9.89, |
|
"grad_norm": 25.258953094482422, |
|
"learning_rate": 3.3755477031802126e-05, |
|
"loss": 1.2352, |
|
"step": 23500 |
|
}, |
|
{ |
|
"epoch": 9.89, |
|
"eval_cer": 0.43993439834187165, |
|
"eval_loss": 1.3818862438201904, |
|
"eval_runtime": 363.0875, |
|
"eval_samples_per_second": 26.104, |
|
"eval_steps_per_second": 3.264, |
|
"step": 23500 |
|
}, |
|
{ |
|
"epoch": 9.93, |
|
"grad_norm": 9.951347351074219, |
|
"learning_rate": 3.368480565371025e-05, |
|
"loss": 1.5735, |
|
"step": 23600 |
|
}, |
|
{ |
|
"epoch": 9.93, |
|
"eval_cer": 0.4422074811309687, |
|
"eval_loss": 1.6464565992355347, |
|
"eval_runtime": 386.065, |
|
"eval_samples_per_second": 24.55, |
|
"eval_steps_per_second": 3.069, |
|
"step": 23600 |
|
}, |
|
{ |
|
"epoch": 9.98, |
|
"grad_norm": 4.360719203948975, |
|
"learning_rate": 3.3614134275618375e-05, |
|
"loss": 1.6236, |
|
"step": 23700 |
|
}, |
|
{ |
|
"epoch": 9.98, |
|
"eval_cer": 0.44366909780610847, |
|
"eval_loss": 1.770129919052124, |
|
"eval_runtime": 361.9008, |
|
"eval_samples_per_second": 26.189, |
|
"eval_steps_per_second": 3.274, |
|
"step": 23700 |
|
}, |
|
{ |
|
"epoch": 10.02, |
|
"grad_norm": 12.59276294708252, |
|
"learning_rate": 3.35434628975265e-05, |
|
"loss": 1.2522, |
|
"step": 23800 |
|
}, |
|
{ |
|
"epoch": 10.02, |
|
"eval_cer": 0.43885651714833207, |
|
"eval_loss": 2.233562707901001, |
|
"eval_runtime": 381.3188, |
|
"eval_samples_per_second": 24.856, |
|
"eval_steps_per_second": 3.108, |
|
"step": 23800 |
|
}, |
|
{ |
|
"epoch": 10.06, |
|
"grad_norm": 6.893917560577393, |
|
"learning_rate": 3.347279151943463e-05, |
|
"loss": 1.2052, |
|
"step": 23900 |
|
}, |
|
{ |
|
"epoch": 10.06, |
|
"eval_cer": 0.44252522388643384, |
|
"eval_loss": 2.4695940017700195, |
|
"eval_runtime": 366.61, |
|
"eval_samples_per_second": 25.853, |
|
"eval_steps_per_second": 3.232, |
|
"step": 23900 |
|
}, |
|
{ |
|
"epoch": 10.1, |
|
"grad_norm": 2.430925130844116, |
|
"learning_rate": 3.340212014134276e-05, |
|
"loss": 1.185, |
|
"step": 24000 |
|
}, |
|
{ |
|
"epoch": 10.1, |
|
"eval_cer": 0.4357133080442689, |
|
"eval_loss": 1.5434983968734741, |
|
"eval_runtime": 385.0716, |
|
"eval_samples_per_second": 24.614, |
|
"eval_steps_per_second": 3.077, |
|
"step": 24000 |
|
}, |
|
{ |
|
"epoch": 10.15, |
|
"grad_norm": 2.367770195007324, |
|
"learning_rate": 3.333144876325088e-05, |
|
"loss": 1.2225, |
|
"step": 24100 |
|
}, |
|
{ |
|
"epoch": 10.15, |
|
"eval_cer": 0.4268898361425052, |
|
"eval_loss": 1.602295160293579, |
|
"eval_runtime": 368.0631, |
|
"eval_samples_per_second": 25.751, |
|
"eval_steps_per_second": 3.22, |
|
"step": 24100 |
|
}, |
|
{ |
|
"epoch": 10.19, |
|
"grad_norm": 2.6913535594940186, |
|
"learning_rate": 3.3260777385159016e-05, |
|
"loss": 1.2071, |
|
"step": 24200 |
|
}, |
|
{ |
|
"epoch": 10.19, |
|
"eval_cer": 0.43098382933792184, |
|
"eval_loss": 1.9375296831130981, |
|
"eval_runtime": 388.5754, |
|
"eval_samples_per_second": 24.392, |
|
"eval_steps_per_second": 3.05, |
|
"step": 24200 |
|
}, |
|
{ |
|
"epoch": 10.23, |
|
"grad_norm": 2.8861985206604004, |
|
"learning_rate": 3.319010600706714e-05, |
|
"loss": 1.2745, |
|
"step": 24300 |
|
}, |
|
{ |
|
"epoch": 10.23, |
|
"eval_cer": 0.43681563098822884, |
|
"eval_loss": 1.8604010343551636, |
|
"eval_runtime": 365.4924, |
|
"eval_samples_per_second": 25.932, |
|
"eval_steps_per_second": 3.242, |
|
"step": 24300 |
|
}, |
|
{ |
|
"epoch": 10.27, |
|
"grad_norm": 3.54598069190979, |
|
"learning_rate": 3.3119434628975265e-05, |
|
"loss": 1.3486, |
|
"step": 24400 |
|
}, |
|
{ |
|
"epoch": 10.27, |
|
"eval_cer": 0.4357133080442689, |
|
"eval_loss": 1.3347864151000977, |
|
"eval_runtime": 380.5736, |
|
"eval_samples_per_second": 24.905, |
|
"eval_steps_per_second": 3.114, |
|
"step": 24400 |
|
}, |
|
{ |
|
"epoch": 10.31, |
|
"grad_norm": 2.1198437213897705, |
|
"learning_rate": 3.304876325088339e-05, |
|
"loss": 1.1866, |
|
"step": 24500 |
|
}, |
|
{ |
|
"epoch": 10.31, |
|
"eval_cer": 0.4299792733956435, |
|
"eval_loss": 1.308254361152649, |
|
"eval_runtime": 368.9525, |
|
"eval_samples_per_second": 25.689, |
|
"eval_steps_per_second": 3.212, |
|
"step": 24500 |
|
}, |
|
{ |
|
"epoch": 10.36, |
|
"grad_norm": 2.029782295227051, |
|
"learning_rate": 3.297809187279152e-05, |
|
"loss": 1.1697, |
|
"step": 24600 |
|
}, |
|
{ |
|
"epoch": 10.36, |
|
"eval_cer": 0.42697293809393455, |
|
"eval_loss": 1.4594690799713135, |
|
"eval_runtime": 389.4322, |
|
"eval_samples_per_second": 24.338, |
|
"eval_steps_per_second": 3.043, |
|
"step": 24600 |
|
}, |
|
{ |
|
"epoch": 10.4, |
|
"grad_norm": 2.0395631790161133, |
|
"learning_rate": 3.290742049469965e-05, |
|
"loss": 1.1793, |
|
"step": 24700 |
|
}, |
|
{ |
|
"epoch": 10.4, |
|
"eval_cer": 0.42983506706816316, |
|
"eval_loss": 1.475653886795044, |
|
"eval_runtime": 374.0567, |
|
"eval_samples_per_second": 25.338, |
|
"eval_steps_per_second": 3.168, |
|
"step": 24700 |
|
}, |
|
{ |
|
"epoch": 10.44, |
|
"grad_norm": 6.735607624053955, |
|
"learning_rate": 3.283674911660777e-05, |
|
"loss": 1.4708, |
|
"step": 24800 |
|
}, |
|
{ |
|
"epoch": 10.44, |
|
"eval_cer": 0.4388076336474913, |
|
"eval_loss": 1.9250705242156982, |
|
"eval_runtime": 386.263, |
|
"eval_samples_per_second": 24.538, |
|
"eval_steps_per_second": 3.068, |
|
"step": 24800 |
|
}, |
|
{ |
|
"epoch": 10.48, |
|
"grad_norm": 1.8092114925384521, |
|
"learning_rate": 3.2766077738515906e-05, |
|
"loss": 1.1854, |
|
"step": 24900 |
|
}, |
|
{ |
|
"epoch": 10.48, |
|
"eval_cer": 0.4322132493840679, |
|
"eval_loss": 1.8587048053741455, |
|
"eval_runtime": 371.4948, |
|
"eval_samples_per_second": 25.513, |
|
"eval_steps_per_second": 3.19, |
|
"step": 24900 |
|
}, |
|
{ |
|
"epoch": 10.52, |
|
"grad_norm": 3.406538724899292, |
|
"learning_rate": 3.269540636042403e-05, |
|
"loss": 1.2144, |
|
"step": 25000 |
|
}, |
|
{ |
|
"epoch": 10.52, |
|
"eval_cer": 0.4353711235383833, |
|
"eval_loss": 1.5310605764389038, |
|
"eval_runtime": 388.4081, |
|
"eval_samples_per_second": 24.402, |
|
"eval_steps_per_second": 3.051, |
|
"step": 25000 |
|
}, |
|
{ |
|
"epoch": 10.57, |
|
"grad_norm": 21.588300704956055, |
|
"learning_rate": 3.2624734982332155e-05, |
|
"loss": 1.1869, |
|
"step": 25100 |
|
}, |
|
{ |
|
"epoch": 10.57, |
|
"eval_cer": 0.41679294904383873, |
|
"eval_loss": 1.753686785697937, |
|
"eval_runtime": 363.8235, |
|
"eval_samples_per_second": 26.051, |
|
"eval_steps_per_second": 3.257, |
|
"step": 25100 |
|
}, |
|
{ |
|
"epoch": 10.61, |
|
"grad_norm": 2.433943033218384, |
|
"learning_rate": 3.255406360424029e-05, |
|
"loss": 1.186, |
|
"step": 25200 |
|
}, |
|
{ |
|
"epoch": 10.61, |
|
"eval_cer": 0.4293755621602597, |
|
"eval_loss": 2.9843032360076904, |
|
"eval_runtime": 383.8866, |
|
"eval_samples_per_second": 24.69, |
|
"eval_steps_per_second": 3.087, |
|
"step": 25200 |
|
}, |
|
{ |
|
"epoch": 10.65, |
|
"grad_norm": 2.10840106010437, |
|
"learning_rate": 3.248339222614841e-05, |
|
"loss": 1.2008, |
|
"step": 25300 |
|
}, |
|
{ |
|
"epoch": 10.65, |
|
"eval_cer": 0.42626657150678504, |
|
"eval_loss": 2.721179485321045, |
|
"eval_runtime": 367.0761, |
|
"eval_samples_per_second": 25.82, |
|
"eval_steps_per_second": 3.228, |
|
"step": 25300 |
|
} |
|
], |
|
"logging_steps": 100, |
|
"max_steps": 71250, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 30, |
|
"save_steps": 100, |
|
"total_flos": 2.772097477056956e+20, |
|
"train_batch_size": 8, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|