{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9888444537992003,
  "eval_steps": 100,
  "global_step": 7100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04,
      "grad_norm": 2.6430506706237793,
      "learning_rate": 9.5e-06,
      "loss": 5.6141,
      "step": 100
    },
    {
      "epoch": 0.04,
      "eval_cer": 0.9770320871299519,
      "eval_loss": 2.355087995529175,
      "eval_runtime": 329.8418,
      "eval_samples_per_second": 28.735,
      "eval_steps_per_second": 3.593,
      "step": 100
    },
    {
      "epoch": 0.08,
      "grad_norm": 2.838773012161255,
      "learning_rate": 1.9500000000000003e-05,
      "loss": 2.1498,
      "step": 200
    },
    {
      "epoch": 0.08,
      "eval_cer": 0.6385162879824802,
      "eval_loss": 1.961960792541504,
      "eval_runtime": 334.0577,
      "eval_samples_per_second": 28.372,
      "eval_steps_per_second": 3.547,
      "step": 200
    },
    {
      "epoch": 0.13,
      "grad_norm": 3.848980665206909,
      "learning_rate": 2.95e-05,
      "loss": 2.6461,
      "step": 300
    },
    {
      "epoch": 0.13,
      "eval_cer": 0.5852479371162645,
      "eval_loss": 2.6564223766326904,
      "eval_runtime": 350.7705,
      "eval_samples_per_second": 27.021,
      "eval_steps_per_second": 3.378,
      "step": 300
    },
    {
      "epoch": 0.17,
      "grad_norm": 3.9299519062042236,
      "learning_rate": 3.9500000000000005e-05,
      "loss": 2.359,
      "step": 400
    },
    {
      "epoch": 0.17,
      "eval_cer": 0.5907913261116108,
      "eval_loss": 2.6195201873779297,
      "eval_runtime": 341.3159,
      "eval_samples_per_second": 27.769,
      "eval_steps_per_second": 3.472,
      "step": 400
    },
    {
      "epoch": 0.21,
      "grad_norm": 4.236645698547363,
      "learning_rate": 4.9500000000000004e-05,
      "loss": 4.2363,
      "step": 500
    },
    {
      "epoch": 0.21,
      "eval_cer": 0.624139650385202,
      "eval_loss": 2.457566261291504,
      "eval_runtime": 356.3471,
      "eval_samples_per_second": 26.598,
      "eval_steps_per_second": 3.325,
      "step": 500
    },
    {
      "epoch": 0.25,
      "grad_norm": 13.769584655761719,
      "learning_rate": 4.9932862190812725e-05,
      "loss": 2.6933,
      "step": 600
    },
    {
      "epoch": 0.25,
      "eval_cer": 0.6483369833013961,
      "eval_loss": 3.4720070362091064,
      "eval_runtime": 345.5879,
      "eval_samples_per_second": 27.426,
      "eval_steps_per_second": 3.429,
      "step": 600
    },
    {
      "epoch": 0.29,
      "grad_norm": 4.292990207672119,
      "learning_rate": 4.986289752650177e-05,
      "loss": 4.1075,
      "step": 700
    },
    {
      "epoch": 0.29,
      "eval_cer": 0.6181098705564898,
      "eval_loss": 3.3849761486053467,
      "eval_runtime": 353.3198,
      "eval_samples_per_second": 26.826,
      "eval_steps_per_second": 3.354,
      "step": 700
    },
    {
      "epoch": 0.34,
      "grad_norm": 4.32611083984375,
      "learning_rate": 4.97922261484099e-05,
      "loss": 2.6907,
      "step": 800
    },
    {
      "epoch": 0.34,
      "eval_cer": 0.6191510891243988,
      "eval_loss": 2.5412750244140625,
      "eval_runtime": 343.4967,
      "eval_samples_per_second": 27.593,
      "eval_steps_per_second": 3.45,
      "step": 800
    },
    {
      "epoch": 0.38,
      "grad_norm": 3.173403263092041,
      "learning_rate": 4.972155477031802e-05,
      "loss": 4.2294,
      "step": 900
    },
    {
      "epoch": 0.38,
      "eval_cer": 0.5840918423213797,
      "eval_loss": 2.6817102432250977,
      "eval_runtime": 354.5341,
      "eval_samples_per_second": 26.734,
      "eval_steps_per_second": 3.342,
      "step": 900
    },
    {
      "epoch": 0.42,
      "grad_norm": 4.8158440589904785,
      "learning_rate": 4.965088339222615e-05,
      "loss": 2.2949,
      "step": 1000
    },
    {
      "epoch": 0.42,
      "eval_cer": 0.5736649915920379,
      "eval_loss": 1.9513429403305054,
      "eval_runtime": 345.1224,
      "eval_samples_per_second": 27.463,
      "eval_steps_per_second": 3.434,
      "step": 1000
    },
    {
      "epoch": 0.46,
      "grad_norm": 3.0650887489318848,
      "learning_rate": 4.958021201413428e-05,
      "loss": 2.6115,
      "step": 1100
    },
    {
      "epoch": 0.46,
      "eval_cer": 0.5645799929607759,
      "eval_loss": 2.8409903049468994,
      "eval_runtime": 352.0824,
      "eval_samples_per_second": 26.92,
      "eval_steps_per_second": 3.366,
      "step": 1100
    },
    {
      "epoch": 0.51,
      "grad_norm": 3.0540664196014404,
      "learning_rate": 4.950954063604241e-05,
      "loss": 2.3453,
      "step": 1200
    },
    {
      "epoch": 0.51,
      "eval_cer": 0.598292499315631,
      "eval_loss": 1.8713030815124512,
      "eval_runtime": 344.1368,
      "eval_samples_per_second": 27.541,
      "eval_steps_per_second": 3.443,
      "step": 1200
    },
    {
      "epoch": 0.55,
      "grad_norm": 4.366485118865967,
      "learning_rate": 4.9438869257950535e-05,
      "loss": 3.2578,
      "step": 1300
    },
    {
      "epoch": 0.55,
      "eval_cer": 0.5734816784638849,
      "eval_loss": 2.000699758529663,
      "eval_runtime": 350.6902,
      "eval_samples_per_second": 27.027,
      "eval_steps_per_second": 3.379,
      "step": 1300
    },
    {
      "epoch": 0.59,
      "grad_norm": 81.67564392089844,
      "learning_rate": 4.936819787985866e-05,
      "loss": 1.9657,
      "step": 1400
    },
    {
      "epoch": 0.59,
      "eval_cer": 0.6325427241797349,
      "eval_loss": 3.403754472732544,
      "eval_runtime": 340.0489,
      "eval_samples_per_second": 27.872,
      "eval_steps_per_second": 3.485,
      "step": 1400
    },
    {
      "epoch": 0.63,
      "grad_norm": 3.837653875350952,
      "learning_rate": 4.929752650176679e-05,
      "loss": 2.6865,
      "step": 1500
    },
    {
      "epoch": 0.63,
      "eval_cer": 0.5574918853388604,
      "eval_loss": 3.5201330184936523,
      "eval_runtime": 351.432,
      "eval_samples_per_second": 26.97,
      "eval_steps_per_second": 3.372,
      "step": 1500
    },
    {
      "epoch": 0.67,
      "grad_norm": 2.9047458171844482,
      "learning_rate": 4.922685512367491e-05,
      "loss": 2.4105,
      "step": 1600
    },
    {
      "epoch": 0.67,
      "eval_cer": 0.5886624496499941,
      "eval_loss": 3.5269789695739746,
      "eval_runtime": 347.023,
      "eval_samples_per_second": 27.312,
      "eval_steps_per_second": 3.415,
      "step": 1600
    },
    {
      "epoch": 0.72,
      "grad_norm": 2.9270007610321045,
      "learning_rate": 4.915618374558304e-05,
      "loss": 2.418,
      "step": 1700
    },
    {
      "epoch": 0.72,
      "eval_cer": 0.624877791247898,
      "eval_loss": 2.498126983642578,
      "eval_runtime": 355.9419,
      "eval_samples_per_second": 26.628,
      "eval_steps_per_second": 3.329,
      "step": 1700
    },
    {
      "epoch": 0.76,
      "grad_norm": 3.458078622817993,
      "learning_rate": 4.908551236749117e-05,
      "loss": 2.6118,
      "step": 1800
    },
    {
      "epoch": 0.76,
      "eval_cer": 0.5718074185600877,
      "eval_loss": 3.407224655151367,
      "eval_runtime": 343.8365,
      "eval_samples_per_second": 27.565,
      "eval_steps_per_second": 3.446,
      "step": 1800
    },
    {
      "epoch": 0.8,
      "grad_norm": 1.9933278560638428,
      "learning_rate": 4.90148409893993e-05,
      "loss": 3.2046,
      "step": 1900
    },
    {
      "epoch": 0.8,
      "eval_cer": 0.5539844941535333,
      "eval_loss": 1.928993821144104,
      "eval_runtime": 357.2509,
      "eval_samples_per_second": 26.53,
      "eval_steps_per_second": 3.317,
      "step": 1900
    },
    {
      "epoch": 0.84,
      "grad_norm": 4.917328357696533,
      "learning_rate": 4.8944169611307425e-05,
      "loss": 3.1904,
      "step": 2000
    },
    {
      "epoch": 0.84,
      "eval_cer": 0.5568001838019632,
      "eval_loss": 2.729193925857544,
      "eval_runtime": 341.3463,
      "eval_samples_per_second": 27.767,
      "eval_steps_per_second": 3.472,
      "step": 2000
    },
    {
      "epoch": 0.88,
      "grad_norm": 2.842353343963623,
      "learning_rate": 4.8873498233215547e-05,
      "loss": 2.1649,
      "step": 2100
    },
    {
      "epoch": 0.88,
      "eval_cer": 0.544757733369833,
      "eval_loss": 2.421653985977173,
      "eval_runtime": 358.7261,
      "eval_samples_per_second": 26.421,
      "eval_steps_per_second": 3.303,
      "step": 2100
    },
    {
      "epoch": 0.93,
      "grad_norm": 4.818225383758545,
      "learning_rate": 4.880282685512368e-05,
      "loss": 2.2775,
      "step": 2200
    },
    {
      "epoch": 0.93,
      "eval_cer": 0.5533832270931915,
      "eval_loss": 2.188101291656494,
      "eval_runtime": 354.4215,
      "eval_samples_per_second": 26.742,
      "eval_steps_per_second": 3.343,
      "step": 2200
    },
    {
      "epoch": 0.97,
      "grad_norm": 2.9436004161834717,
      "learning_rate": 4.87321554770318e-05,
      "loss": 1.6308,
      "step": 2300
    },
    {
      "epoch": 0.97,
      "eval_cer": 0.5162586523796489,
      "eval_loss": 3.3528623580932617,
      "eval_runtime": 360.9925,
      "eval_samples_per_second": 26.255,
      "eval_steps_per_second": 3.283,
      "step": 2300
    },
    {
      "epoch": 1.01,
      "grad_norm": 7.935622692108154,
      "learning_rate": 4.866148409893993e-05,
      "loss": 2.4462,
      "step": 2400
    },
    {
      "epoch": 1.01,
      "eval_cer": 0.575390579171718,
      "eval_loss": 2.0690765380859375,
      "eval_runtime": 343.7001,
      "eval_samples_per_second": 27.576,
      "eval_steps_per_second": 3.448,
      "step": 2400
    },
    {
      "epoch": 1.05,
      "grad_norm": 2.40964937210083,
      "learning_rate": 4.859081272084806e-05,
      "loss": 2.3368,
      "step": 2500
    },
    {
      "epoch": 1.05,
      "eval_cer": 0.5851746118650033,
      "eval_loss": 1.9030027389526367,
      "eval_runtime": 352.7743,
      "eval_samples_per_second": 26.867,
      "eval_steps_per_second": 3.359,
      "step": 2500
    },
    {
      "epoch": 1.09,
      "grad_norm": 3.7618064880371094,
      "learning_rate": 4.852084805653711e-05,
      "loss": 2.4157,
      "step": 2600
    },
    {
      "epoch": 1.09,
      "eval_cer": 0.5566339798991045,
      "eval_loss": 3.213930368423462,
      "eval_runtime": 341.3273,
      "eval_samples_per_second": 27.768,
      "eval_steps_per_second": 3.472,
      "step": 2600
    },
    {
      "epoch": 1.14,
      "grad_norm": 9.61824893951416,
      "learning_rate": 4.845017667844523e-05,
      "loss": 2.7299,
      "step": 2700
    },
    {
      "epoch": 1.14,
      "eval_cer": 0.5819702983848891,
      "eval_loss": 1.7893775701522827,
      "eval_runtime": 353.5497,
      "eval_samples_per_second": 26.808,
      "eval_steps_per_second": 3.352,
      "step": 2700
    },
    {
      "epoch": 1.18,
      "grad_norm": 2.6824772357940674,
      "learning_rate": 4.8379505300353364e-05,
      "loss": 2.934,
      "step": 2800
    },
    {
      "epoch": 1.18,
      "eval_cer": 0.5119813460560791,
      "eval_loss": 2.804206609725952,
      "eval_runtime": 340.4666,
      "eval_samples_per_second": 27.838,
      "eval_steps_per_second": 3.481,
      "step": 2800
    },
    {
      "epoch": 1.22,
      "grad_norm": 4.691165924072266,
      "learning_rate": 4.8308833922261485e-05,
      "loss": 2.0012,
      "step": 2900
    },
    {
      "epoch": 1.22,
      "eval_cer": 0.5138047006374409,
      "eval_loss": 3.412604331970215,
      "eval_runtime": 349.7896,
      "eval_samples_per_second": 27.096,
      "eval_steps_per_second": 3.388,
      "step": 2900
    },
    {
      "epoch": 1.26,
      "grad_norm": 2.8883514404296875,
      "learning_rate": 4.823816254416961e-05,
      "loss": 3.5496,
      "step": 3000
    },
    {
      "epoch": 1.26,
      "eval_cer": 0.5417074029173673,
      "eval_loss": 1.7423540353775024,
      "eval_runtime": 339.3199,
      "eval_samples_per_second": 27.932,
      "eval_steps_per_second": 3.492,
      "step": 3000
    },
    {
      "epoch": 1.3,
      "grad_norm": 3.9499669075012207,
      "learning_rate": 4.816749116607774e-05,
      "loss": 2.4353,
      "step": 3100
    },
    {
      "epoch": 1.3,
      "eval_cer": 0.5479229400492746,
      "eval_loss": 3.4527170658111572,
      "eval_runtime": 354.1425,
      "eval_samples_per_second": 26.763,
      "eval_steps_per_second": 3.346,
      "step": 3100
    },
    {
      "epoch": 1.35,
      "grad_norm": 2.9536569118499756,
      "learning_rate": 4.809681978798587e-05,
      "loss": 2.9787,
      "step": 3200
    },
    {
      "epoch": 1.35,
      "eval_cer": 0.5677500879903015,
      "eval_loss": 2.6674227714538574,
      "eval_runtime": 344.0522,
      "eval_samples_per_second": 27.548,
      "eval_steps_per_second": 3.444,
      "step": 3200
    },
    {
      "epoch": 1.39,
      "grad_norm": 3.094930648803711,
      "learning_rate": 4.8026148409894e-05,
      "loss": 2.2166,
      "step": 3300
    },
    {
      "epoch": 1.39,
      "eval_cer": 0.5857514371749247,
      "eval_loss": 3.2282423973083496,
      "eval_runtime": 358.5567,
      "eval_samples_per_second": 26.434,
      "eval_steps_per_second": 3.305,
      "step": 3300
    },
    {
      "epoch": 1.43,
      "grad_norm": 5.626856803894043,
      "learning_rate": 4.795547703180212e-05,
      "loss": 2.6222,
      "step": 3400
    },
    {
      "epoch": 1.43,
      "eval_cer": 0.5495043213014743,
      "eval_loss": 1.7686785459518433,
      "eval_runtime": 342.362,
      "eval_samples_per_second": 27.684,
      "eval_steps_per_second": 3.461,
      "step": 3400
    },
    {
      "epoch": 1.47,
      "grad_norm": 5.24758768081665,
      "learning_rate": 4.7884805653710253e-05,
      "loss": 2.078,
      "step": 3500
    },
    {
      "epoch": 1.47,
      "eval_cer": 0.5179402448085723,
      "eval_loss": 2.244272470474243,
      "eval_runtime": 354.1685,
      "eval_samples_per_second": 26.761,
      "eval_steps_per_second": 3.346,
      "step": 3500
    },
    {
      "epoch": 1.52,
      "grad_norm": 6.822329044342041,
      "learning_rate": 4.7814134275618375e-05,
      "loss": 2.7694,
      "step": 3600
    },
    {
      "epoch": 1.52,
      "eval_cer": 0.5457598451370693,
      "eval_loss": 2.137202501296997,
      "eval_runtime": 345.4818,
      "eval_samples_per_second": 27.434,
      "eval_steps_per_second": 3.43,
      "step": 3600
    },
    {
      "epoch": 1.56,
      "grad_norm": 2.648332357406616,
      "learning_rate": 4.77434628975265e-05,
      "loss": 1.74,
      "step": 3700
    },
    {
      "epoch": 1.56,
      "eval_cer": 0.5568441789527199,
      "eval_loss": 3.1145806312561035,
      "eval_runtime": 362.0538,
      "eval_samples_per_second": 26.178,
      "eval_steps_per_second": 3.273,
      "step": 3700
    },
    {
      "epoch": 1.6,
      "grad_norm": 17.834945678710938,
      "learning_rate": 4.767279151943463e-05,
      "loss": 1.7586,
      "step": 3800
    },
    {
      "epoch": 1.6,
      "eval_cer": 0.5344653122678034,
      "eval_loss": 1.8897360563278198,
      "eval_runtime": 345.2891,
      "eval_samples_per_second": 27.449,
      "eval_steps_per_second": 3.432,
      "step": 3800
    },
    {
      "epoch": 1.64,
      "grad_norm": 7.307125568389893,
      "learning_rate": 4.760212014134276e-05,
      "loss": 1.6227,
      "step": 3900
    },
    {
      "epoch": 1.64,
      "eval_cer": 0.551083258378632,
      "eval_loss": 1.9913257360458374,
      "eval_runtime": 354.7374,
      "eval_samples_per_second": 26.718,
      "eval_steps_per_second": 3.34,
      "step": 3900
    },
    {
      "epoch": 1.68,
      "grad_norm": 4.344304084777832,
      "learning_rate": 4.753144876325089e-05,
      "loss": 3.1757,
      "step": 4000
    },
    {
      "epoch": 1.68,
      "eval_cer": 0.5003299636306754,
      "eval_loss": 1.8998807668685913,
      "eval_runtime": 348.4123,
      "eval_samples_per_second": 27.203,
      "eval_steps_per_second": 3.401,
      "step": 4000
    },
    {
      "epoch": 1.73,
      "grad_norm": 2.8654837608337402,
      "learning_rate": 4.746077738515901e-05,
      "loss": 2.226,
      "step": 4100
    },
    {
      "epoch": 1.73,
      "eval_cer": 0.5324904188338352,
      "eval_loss": 3.042088031768799,
      "eval_runtime": 354.1261,
      "eval_samples_per_second": 26.764,
      "eval_steps_per_second": 3.346,
      "step": 4100
    },
    {
      "epoch": 1.77,
      "grad_norm": 7.595367908477783,
      "learning_rate": 4.7390106007067143e-05,
      "loss": 2.7925,
      "step": 4200
    },
    {
      "epoch": 1.77,
      "eval_cer": 0.5470479253842243,
      "eval_loss": 3.0831844806671143,
      "eval_runtime": 344.2619,
      "eval_samples_per_second": 27.531,
      "eval_steps_per_second": 3.442,
      "step": 4200
    },
    {
      "epoch": 1.81,
      "grad_norm": 7.425869941711426,
      "learning_rate": 4.7319434628975265e-05,
      "loss": 3.2591,
      "step": 4300
    },
    {
      "epoch": 1.81,
      "eval_cer": 0.5522955691994837,
      "eval_loss": 1.7015308141708374,
      "eval_runtime": 353.5748,
      "eval_samples_per_second": 26.806,
      "eval_steps_per_second": 3.351,
      "step": 4300
    },
    {
      "epoch": 1.85,
      "grad_norm": 2.405174493789673,
      "learning_rate": 4.724876325088339e-05,
      "loss": 2.373,
      "step": 4400
    },
    {
      "epoch": 1.85,
      "eval_cer": 0.5220977865550819,
      "eval_loss": 2.095752477645874,
      "eval_runtime": 344.3236,
      "eval_samples_per_second": 27.526,
      "eval_steps_per_second": 3.442,
      "step": 4400
    },
    {
      "epoch": 1.89,
      "grad_norm": 3.823211908340454,
      "learning_rate": 4.717809187279153e-05,
      "loss": 2.3919,
      "step": 4500
    },
    {
      "epoch": 1.89,
      "eval_cer": 0.5331259043447656,
      "eval_loss": 2.41994571685791,
      "eval_runtime": 353.9416,
      "eval_samples_per_second": 26.778,
      "eval_steps_per_second": 3.348,
      "step": 4500
    },
    {
      "epoch": 1.94,
      "grad_norm": 2.0702412128448486,
      "learning_rate": 4.710742049469965e-05,
      "loss": 2.4842,
      "step": 4600
    },
    {
      "epoch": 1.94,
      "eval_cer": 0.5329523679167807,
      "eval_loss": 3.652872085571289,
      "eval_runtime": 339.8921,
      "eval_samples_per_second": 27.885,
      "eval_steps_per_second": 3.486,
      "step": 4600
    },
    {
      "epoch": 1.98,
      "grad_norm": 3.5580544471740723,
      "learning_rate": 4.703745583038869e-05,
      "loss": 3.577,
      "step": 4700
    },
    {
      "epoch": 1.98,
      "eval_cer": 0.5378284971256502,
      "eval_loss": 2.777487277984619,
      "eval_runtime": 356.1415,
      "eval_samples_per_second": 26.613,
      "eval_steps_per_second": 3.327,
      "step": 4700
    },
    {
      "epoch": 2.02,
      "grad_norm": 2.3930351734161377,
      "learning_rate": 4.6966784452296826e-05,
      "loss": 2.733,
      "step": 4800
    },
    {
      "epoch": 2.02,
      "eval_cer": 0.5305424113253294,
      "eval_loss": 3.2367630004882812,
      "eval_runtime": 346.0446,
      "eval_samples_per_second": 27.39,
      "eval_steps_per_second": 3.424,
      "step": 4800
    },
    {
      "epoch": 2.06,
      "grad_norm": 4.198298931121826,
      "learning_rate": 4.6896113074204954e-05,
      "loss": 1.616,
      "step": 4900
    },
    {
      "epoch": 2.06,
      "eval_cer": 0.523026573071057,
      "eval_loss": 2.150926351547241,
      "eval_runtime": 371.9418,
      "eval_samples_per_second": 25.482,
      "eval_steps_per_second": 3.186,
      "step": 4900
    },
    {
      "epoch": 2.1,
      "grad_norm": 3.6696269512176514,
      "learning_rate": 4.6825441696113075e-05,
      "loss": 1.8252,
      "step": 5000
    },
    {
      "epoch": 2.1,
      "eval_cer": 0.5257933792186461,
      "eval_loss": 3.1695752143859863,
      "eval_runtime": 351.2533,
      "eval_samples_per_second": 26.983,
      "eval_steps_per_second": 3.374,
      "step": 5000
    },
    {
      "epoch": 2.15,
      "grad_norm": 5.002595901489258,
      "learning_rate": 4.67547703180212e-05,
      "loss": 2.0594,
      "step": 5100
    },
    {
      "epoch": 2.15,
      "eval_cer": 0.5161682179030933,
      "eval_loss": 2.4667067527770996,
      "eval_runtime": 377.3711,
      "eval_samples_per_second": 25.116,
      "eval_steps_per_second": 3.14,
      "step": 5100
    },
    {
      "epoch": 2.19,
      "grad_norm": 3.2300162315368652,
      "learning_rate": 4.668409893992933e-05,
      "loss": 1.8512,
      "step": 5200
    },
    {
      "epoch": 2.19,
      "eval_cer": 0.5493185639982793,
      "eval_loss": 1.743632435798645,
      "eval_runtime": 361.1881,
      "eval_samples_per_second": 26.241,
      "eval_steps_per_second": 3.281,
      "step": 5200
    },
    {
      "epoch": 2.23,
      "grad_norm": 2.109961986541748,
      "learning_rate": 4.661342756183746e-05,
      "loss": 2.4585,
      "step": 5300
    },
    {
      "epoch": 2.23,
      "eval_cer": 0.5222273278323101,
      "eval_loss": 1.6703613996505737,
      "eval_runtime": 374.7053,
      "eval_samples_per_second": 25.295,
      "eval_steps_per_second": 3.162,
      "step": 5300
    },
    {
      "epoch": 2.27,
      "grad_norm": 3.7831201553344727,
      "learning_rate": 4.654275618374558e-05,
      "loss": 1.6107,
      "step": 5400
    },
    {
      "epoch": 2.27,
      "eval_cer": 0.5470601462594346,
      "eval_loss": 2.0870959758758545,
      "eval_runtime": 367.5169,
      "eval_samples_per_second": 25.789,
      "eval_steps_per_second": 3.224,
      "step": 5400
    },
    {
      "epoch": 2.32,
      "grad_norm": 12.008744239807129,
      "learning_rate": 4.6472084805653715e-05,
      "loss": 2.3883,
      "step": 5500
    },
    {
      "epoch": 2.32,
      "eval_cer": 0.5516771929138478,
      "eval_loss": 3.582747459411621,
      "eval_runtime": 376.938,
      "eval_samples_per_second": 25.145,
      "eval_steps_per_second": 3.144,
      "step": 5500
    },
    {
      "epoch": 2.36,
      "grad_norm": 2.4807026386260986,
      "learning_rate": 4.6401413427561844e-05,
      "loss": 2.1991,
      "step": 5600
    },
    {
      "epoch": 2.36,
      "eval_cer": 0.5373152203668218,
      "eval_loss": 3.5865066051483154,
      "eval_runtime": 360.1614,
      "eval_samples_per_second": 26.316,
      "eval_steps_per_second": 3.29,
      "step": 5600
    },
    {
      "epoch": 2.4,
      "grad_norm": 6.418155670166016,
      "learning_rate": 4.6330742049469965e-05,
      "loss": 2.3905,
      "step": 5700
    },
    {
      "epoch": 2.4,
      "eval_cer": 0.527748719252278,
      "eval_loss": 2.6406588554382324,
      "eval_runtime": 379.816,
      "eval_samples_per_second": 24.954,
      "eval_steps_per_second": 3.12,
      "step": 5700
    },
    {
      "epoch": 2.44,
      "grad_norm": 2.4806301593780518,
      "learning_rate": 4.626007067137809e-05,
      "loss": 3.3218,
      "step": 5800
    },
    {
      "epoch": 2.44,
      "eval_cer": 0.5217727112744907,
      "eval_loss": 1.8109639883041382,
      "eval_runtime": 351.8213,
      "eval_samples_per_second": 26.94,
      "eval_steps_per_second": 3.368,
      "step": 5800
    },
    {
      "epoch": 2.48,
      "grad_norm": 2.4776511192321777,
      "learning_rate": 4.618939929328622e-05,
      "loss": 2.3876,
      "step": 5900
    },
    {
      "epoch": 2.48,
      "eval_cer": 0.5089310156036134,
      "eval_loss": 3.134875774383545,
      "eval_runtime": 372.2436,
      "eval_samples_per_second": 25.462,
      "eval_steps_per_second": 3.183,
      "step": 5900
    },
    {
      "epoch": 2.53,
      "grad_norm": 9.97956657409668,
      "learning_rate": 4.611872791519435e-05,
      "loss": 4.1527,
      "step": 6000
    },
    {
      "epoch": 2.53,
      "eval_cer": 0.4981815337687224,
      "eval_loss": 1.7425427436828613,
      "eval_runtime": 363.658,
      "eval_samples_per_second": 26.063,
      "eval_steps_per_second": 3.259,
      "step": 6000
    },
    {
      "epoch": 2.57,
      "grad_norm": 3.353976249694824,
      "learning_rate": 4.604805653710248e-05,
      "loss": 1.7398,
      "step": 6100
    },
    {
      "epoch": 2.57,
      "eval_cer": 0.5255465175394001,
      "eval_loss": 2.6437551975250244,
      "eval_runtime": 380.4559,
      "eval_samples_per_second": 24.912,
      "eval_steps_per_second": 3.115,
      "step": 6100
    },
    {
      "epoch": 2.61,
      "grad_norm": 2.671917676925659,
      "learning_rate": 4.5978091872791526e-05,
      "loss": 2.2359,
      "step": 6200
    },
    {
      "epoch": 2.61,
      "eval_cer": 0.537452094169176,
      "eval_loss": 3.4509007930755615,
      "eval_runtime": 364.3511,
      "eval_samples_per_second": 26.013,
      "eval_steps_per_second": 3.252,
      "step": 6200
    },
    {
      "epoch": 2.65,
      "grad_norm": 10.607972145080566,
      "learning_rate": 4.590742049469965e-05,
      "loss": 3.6419,
      "step": 6300
    },
    {
      "epoch": 2.65,
      "eval_cer": 0.5374105431934614,
      "eval_loss": 2.2622523307800293,
      "eval_runtime": 370.8325,
      "eval_samples_per_second": 25.559,
      "eval_steps_per_second": 3.196,
      "step": 6300
    },
    {
      "epoch": 2.69,
      "grad_norm": 4.678300857543945,
      "learning_rate": 4.5836749116607775e-05,
      "loss": 1.7309,
      "step": 6400
    },
    {
      "epoch": 2.69,
      "eval_cer": 0.5418393883696375,
      "eval_loss": 1.8007577657699585,
      "eval_runtime": 357.8741,
      "eval_samples_per_second": 26.484,
      "eval_steps_per_second": 3.311,
      "step": 6400
    },
    {
      "epoch": 2.74,
      "grad_norm": 3.6259689331054688,
      "learning_rate": 4.57660777385159e-05,
      "loss": 2.784,
      "step": 6500
    },
    {
      "epoch": 2.74,
      "eval_cer": 0.5228799225685347,
      "eval_loss": 2.9312586784362793,
      "eval_runtime": 371.6433,
      "eval_samples_per_second": 25.503,
      "eval_steps_per_second": 3.189,
      "step": 6500
    },
    {
      "epoch": 2.78,
      "grad_norm": 1.8296568393707275,
      "learning_rate": 4.569540636042403e-05,
      "loss": 2.4269,
      "step": 6600
    },
    {
      "epoch": 2.78,
      "eval_cer": 0.5211445582886864,
      "eval_loss": 3.522822618484497,
      "eval_runtime": 359.0335,
      "eval_samples_per_second": 26.399,
      "eval_steps_per_second": 3.301,
      "step": 6600
    },
    {
      "epoch": 2.82,
      "grad_norm": 23.777395248413086,
      "learning_rate": 4.562473498233216e-05,
      "loss": 1.5294,
      "step": 6700
    },
    {
      "epoch": 2.82,
      "eval_cer": 0.5097864768683275,
      "eval_loss": 2.758403778076172,
      "eval_runtime": 375.8984,
      "eval_samples_per_second": 25.214,
      "eval_steps_per_second": 3.152,
      "step": 6700
    },
    {
      "epoch": 2.86,
      "grad_norm": 5.520616054534912,
      "learning_rate": 4.555406360424028e-05,
      "loss": 1.6177,
      "step": 6800
    },
    {
      "epoch": 2.86,
      "eval_cer": 0.5417905048687967,
      "eval_loss": 2.8678946495056152,
      "eval_runtime": 360.269,
      "eval_samples_per_second": 26.308,
      "eval_steps_per_second": 3.289,
      "step": 6800
    },
    {
      "epoch": 2.9,
      "grad_norm": 12.704540252685547,
      "learning_rate": 4.5483392226148416e-05,
      "loss": 2.6892,
      "step": 6900
    },
    {
      "epoch": 2.9,
      "eval_cer": 0.5306426225020531,
      "eval_loss": 2.7854502201080322,
      "eval_runtime": 372.5558,
      "eval_samples_per_second": 25.44,
      "eval_steps_per_second": 3.181,
      "step": 6900
    },
    {
      "epoch": 2.95,
      "grad_norm": 3.1302738189697266,
      "learning_rate": 4.541272084805654e-05,
      "loss": 3.1467,
      "step": 7000
    },
    {
      "epoch": 2.95,
      "eval_cer": 0.5148948027061906,
      "eval_loss": 2.9890220165252686,
      "eval_runtime": 359.2287,
      "eval_samples_per_second": 26.384,
      "eval_steps_per_second": 3.299,
      "step": 7000
    },
    {
      "epoch": 2.99,
      "grad_norm": 1.750848412513733,
      "learning_rate": 4.5342049469964665e-05,
      "loss": 2.1825,
      "step": 7100
    },
    {
      "epoch": 2.99,
      "eval_cer": 0.5565606546478432,
      "eval_loss": 3.195390224456787,
      "eval_runtime": 366.6828,
      "eval_samples_per_second": 25.848,
      "eval_steps_per_second": 3.232,
      "step": 7100
    }
  ],
  "logging_steps": 100,
  "max_steps": 71250,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 30,
  "save_steps": 100,
  "total_flos": 7.774285790233235e+19,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}