|
{
  "best_metric": 1.4430099725723267,
  "best_model_checkpoint": "./zhko_mbartLarge_19p_run3/checkpoint-6250",
  "epoch": 3.589375448671931,
  "eval_steps": 1250,
  "global_step": 10000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.18,
      "learning_rate": 1.1666666666666665e-05,
      "loss": 2.5039,
      "step": 500
    },
    {
      "epoch": 0.36,
      "learning_rate": 2.333333333333333e-05,
      "loss": 1.957,
      "step": 1000
    },
    {
      "epoch": 0.45,
      "eval_bleu": 12.1283,
      "eval_gen_len": 15.4356,
      "eval_loss": 1.7572811841964722,
      "eval_runtime": 683.8515,
      "eval_samples_per_second": 16.293,
      "eval_steps_per_second": 1.019,
      "step": 1250
    },
    {
      "epoch": 0.54,
      "learning_rate": 3.5e-05,
      "loss": 1.815,
      "step": 1500
    },
    {
      "epoch": 0.72,
      "learning_rate": 3.4336115326251896e-05,
      "loss": 1.7411,
      "step": 2000
    },
    {
      "epoch": 0.9,
      "learning_rate": 3.367223065250379e-05,
      "loss": 1.6568,
      "step": 2500
    },
    {
      "epoch": 0.9,
      "eval_bleu": 14.3528,
      "eval_gen_len": 14.9632,
      "eval_loss": 1.5792840719223022,
      "eval_runtime": 663.5666,
      "eval_samples_per_second": 16.791,
      "eval_steps_per_second": 1.05,
      "step": 2500
    },
    {
      "epoch": 1.08,
      "learning_rate": 3.300834597875569e-05,
      "loss": 1.5955,
      "step": 3000
    },
    {
      "epoch": 1.26,
      "learning_rate": 3.234446130500758e-05,
      "loss": 1.4964,
      "step": 3500
    },
    {
      "epoch": 1.35,
      "eval_bleu": 16.0215,
      "eval_gen_len": 14.9365,
      "eval_loss": 1.4805573225021362,
      "eval_runtime": 662.6976,
      "eval_samples_per_second": 16.813,
      "eval_steps_per_second": 1.052,
      "step": 3750
    },
    {
      "epoch": 1.44,
      "learning_rate": 3.168057663125948e-05,
      "loss": 1.3799,
      "step": 4000
    },
    {
      "epoch": 1.62,
      "learning_rate": 3.101669195751138e-05,
      "loss": 1.2679,
      "step": 4500
    },
    {
      "epoch": 1.79,
      "learning_rate": 3.0352807283763275e-05,
      "loss": 1.2204,
      "step": 5000
    },
    {
      "epoch": 1.79,
      "eval_bleu": 16.5619,
      "eval_gen_len": 14.8621,
      "eval_loss": 1.4681929349899292,
      "eval_runtime": 647.0579,
      "eval_samples_per_second": 17.219,
      "eval_steps_per_second": 1.077,
      "step": 5000
    },
    {
      "epoch": 1.97,
      "learning_rate": 2.968892261001517e-05,
      "loss": 1.2044,
      "step": 5500
    },
    {
      "epoch": 2.15,
      "learning_rate": 2.9025037936267067e-05,
      "loss": 1.1678,
      "step": 6000
    },
    {
      "epoch": 2.24,
      "eval_bleu": 16.9926,
      "eval_gen_len": 14.7721,
      "eval_loss": 1.4430099725723267,
      "eval_runtime": 652.3055,
      "eval_samples_per_second": 17.081,
      "eval_steps_per_second": 1.069,
      "step": 6250
    },
    {
      "epoch": 2.33,
      "learning_rate": 2.8361153262518967e-05,
      "loss": 1.0948,
      "step": 6500
    },
    {
      "epoch": 2.51,
      "learning_rate": 2.7697268588770863e-05,
      "loss": 1.004,
      "step": 7000
    },
    {
      "epoch": 2.69,
      "learning_rate": 2.7033383915022762e-05,
      "loss": 0.9448,
      "step": 7500
    },
    {
      "epoch": 2.69,
      "eval_bleu": 17.0327,
      "eval_gen_len": 14.5641,
      "eval_loss": 1.4615132808685303,
      "eval_runtime": 639.6142,
      "eval_samples_per_second": 17.42,
      "eval_steps_per_second": 1.09,
      "step": 7500
    },
    {
      "epoch": 2.87,
      "learning_rate": 2.6369499241274658e-05,
      "loss": 0.9289,
      "step": 8000
    },
    {
      "epoch": 3.05,
      "learning_rate": 2.570561456752655e-05,
      "loss": 0.9083,
      "step": 8500
    },
    {
      "epoch": 3.14,
      "eval_bleu": 17.863,
      "eval_gen_len": 14.6946,
      "eval_loss": 1.4744235277175903,
      "eval_runtime": 639.363,
      "eval_samples_per_second": 17.427,
      "eval_steps_per_second": 1.09,
      "step": 8750
    },
    {
      "epoch": 3.23,
      "learning_rate": 2.504172989377845e-05,
      "loss": 0.8756,
      "step": 9000
    },
    {
      "epoch": 3.41,
      "learning_rate": 2.4377845220030346e-05,
      "loss": 0.8075,
      "step": 9500
    },
    {
      "epoch": 3.59,
      "learning_rate": 2.3713960546282245e-05,
      "loss": 0.7389,
      "step": 10000
    },
    {
      "epoch": 3.59,
      "eval_bleu": 17.3256,
      "eval_gen_len": 14.7824,
      "eval_loss": 1.512853741645813,
      "eval_runtime": 646.652,
      "eval_samples_per_second": 17.23,
      "eval_steps_per_second": 1.078,
      "step": 10000
    },
    {
      "epoch": 3.59,
      "step": 10000,
      "total_flos": 6.9348152573952e+17,
      "train_loss": 1.3154443878173827,
      "train_runtime": 22326.8302,
      "train_samples_per_second": 39.928,
      "train_steps_per_second": 1.248
    }
  ],
  "logging_steps": 500,
  "max_steps": 27860,
  "num_train_epochs": 10,
  "save_steps": 1250,
  "total_flos": 6.9348152573952e+17,
  "trial_name": null,
  "trial_params": null
}