{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 50.0,
  "global_step": 3100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_loss": 39.687320709228516,
      "eval_runtime": 2.8335,
      "eval_samples_per_second": 43.056,
      "eval_steps_per_second": 5.647,
      "step": 62
    },
    {
      "epoch": 2.0,
      "eval_loss": 39.516841888427734,
      "eval_runtime": 2.9783,
      "eval_samples_per_second": 40.963,
      "eval_steps_per_second": 5.372,
      "step": 124
    },
    {
      "epoch": 3.0,
      "eval_loss": 38.932273864746094,
      "eval_runtime": 3.1472,
      "eval_samples_per_second": 38.765,
      "eval_steps_per_second": 5.084,
      "step": 186
    },
    {
      "epoch": 4.0,
      "eval_loss": 37.797115325927734,
      "eval_runtime": 3.1415,
      "eval_samples_per_second": 38.835,
      "eval_steps_per_second": 5.093,
      "step": 248
    },
    {
      "epoch": 5.0,
      "eval_loss": 36.89667510986328,
      "eval_runtime": 3.1742,
      "eval_samples_per_second": 38.435,
      "eval_steps_per_second": 5.041,
      "step": 310
    },
    {
      "epoch": 6.0,
      "eval_loss": 35.38439178466797,
      "eval_runtime": 3.1525,
      "eval_samples_per_second": 38.699,
      "eval_steps_per_second": 5.075,
      "step": 372
    },
    {
      "epoch": 7.0,
      "eval_loss": 34.643463134765625,
      "eval_runtime": 3.1776,
      "eval_samples_per_second": 38.394,
      "eval_steps_per_second": 5.035,
      "step": 434
    },
    {
      "epoch": 8.0,
      "eval_loss": 33.6444206237793,
      "eval_runtime": 3.1827,
      "eval_samples_per_second": 38.332,
      "eval_steps_per_second": 5.027,
      "step": 496
    },
    {
      "epoch": 8.06,
      "learning_rate": 5.870967741935484e-08,
      "loss": 39.4735,
      "step": 500
    },
    {
      "epoch": 9.0,
      "eval_loss": 32.891536712646484,
      "eval_runtime": 3.2424,
      "eval_samples_per_second": 37.627,
      "eval_steps_per_second": 4.935,
      "step": 558
    },
    {
      "epoch": 10.0,
      "eval_loss": 31.138931274414062,
      "eval_runtime": 3.1172,
      "eval_samples_per_second": 39.138,
      "eval_steps_per_second": 5.133,
      "step": 620
    },
    {
      "epoch": 11.0,
      "eval_loss": 30.020915985107422,
      "eval_runtime": 3.1861,
      "eval_samples_per_second": 38.291,
      "eval_steps_per_second": 5.022,
      "step": 682
    },
    {
      "epoch": 12.0,
      "eval_loss": 28.798078536987305,
      "eval_runtime": 3.158,
      "eval_samples_per_second": 38.632,
      "eval_steps_per_second": 5.066,
      "step": 744
    },
    {
      "epoch": 13.0,
      "eval_loss": 27.6728458404541,
      "eval_runtime": 3.1725,
      "eval_samples_per_second": 38.456,
      "eval_steps_per_second": 5.043,
      "step": 806
    },
    {
      "epoch": 14.0,
      "eval_loss": 26.61111068725586,
      "eval_runtime": 3.1711,
      "eval_samples_per_second": 38.472,
      "eval_steps_per_second": 5.046,
      "step": 868
    },
    {
      "epoch": 15.0,
      "eval_loss": 25.55615234375,
      "eval_runtime": 3.1645,
      "eval_samples_per_second": 38.553,
      "eval_steps_per_second": 5.056,
      "step": 930
    },
    {
      "epoch": 16.0,
      "eval_loss": 24.428285598754883,
      "eval_runtime": 3.1894,
      "eval_samples_per_second": 38.251,
      "eval_steps_per_second": 5.017,
      "step": 992
    },
    {
      "epoch": 16.13,
      "learning_rate": 4.741935483870968e-08,
      "loss": 29.1909,
      "step": 1000
    },
    {
      "epoch": 17.0,
      "eval_loss": 23.227224349975586,
      "eval_runtime": 3.3011,
      "eval_samples_per_second": 36.957,
      "eval_steps_per_second": 4.847,
      "step": 1054
    },
    {
      "epoch": 18.0,
      "eval_loss": 22.856584548950195,
      "eval_runtime": 3.1351,
      "eval_samples_per_second": 38.914,
      "eval_steps_per_second": 5.103,
      "step": 1116
    },
    {
      "epoch": 19.0,
      "eval_loss": 22.174100875854492,
      "eval_runtime": 3.2113,
      "eval_samples_per_second": 37.99,
      "eval_steps_per_second": 4.982,
      "step": 1178
    },
    {
      "epoch": 20.0,
      "eval_loss": 21.41921615600586,
      "eval_runtime": 3.1698,
      "eval_samples_per_second": 38.488,
      "eval_steps_per_second": 5.048,
      "step": 1240
    },
    {
      "epoch": 21.0,
      "eval_loss": 20.49114227294922,
      "eval_runtime": 3.202,
      "eval_samples_per_second": 38.101,
      "eval_steps_per_second": 4.997,
      "step": 1302
    },
    {
      "epoch": 22.0,
      "eval_loss": 20.245670318603516,
      "eval_runtime": 3.1786,
      "eval_samples_per_second": 38.382,
      "eval_steps_per_second": 5.034,
      "step": 1364
    },
    {
      "epoch": 23.0,
      "eval_loss": 19.58565902709961,
      "eval_runtime": 3.205,
      "eval_samples_per_second": 38.065,
      "eval_steps_per_second": 4.992,
      "step": 1426
    },
    {
      "epoch": 24.0,
      "eval_loss": 19.287837982177734,
      "eval_runtime": 3.1902,
      "eval_samples_per_second": 38.242,
      "eval_steps_per_second": 5.015,
      "step": 1488
    },
    {
      "epoch": 24.19,
      "learning_rate": 3.612903225806452e-08,
      "loss": 21.7917,
      "step": 1500
    },
    {
      "epoch": 25.0,
      "eval_loss": 18.806514739990234,
      "eval_runtime": 3.3778,
      "eval_samples_per_second": 36.118,
      "eval_steps_per_second": 4.737,
      "step": 1550
    },
    {
      "epoch": 26.0,
      "eval_loss": 18.336509704589844,
      "eval_runtime": 3.1436,
      "eval_samples_per_second": 38.81,
      "eval_steps_per_second": 5.09,
      "step": 1612
    },
    {
      "epoch": 27.0,
      "eval_loss": 18.16900062561035,
      "eval_runtime": 3.2259,
      "eval_samples_per_second": 37.819,
      "eval_steps_per_second": 4.96,
      "step": 1674
    },
    {
      "epoch": 28.0,
      "eval_loss": 17.55986213684082,
      "eval_runtime": 3.1926,
      "eval_samples_per_second": 38.213,
      "eval_steps_per_second": 5.012,
      "step": 1736
    },
    {
      "epoch": 29.0,
      "eval_loss": 17.429519653320312,
      "eval_runtime": 3.19,
      "eval_samples_per_second": 38.245,
      "eval_steps_per_second": 5.016,
      "step": 1798
    },
    {
      "epoch": 30.0,
      "eval_loss": 17.191831588745117,
      "eval_runtime": 3.2186,
      "eval_samples_per_second": 37.905,
      "eval_steps_per_second": 4.971,
      "step": 1860
    },
    {
      "epoch": 31.0,
      "eval_loss": 16.991268157958984,
      "eval_runtime": 3.2004,
      "eval_samples_per_second": 38.12,
      "eval_steps_per_second": 4.999,
      "step": 1922
    },
    {
      "epoch": 32.0,
      "eval_loss": 16.737815856933594,
      "eval_runtime": 3.2002,
      "eval_samples_per_second": 38.122,
      "eval_steps_per_second": 5.0,
      "step": 1984
    },
    {
      "epoch": 32.26,
      "learning_rate": 2.483870967741936e-08,
      "loss": 18.0017,
      "step": 2000
    },
    {
      "epoch": 33.0,
      "eval_loss": 16.585847854614258,
      "eval_runtime": 3.3866,
      "eval_samples_per_second": 36.024,
      "eval_steps_per_second": 4.725,
      "step": 2046
    },
    {
      "epoch": 34.0,
      "eval_loss": 16.41578483581543,
      "eval_runtime": 3.1469,
      "eval_samples_per_second": 38.769,
      "eval_steps_per_second": 5.084,
      "step": 2108
    },
    {
      "epoch": 35.0,
      "eval_loss": 16.26023292541504,
      "eval_runtime": 3.2254,
      "eval_samples_per_second": 37.825,
      "eval_steps_per_second": 4.961,
      "step": 2170
    },
    {
      "epoch": 36.0,
      "eval_loss": 16.033435821533203,
      "eval_runtime": 3.1846,
      "eval_samples_per_second": 38.309,
      "eval_steps_per_second": 5.024,
      "step": 2232
    },
    {
      "epoch": 37.0,
      "eval_loss": 16.080913543701172,
      "eval_runtime": 3.2091,
      "eval_samples_per_second": 38.017,
      "eval_steps_per_second": 4.986,
      "step": 2294
    },
    {
      "epoch": 38.0,
      "eval_loss": 15.7175931930542,
      "eval_runtime": 3.1978,
      "eval_samples_per_second": 38.151,
      "eval_steps_per_second": 5.003,
      "step": 2356
    },
    {
      "epoch": 39.0,
      "eval_loss": 15.824816703796387,
      "eval_runtime": 3.2153,
      "eval_samples_per_second": 37.944,
      "eval_steps_per_second": 4.976,
      "step": 2418
    },
    {
      "epoch": 40.0,
      "eval_loss": 15.852823257446289,
      "eval_runtime": 3.1937,
      "eval_samples_per_second": 38.2,
      "eval_steps_per_second": 5.01,
      "step": 2480
    },
    {
      "epoch": 40.32,
      "learning_rate": 1.3548387096774193e-08,
      "loss": 16.124,
      "step": 2500
    },
    {
      "epoch": 41.0,
      "eval_loss": 15.77718734741211,
      "eval_runtime": 3.377,
      "eval_samples_per_second": 36.127,
      "eval_steps_per_second": 4.738,
      "step": 2542
    },
    {
      "epoch": 42.0,
      "eval_loss": 15.797329902648926,
      "eval_runtime": 3.1524,
      "eval_samples_per_second": 38.701,
      "eval_steps_per_second": 5.075,
      "step": 2604
    },
    {
      "epoch": 43.0,
      "eval_loss": 15.45718002319336,
      "eval_runtime": 3.2372,
      "eval_samples_per_second": 37.686,
      "eval_steps_per_second": 4.942,
      "step": 2666
    },
    {
      "epoch": 44.0,
      "eval_loss": 15.622541427612305,
      "eval_runtime": 3.2026,
      "eval_samples_per_second": 38.094,
      "eval_steps_per_second": 4.996,
      "step": 2728
    },
    {
      "epoch": 45.0,
      "eval_loss": 15.430057525634766,
      "eval_runtime": 3.1974,
      "eval_samples_per_second": 38.156,
      "eval_steps_per_second": 5.004,
      "step": 2790
    },
    {
      "epoch": 46.0,
      "eval_loss": 15.735359191894531,
      "eval_runtime": 3.2169,
      "eval_samples_per_second": 37.925,
      "eval_steps_per_second": 4.974,
      "step": 2852
    },
    {
      "epoch": 47.0,
      "eval_loss": 15.529631614685059,
      "eval_runtime": 3.2177,
      "eval_samples_per_second": 37.916,
      "eval_steps_per_second": 4.973,
      "step": 2914
    },
    {
      "epoch": 48.0,
      "eval_loss": 15.421342849731445,
      "eval_runtime": 3.225,
      "eval_samples_per_second": 37.829,
      "eval_steps_per_second": 4.961,
      "step": 2976
    },
    {
      "epoch": 48.39,
      "learning_rate": 2.2580645161290324e-09,
      "loss": 15.3787,
      "step": 3000
    },
    {
      "epoch": 49.0,
      "eval_loss": 15.512802124023438,
      "eval_runtime": 3.3809,
      "eval_samples_per_second": 36.085,
      "eval_steps_per_second": 4.732,
      "step": 3038
    },
    {
      "epoch": 50.0,
      "eval_loss": 15.596056938171387,
      "eval_runtime": 3.151,
      "eval_samples_per_second": 38.718,
      "eval_steps_per_second": 5.078,
      "step": 3100
    },
    {
      "epoch": 50.0,
      "step": 3100,
      "total_flos": 1620261687859200.0,
      "train_loss": 23.064505024571574,
      "train_runtime": 2425.9558,
      "train_samples_per_second": 10.12,
      "train_steps_per_second": 1.278
    }
  ],
  "max_steps": 3100,
  "num_train_epochs": 50,
  "total_flos": 1620261687859200.0,
  "trial_name": null,
  "trial_params": null
}