|
{
  "best_metric": 2.4751908779144287,
  "best_model_checkpoint": "output/6ix9ine/checkpoint-205",
  "epoch": 5.0,
  "global_step": 205,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.12,
      "learning_rate": 4.742060843007589e-06,
      "loss": 3.316,
      "step": 5
    },
    {
      "epoch": 0.24,
      "learning_rate": 1.8312641592473922e-05,
      "loss": 3.5414,
      "step": 10
    },
    {
      "epoch": 0.36,
      "learning_rate": 3.88355754965355e-05,
      "loss": 3.3994,
      "step": 15
    },
    {
      "epoch": 0.48,
      "learning_rate": 6.347351557997133e-05,
      "loss": 3.2221,
      "step": 20
    },
    {
      "epoch": 0.6,
      "learning_rate": 8.882020496458803e-05,
      "loss": 3.2784,
      "step": 25
    },
    {
      "epoch": 0.71,
      "learning_rate": 0.00011137140040750911,
      "loss": 3.376,
      "step": 30
    },
    {
      "epoch": 0.83,
      "learning_rate": 0.00012800934269961248,
      "loss": 3.0065,
      "step": 35
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.0001364337946790438,
      "loss": 3.118,
      "step": 40
    },
    {
      "epoch": 1.0,
      "eval_loss": 3.159865617752075,
      "eval_runtime": 2.6213,
      "eval_samples_per_second": 20.982,
      "eval_steps_per_second": 2.67,
      "step": 42
    },
    {
      "epoch": 1.07,
      "learning_rate": 0.0001354800547756731,
      "loss": 3.1208,
      "step": 45
    },
    {
      "epoch": 1.19,
      "learning_rate": 0.00012527997991807724,
      "loss": 2.8559,
      "step": 50
    },
    {
      "epoch": 1.31,
      "learning_rate": 0.00010724375598316453,
      "loss": 2.9927,
      "step": 55
    },
    {
      "epoch": 1.43,
      "learning_rate": 8.386493606940326e-05,
      "loss": 2.9799,
      "step": 60
    },
    {
      "epoch": 1.55,
      "learning_rate": 5.8375700540314535e-05,
      "loss": 2.9474,
      "step": 65
    },
    {
      "epoch": 1.67,
      "learning_rate": 3.4300000000000054e-05,
      "loss": 2.8929,
      "step": 70
    },
    {
      "epoch": 1.79,
      "learning_rate": 1.496636030269317e-05,
      "loss": 2.821,
      "step": 75
    },
    {
      "epoch": 1.9,
      "learning_rate": 3.04770552307078e-06,
      "loss": 2.7814,
      "step": 80
    },
    {
      "epoch": 2.0,
      "eval_loss": 3.0470073223114014,
      "eval_runtime": 2.6251,
      "eval_samples_per_second": 20.952,
      "eval_steps_per_second": 2.667,
      "step": 84
    },
    {
      "epoch": 2.02,
      "learning_rate": 1.9181951337103552e-07,
      "loss": 2.7319,
      "step": 85
    },
    {
      "epoch": 2.14,
      "learning_rate": 6.793535661894039e-06,
      "loss": 2.7967,
      "step": 90
    },
    {
      "epoch": 2.26,
      "learning_rate": 2.194015018891494e-05,
      "loss": 2.9303,
      "step": 95
    },
    {
      "epoch": 2.38,
      "learning_rate": 4.3537605728465236e-05,
      "loss": 2.6571,
      "step": 100
    },
    {
      "epoch": 2.5,
      "learning_rate": 6.859999999999997e-05,
      "loss": 2.816,
      "step": 105
    },
    {
      "epoch": 2.62,
      "learning_rate": 9.36623942715347e-05,
      "loss": 2.8496,
      "step": 110
    },
    {
      "epoch": 2.74,
      "learning_rate": 0.00011525984981108502,
      "loss": 2.9189,
      "step": 115
    },
    {
      "epoch": 2.86,
      "learning_rate": 0.00013040646433810595,
      "loss": 2.8699,
      "step": 120
    },
    {
      "epoch": 2.98,
      "learning_rate": 0.00013700818048662894,
      "loss": 2.5821,
      "step": 125
    },
    {
      "epoch": 3.0,
      "eval_loss": 2.900149345397949,
      "eval_runtime": 2.5511,
      "eval_samples_per_second": 20.775,
      "eval_steps_per_second": 2.744,
      "step": 126
    },
    {
      "epoch": 3.1,
      "learning_rate": 0.00013415229447692926,
      "loss": 2.6428,
      "step": 130
    },
    {
      "epoch": 3.21,
      "learning_rate": 0.00012223363969730686,
      "loss": 2.6782,
      "step": 135
    },
    {
      "epoch": 3.33,
      "learning_rate": 0.00010290000000000009,
      "loss": 2.6065,
      "step": 140
    },
    {
      "epoch": 3.45,
      "learning_rate": 7.882429945968553e-05,
      "loss": 2.5807,
      "step": 145
    },
    {
      "epoch": 3.57,
      "learning_rate": 5.333506393059687e-05,
      "loss": 2.5988,
      "step": 150
    },
    {
      "epoch": 3.69,
      "learning_rate": 2.9956244016835525e-05,
      "loss": 2.5281,
      "step": 155
    },
    {
      "epoch": 3.81,
      "learning_rate": 1.1920020081922795e-05,
      "loss": 2.6676,
      "step": 160
    },
    {
      "epoch": 3.93,
      "learning_rate": 1.7199452243269073e-06,
      "loss": 2.6813,
      "step": 165
    },
    {
      "epoch": 4.0,
      "eval_loss": 2.566208600997925,
      "eval_runtime": 2.6398,
      "eval_samples_per_second": 20.835,
      "eval_steps_per_second": 2.652,
      "step": 168
    },
    {
      "epoch": 4.15,
      "learning_rate": 9.63352496067802e-06,
      "loss": 2.2438,
      "step": 170
    },
    {
      "epoch": 4.27,
      "learning_rate": 2.7012936807688713e-05,
      "loss": 2.4722,
      "step": 175
    },
    {
      "epoch": 4.39,
      "learning_rate": 5.042226894930893e-05,
      "loss": 2.5159,
      "step": 180
    },
    {
      "epoch": 4.51,
      "learning_rate": 7.646728298233023e-05,
      "loss": 2.2916,
      "step": 185
    },
    {
      "epoch": 4.63,
      "learning_rate": 0.00010137157954994105,
      "loss": 2.3591,
      "step": 190
    },
    {
      "epoch": 4.76,
      "learning_rate": 0.00012152415773578518,
      "loss": 2.7456,
      "step": 195
    },
    {
      "epoch": 4.88,
      "learning_rate": 0.0001340029924949682,
      "loss": 2.4208,
      "step": 200
    },
    {
      "epoch": 5.0,
      "learning_rate": 0.0001369987139612046,
      "loss": 2.3052,
      "step": 205
    },
    {
      "epoch": 5.0,
      "eval_loss": 2.4751908779144287,
      "eval_runtime": 3.015,
      "eval_samples_per_second": 21.227,
      "eval_steps_per_second": 2.653,
      "step": 205
    }
  ],
  "max_steps": 205,
  "num_train_epochs": 5,
  "total_flos": 210470731776000.0,
  "trial_name": null,
  "trial_params": null
}
|
|