{
  "best_metric": 2.3717236518859863,
  "best_model_checkpoint": "output/queen/checkpoint-136",
  "epoch": 2.0,
  "global_step": 136,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07,
      "learning_rate": 0.00013537785052914354,
      "loss": 3.396,
      "step": 5
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.0001300082017869573,
      "loss": 3.2987,
      "step": 10
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.0001213763104094255,
      "loss": 2.8576,
      "step": 15
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.000109940736055617,
      "loss": 2.9968,
      "step": 20
    },
    {
      "epoch": 0.37,
      "learning_rate": 9.630898093421199e-05,
      "loss": 2.9324,
      "step": 25
    },
    {
      "epoch": 0.44,
      "learning_rate": 8.120521692221673e-05,
      "loss": 2.9931,
      "step": 30
    },
    {
      "epoch": 0.51,
      "learning_rate": 6.543181473690228e-05,
      "loss": 2.8919,
      "step": 35
    },
    {
      "epoch": 0.59,
      "learning_rate": 4.982671888105512e-05,
      "loss": 2.7977,
      "step": 40
    },
    {
      "epoch": 0.66,
      "learning_rate": 3.5218932770313667e-05,
      "loss": 2.9689,
      "step": 45
    },
    {
      "epoch": 0.74,
      "learning_rate": 2.2384478845846175e-05,
      "loss": 2.756,
      "step": 50
    },
    {
      "epoch": 0.81,
      "learning_rate": 1.200517324255647e-05,
      "loss": 2.8716,
      "step": 55
    },
    {
      "epoch": 0.88,
      "learning_rate": 4.6324050628611986e-06,
      "loss": 2.9236,
      "step": 60
    },
    {
      "epoch": 0.96,
      "learning_rate": 6.578444419609313e-07,
      "loss": 2.8184,
      "step": 65
    },
    {
      "epoch": 1.0,
      "eval_loss": 2.78603196144104,
      "eval_runtime": 4.3884,
      "eval_samples_per_second": 21.192,
      "eval_steps_per_second": 2.734,
      "step": 68
    },
    {
      "epoch": 1.03,
      "learning_rate": 2.926355061606279e-07,
      "loss": 2.8201,
      "step": 70
    },
    {
      "epoch": 1.1,
      "learning_rate": 3.55617957525975e-06,
      "loss": 2.7876,
      "step": 75
    },
    {
      "epoch": 1.18,
      "learning_rate": 1.0275104488948473e-05,
      "loss": 2.8738,
      "step": 80
    },
    {
      "epoch": 1.25,
      "learning_rate": 2.009247481060283e-05,
      "loss": 2.6439,
      "step": 85
    },
    {
      "epoch": 1.32,
      "learning_rate": 3.2486753626613365e-05,
      "loss": 2.5993,
      "step": 90
    },
    {
      "epoch": 1.4,
      "learning_rate": 4.679950861668151e-05,
      "loss": 2.7072,
      "step": 95
    },
    {
      "epoch": 1.47,
      "learning_rate": 6.227039054081748e-05,
      "loss": 2.7465,
      "step": 100
    },
    {
      "epoch": 1.54,
      "learning_rate": 7.807752594969911e-05,
      "loss": 2.5926,
      "step": 105
    },
    {
      "epoch": 1.62,
      "learning_rate": 9.338117830043867e-05,
      "loss": 2.7178,
      "step": 110
    },
    {
      "epoch": 1.69,
      "learning_rate": 0.00010736835802938978,
      "loss": 2.7027,
      "step": 115
    },
    {
      "epoch": 1.76,
      "learning_rate": 0.00011929601172133719,
      "loss": 2.4457,
      "step": 120
    },
    {
      "epoch": 1.84,
      "learning_rate": 0.00012853049599834097,
      "loss": 2.6806,
      "step": 125
    },
    {
      "epoch": 1.91,
      "learning_rate": 0.00013458123912165538,
      "loss": 2.4508,
      "step": 130
    },
    {
      "epoch": 1.99,
      "learning_rate": 0.00013712680207144277,
      "loss": 2.6492,
      "step": 135
    },
    {
      "epoch": 2.0,
      "eval_loss": 2.3717236518859863,
      "eval_runtime": 4.3106,
      "eval_samples_per_second": 22.039,
      "eval_steps_per_second": 2.784,
      "step": 136
    }
  ],
  "max_steps": 136,
  "num_train_epochs": 2,
  "total_flos": 141097697280000.0,
  "trial_name": null,
  "trial_params": null
}