{
  "best_metric": 0.8761239051818848,
  "best_model_checkpoint": "./tst-translation-output/checkpoint-3000",
  "epoch": 1.8217701533323214,
  "eval_steps": 1500,
  "global_step": 3000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.3,
      "learning_rate": 5e-05,
      "loss": 1.562,
      "step": 500
    },
    {
      "epoch": 0.61,
      "learning_rate": 4.956224829276834e-05,
      "loss": 1.118,
      "step": 1000
    },
    {
      "epoch": 0.91,
      "learning_rate": 4.912449658553668e-05,
      "loss": 1.014,
      "step": 1500
    },
    {
      "epoch": 0.91,
      "eval_bleu": 5.9697,
      "eval_gen_len": 18.2307,
      "eval_loss": 0.9561266303062439,
      "eval_runtime": 1131.8043,
      "eval_samples_per_second": 11.638,
      "eval_steps_per_second": 1.455,
      "step": 1500
    },
    {
      "epoch": 1.21,
      "learning_rate": 4.868674487830503e-05,
      "loss": 0.8557,
      "step": 2000
    },
    {
      "epoch": 1.52,
      "learning_rate": 4.824899317107337e-05,
      "loss": 0.7997,
      "step": 2500
    },
    {
      "epoch": 1.82,
      "learning_rate": 4.781124146384171e-05,
      "loss": 0.7888,
      "step": 3000
    },
    {
      "epoch": 1.82,
      "eval_bleu": 10.9388,
      "eval_gen_len": 17.4752,
      "eval_loss": 0.8761239051818848,
      "eval_runtime": 1004.0048,
      "eval_samples_per_second": 13.119,
      "eval_steps_per_second": 1.64,
      "step": 3000
    }
  ],
  "logging_steps": 500,
  "max_steps": 57610,
  "num_train_epochs": 35,
  "save_steps": 1500,
  "total_flos": 4.16088915443712e+17,
  "trial_name": null,
  "trial_params": null
}