{
  "best_metric": 1.146978735923767,
  "best_model_checkpoint": "./jako_mbartLarge_6p/checkpoint-4000",
  "epoch": 3.838771593090211,
  "eval_steps": 1000,
  "global_step": 8000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.24,
      "learning_rate": 4.951314508276534e-05,
      "loss": 1.88,
      "step": 500
    },
    {
      "epoch": 0.48,
      "learning_rate": 4.829600778967868e-05,
      "loss": 1.4498,
      "step": 1000
    },
    {
      "epoch": 0.48,
      "eval_bleu": 16.7947,
      "eval_gen_len": 20.0778,
      "eval_loss": 1.3302855491638184,
      "eval_runtime": 299.3898,
      "eval_samples_per_second": 13.915,
      "eval_steps_per_second": 0.872,
      "step": 1000
    },
    {
      "epoch": 0.72,
      "learning_rate": 4.707887049659202e-05,
      "loss": 1.3412,
      "step": 1500
    },
    {
      "epoch": 0.96,
      "learning_rate": 4.5861733203505356e-05,
      "loss": 1.263,
      "step": 2000
    },
    {
      "epoch": 0.96,
      "eval_bleu": 18.6214,
      "eval_gen_len": 20.6673,
      "eval_loss": 1.1883659362792969,
      "eval_runtime": 336.2683,
      "eval_samples_per_second": 12.389,
      "eval_steps_per_second": 0.776,
      "step": 2000
    },
    {
      "epoch": 1.2,
      "learning_rate": 4.4644595910418695e-05,
      "loss": 1.0852,
      "step": 2500
    },
    {
      "epoch": 1.44,
      "learning_rate": 4.342745861733204e-05,
      "loss": 0.8999,
      "step": 3000
    },
    {
      "epoch": 1.44,
      "eval_bleu": 18.9014,
      "eval_gen_len": 19.4064,
      "eval_loss": 1.1701579093933105,
      "eval_runtime": 290.3573,
      "eval_samples_per_second": 14.348,
      "eval_steps_per_second": 0.899,
      "step": 3000
    },
    {
      "epoch": 1.68,
      "learning_rate": 4.221032132424538e-05,
      "loss": 0.8646,
      "step": 3500
    },
    {
      "epoch": 1.92,
      "learning_rate": 4.099318403115872e-05,
      "loss": 0.827,
      "step": 4000
    },
    {
      "epoch": 1.92,
      "eval_bleu": 21.2511,
      "eval_gen_len": 19.566,
      "eval_loss": 1.146978735923767,
      "eval_runtime": 287.6889,
      "eval_samples_per_second": 14.481,
      "eval_steps_per_second": 0.907,
      "step": 4000
    },
    {
      "epoch": 2.16,
      "learning_rate": 3.977604673807206e-05,
      "loss": 0.7493,
      "step": 4500
    },
    {
      "epoch": 2.4,
      "learning_rate": 3.85589094449854e-05,
      "loss": 0.5948,
      "step": 5000
    },
    {
      "epoch": 2.4,
      "eval_bleu": 20.5498,
      "eval_gen_len": 19.3941,
      "eval_loss": 1.2114742994308472,
      "eval_runtime": 292.2098,
      "eval_samples_per_second": 14.257,
      "eval_steps_per_second": 0.893,
      "step": 5000
    },
    {
      "epoch": 2.64,
      "learning_rate": 3.7341772151898736e-05,
      "loss": 0.5631,
      "step": 5500
    },
    {
      "epoch": 2.88,
      "learning_rate": 3.6124634858812075e-05,
      "loss": 0.5404,
      "step": 6000
    },
    {
      "epoch": 2.88,
      "eval_bleu": 21.2187,
      "eval_gen_len": 19.4357,
      "eval_loss": 1.2166906595230103,
      "eval_runtime": 288.5479,
      "eval_samples_per_second": 14.438,
      "eval_steps_per_second": 0.905,
      "step": 6000
    },
    {
      "epoch": 3.12,
      "learning_rate": 3.4907497565725414e-05,
      "loss": 0.5082,
      "step": 6500
    },
    {
      "epoch": 3.36,
      "learning_rate": 3.369036027263875e-05,
      "loss": 0.3842,
      "step": 7000
    },
    {
      "epoch": 3.36,
      "eval_bleu": 20.7732,
      "eval_gen_len": 19.3493,
      "eval_loss": 1.271054983139038,
      "eval_runtime": 289.0747,
      "eval_samples_per_second": 14.411,
      "eval_steps_per_second": 0.903,
      "step": 7000
    },
    {
      "epoch": 3.6,
      "learning_rate": 3.247322297955209e-05,
      "loss": 0.3553,
      "step": 7500
    },
    {
      "epoch": 3.84,
      "learning_rate": 3.125608568646544e-05,
      "loss": 0.3419,
      "step": 8000
    },
    {
      "epoch": 3.84,
      "eval_bleu": 20.7001,
      "eval_gen_len": 19.3106,
      "eval_loss": 1.290993571281433,
      "eval_runtime": 283.9873,
      "eval_samples_per_second": 14.67,
      "eval_steps_per_second": 0.919,
      "step": 8000
    }
  ],
  "logging_steps": 500,
  "max_steps": 20840,
  "num_train_epochs": 10,
  "save_steps": 1000,
  "total_flos": 2.77392610295808e+17,
  "trial_name": null,
  "trial_params": null
}