{
  "best_metric": 1.4045040607452393,
  "best_model_checkpoint": "/content/drive/MyDrive/Graduation_Models/Summary_Model/peftmodel/output/checkpoint-9210",
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 9210,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.27,
      "learning_rate": 4.728555917480999e-05,
      "loss": 1.5128,
      "step": 500
    },
    {
      "epoch": 0.54,
      "learning_rate": 4.457111834961998e-05,
      "loss": 1.4734,
      "step": 1000
    },
    {
      "epoch": 0.81,
      "learning_rate": 4.185667752442997e-05,
      "loss": 1.4822,
      "step": 1500
    },
    {
      "epoch": 1.0,
      "eval_gen_len": 17.18192918192918,
      "eval_loss": 1.412906527519226,
      "eval_rouge1": 46.4948,
      "eval_rouge2": 22.3122,
      "eval_rougeL": 38.6849,
      "eval_rougeLsum": 42.8536,
      "eval_runtime": 136.9551,
      "eval_samples_per_second": 5.98,
      "eval_steps_per_second": 0.752,
      "step": 1842
    },
    {
      "epoch": 1.09,
      "learning_rate": 3.914223669923996e-05,
      "loss": 1.4543,
      "step": 2000
    },
    {
      "epoch": 1.36,
      "learning_rate": 3.642779587404995e-05,
      "loss": 1.4473,
      "step": 2500
    },
    {
      "epoch": 1.63,
      "learning_rate": 3.3713355048859935e-05,
      "loss": 1.4735,
      "step": 3000
    },
    {
      "epoch": 1.9,
      "learning_rate": 3.099891422366993e-05,
      "loss": 1.4562,
      "step": 3500
    },
    {
      "epoch": 2.0,
      "eval_gen_len": 17.29059829059829,
      "eval_loss": 1.4075654745101929,
      "eval_rouge1": 46.4026,
      "eval_rouge2": 22.2865,
      "eval_rougeL": 38.4756,
      "eval_rougeLsum": 42.5601,
      "eval_runtime": 130.4435,
      "eval_samples_per_second": 6.279,
      "eval_steps_per_second": 0.79,
      "step": 3684
    },
    {
      "epoch": 2.17,
      "learning_rate": 2.8284473398479917e-05,
      "loss": 1.4496,
      "step": 4000
    },
    {
      "epoch": 2.44,
      "learning_rate": 2.5570032573289905e-05,
      "loss": 1.4404,
      "step": 4500
    },
    {
      "epoch": 2.71,
      "learning_rate": 2.2855591748099893e-05,
      "loss": 1.4567,
      "step": 5000
    },
    {
      "epoch": 2.99,
      "learning_rate": 2.0141150922909884e-05,
      "loss": 1.4513,
      "step": 5500
    },
    {
      "epoch": 3.0,
      "eval_gen_len": 17.056166056166056,
      "eval_loss": 1.4049729108810425,
      "eval_rouge1": 46.7241,
      "eval_rouge2": 22.6258,
      "eval_rougeL": 38.9944,
      "eval_rougeLsum": 42.8509,
      "eval_runtime": 127.7298,
      "eval_samples_per_second": 6.412,
      "eval_steps_per_second": 0.806,
      "step": 5526
    },
    {
      "epoch": 3.26,
      "learning_rate": 1.742671009771987e-05,
      "loss": 1.4434,
      "step": 6000
    },
    {
      "epoch": 3.53,
      "learning_rate": 1.471226927252986e-05,
      "loss": 1.4436,
      "step": 6500
    },
    {
      "epoch": 3.8,
      "learning_rate": 1.1997828447339848e-05,
      "loss": 1.4416,
      "step": 7000
    },
    {
      "epoch": 4.0,
      "eval_gen_len": 17.194139194139193,
      "eval_loss": 1.4050428867340088,
      "eval_rouge1": 46.5545,
      "eval_rouge2": 22.5187,
      "eval_rougeL": 38.8127,
      "eval_rougeLsum": 42.7317,
      "eval_runtime": 127.6425,
      "eval_samples_per_second": 6.416,
      "eval_steps_per_second": 0.807,
      "step": 7368
    },
    {
      "epoch": 4.07,
      "learning_rate": 9.283387622149838e-06,
      "loss": 1.4425,
      "step": 7500
    },
    {
      "epoch": 4.34,
      "learning_rate": 6.568946796959827e-06,
      "loss": 1.44,
      "step": 8000
    },
    {
      "epoch": 4.61,
      "learning_rate": 3.854505971769816e-06,
      "loss": 1.4523,
      "step": 8500
    },
    {
      "epoch": 4.89,
      "learning_rate": 1.1400651465798045e-06,
      "loss": 1.4356,
      "step": 9000
    },
    {
      "epoch": 5.0,
      "eval_gen_len": 17.158730158730158,
      "eval_loss": 1.4045040607452393,
      "eval_rouge1": 46.5502,
      "eval_rouge2": 22.5448,
      "eval_rougeL": 38.7975,
      "eval_rougeLsum": 42.6867,
      "eval_runtime": 128.8557,
      "eval_samples_per_second": 6.356,
      "eval_steps_per_second": 0.799,
      "step": 9210
    }
  ],
  "logging_steps": 500,
  "max_steps": 9210,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 5.124003128672256e+16,
  "trial_name": null,
  "trial_params": null
}