{
  "best_metric": 1.6186352968215942,
  "best_model_checkpoint": "saves/ChineseLLaMA2-7B-Chat/lora/2023-09-07-12-02-29/checkpoint-100",
  "epoch": 0.1083717149823896,
  "eval_steps": 100,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 0.0009999919374161553,
      "loss": 2.0025,
      "step": 5
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.0009999677499246417,
      "loss": 1.7737,
      "step": 10
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.0009999274383055143,
      "loss": 1.7391,
      "step": 15
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.0009998710038588363,
      "loss": 1.7959,
      "step": 20
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.0009997984484046375,
      "loss": 1.713,
      "step": 25
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.0009997097742828556,
      "loss": 1.6441,
      "step": 30
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.0009996049843532607,
      "loss": 1.704,
      "step": 35
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.0009994840819953633,
      "loss": 1.6532,
      "step": 40
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.0009993470711083048,
      "loss": 1.6791,
      "step": 45
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.0009991939561107325,
      "loss": 1.6465,
      "step": 50
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.000999024741940656,
      "loss": 1.6511,
      "step": 55
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.0009988394340552898,
      "loss": 1.6727,
      "step": 60
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.0009986380384308746,
      "loss": 1.6653,
      "step": 65
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.0009984205615624873,
      "loss": 1.6339,
      "step": 70
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.0009981870104638294,
      "loss": 1.5562,
      "step": 75
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.0009979373926670028,
      "loss": 1.6291,
      "step": 80
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.0009976717162222645,
      "loss": 1.625,
      "step": 85
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0009973899896977695,
      "loss": 1.6008,
      "step": 90
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.000997092222179292,
      "loss": 1.6821,
      "step": 95
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.0009967784232699352,
      "loss": 1.582,
      "step": 100
    },
    {
      "epoch": 0.11,
      "eval_loss": 1.6186352968215942,
      "eval_runtime": 10.6735,
      "eval_samples_per_second": 14.054,
      "eval_steps_per_second": 1.78,
      "step": 100
    }
  ],
  "logging_steps": 5,
  "max_steps": 2766,
  "num_train_epochs": 3,
  "save_steps": 100,
  "total_flos": 3.29660416131072e+16,
  "trial_name": null,
  "trial_params": null
}