Llama-3.1-8B-Instruct-SFT-100 / trainer_state.json
{
  "best_metric": 1.191369891166687,
  "best_model_checkpoint": "saves/Llama-3.1-8B-Instruct/lora/sft-100/checkpoint-50",
  "epoch": 8.88888888888889,
  "eval_steps": 50,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.7777777777777777,
      "grad_norm": 6.377140522003174,
      "learning_rate": 4.849231551964771e-06,
      "loss": 1.6881,
      "step": 10
    },
    {
      "epoch": 3.5555555555555554,
      "grad_norm": 5.378269195556641,
      "learning_rate": 3.7500000000000005e-06,
      "loss": 1.4551,
      "step": 20
    },
    {
      "epoch": 5.333333333333333,
      "grad_norm": 3.855208158493042,
      "learning_rate": 2.0658795558326745e-06,
      "loss": 1.1462,
      "step": 30
    },
    {
      "epoch": 7.111111111111111,
      "grad_norm": 3.9790968894958496,
      "learning_rate": 5.848888922025553e-07,
      "loss": 1.0317,
      "step": 40
    },
    {
      "epoch": 8.88888888888889,
      "grad_norm": 3.3717496395111084,
      "learning_rate": 6.089874350439507e-09,
      "loss": 1.0038,
      "step": 50
    },
    {
      "epoch": 8.88888888888889,
      "eval_loss": 1.191369891166687,
      "eval_runtime": 0.2516,
      "eval_samples_per_second": 39.753,
      "eval_steps_per_second": 19.877,
      "step": 50
    },
    {
      "epoch": 8.88888888888889,
      "step": 50,
      "total_flos": 4594288692953088.0,
      "train_loss": 1.2649920654296876,
      "train_runtime": 69.6409,
      "train_samples_per_second": 12.923,
      "train_steps_per_second": 0.718
    }
  ],
  "logging_steps": 10,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4594288692953088.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
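
For quick offline inspection, the state above can be parsed with a short script. This is a minimal sketch, not part of the original repository: it assumes the file has been downloaded locally as "trainer_state.json" (adjust the path as needed) and uses only the standard-library json module.

# Minimal sketch: summarize a downloaded trainer_state.json.
# The local filename below is an assumption, not from the repository.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

print(f"best checkpoint: {state['best_model_checkpoint']}")
print(f"best eval loss:  {state['best_metric']:.4f}")

# log_history mixes three record types: periodic training logs (keyed by
# "loss"), evaluation records ("eval_loss"), and a final run summary
# ("train_loss"), which this loop skips.
for record in state["log_history"]:
    if "loss" in record:
        print(f"step {record['step']:>3}  epoch {record['epoch']:.2f}  "
              f"loss {record['loss']:.4f}  lr {record['learning_rate']:.2e}")
    elif "eval_loss" in record:
        print(f"step {record['step']:>3}  eval_loss {record['eval_loss']:.4f}")

Run against this file, the loop prints the five training-loss entries logged every 10 steps (logging_steps is 10) followed by the single evaluation at step 50, where best_metric was recorded.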