{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9925187032418954,
"eval_steps": 500,
"global_step": 150,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.19950124688279303,
"grad_norm": 4.623498128431036,
"learning_rate": 5e-06,
"loss": 0.8013,
"step": 10
},
{
"epoch": 0.39900249376558605,
"grad_norm": 2.1821805178609672,
"learning_rate": 5e-06,
"loss": 0.6798,
"step": 20
},
{
"epoch": 0.5985037406483791,
"grad_norm": 4.415531229785867,
"learning_rate": 5e-06,
"loss": 0.6411,
"step": 30
},
{
"epoch": 0.7980049875311721,
"grad_norm": 6.723453799394174,
"learning_rate": 5e-06,
"loss": 0.6274,
"step": 40
},
{
"epoch": 0.9975062344139651,
"grad_norm": 1.067405523386397,
"learning_rate": 5e-06,
"loss": 0.6123,
"step": 50
},
{
"epoch": 0.9975062344139651,
"eval_loss": 0.6066042184829712,
"eval_runtime": 54.2342,
"eval_samples_per_second": 24.892,
"eval_steps_per_second": 0.406,
"step": 50
},
{
"epoch": 1.1970074812967582,
"grad_norm": 2.541034230738679,
"learning_rate": 5e-06,
"loss": 0.6278,
"step": 60
},
{
"epoch": 1.3965087281795512,
"grad_norm": 0.6825423418485694,
"learning_rate": 5e-06,
"loss": 0.5677,
"step": 70
},
{
"epoch": 1.5960099750623442,
"grad_norm": 0.8666541576639258,
"learning_rate": 5e-06,
"loss": 0.5516,
"step": 80
},
{
"epoch": 1.7955112219451372,
"grad_norm": 0.791365005319929,
"learning_rate": 5e-06,
"loss": 0.5397,
"step": 90
},
{
"epoch": 1.9950124688279303,
"grad_norm": 1.287898310420275,
"learning_rate": 5e-06,
"loss": 0.5438,
"step": 100
},
{
"epoch": 1.9950124688279303,
"eval_loss": 0.5740869641304016,
"eval_runtime": 54.4016,
"eval_samples_per_second": 24.815,
"eval_steps_per_second": 0.404,
"step": 100
},
{
"epoch": 2.1945137157107233,
"grad_norm": 0.9888282972521568,
"learning_rate": 5e-06,
"loss": 0.5407,
"step": 110
},
{
"epoch": 2.3940149625935163,
"grad_norm": 0.8567373783067653,
"learning_rate": 5e-06,
"loss": 0.4996,
"step": 120
},
{
"epoch": 2.5935162094763093,
"grad_norm": 0.6523956470492996,
"learning_rate": 5e-06,
"loss": 0.4972,
"step": 130
},
{
"epoch": 2.7930174563591024,
"grad_norm": 0.7098096207597672,
"learning_rate": 5e-06,
"loss": 0.4926,
"step": 140
},
{
"epoch": 2.9925187032418954,
"grad_norm": 0.6470766076468314,
"learning_rate": 5e-06,
"loss": 0.4952,
"step": 150
},
{
"epoch": 2.9925187032418954,
"eval_loss": 0.5672302842140198,
"eval_runtime": 53.8678,
"eval_samples_per_second": 25.061,
"eval_steps_per_second": 0.408,
"step": 150
},
{
"epoch": 2.9925187032418954,
"step": 150,
"total_flos": 251046207160320.0,
"train_loss": 0.5811795616149902,
"train_runtime": 8937.3206,
"train_samples_per_second": 8.608,
"train_steps_per_second": 0.017
}
],
"logging_steps": 10,
"max_steps": 150,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 251046207160320.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}