{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.0,
"eval_steps": 500,
"global_step": 3210,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.78,
"learning_rate": 4.221183800623053e-05,
"loss": 2.765,
"step": 500
},
{
"epoch": 1.56,
"learning_rate": 3.442367601246106e-05,
"loss": 2.5475,
"step": 1000
},
{
"epoch": 2.34,
"learning_rate": 2.663551401869159e-05,
"loss": 2.403,
"step": 1500
},
{
"epoch": 3.12,
"learning_rate": 1.884735202492212e-05,
"loss": 2.3577,
"step": 2000
},
{
"epoch": 3.89,
"learning_rate": 1.1059190031152649e-05,
"loss": 2.3018,
"step": 2500
},
{
"epoch": 4.67,
"learning_rate": 3.2710280373831774e-06,
"loss": 2.2848,
"step": 3000
},
{
"epoch": 5.0,
"step": 3210,
"total_flos": 6704753541120000.0,
"train_loss": 2.4309686013099934,
"train_runtime": 1406.649,
"train_samples_per_second": 9.121,
"train_steps_per_second": 2.282
}
],
"logging_steps": 500,
"max_steps": 3210,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"total_flos": 6704753541120000.0,
"trial_name": null,
"trial_params": null
}