{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.00016525784355039951,
"eval_steps": 3,
"global_step": 10,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.652578435503995e-05,
"grad_norm": 1.5268281698226929,
"learning_rate": 2e-05,
"loss": 7.6901,
"step": 1
},
{
"epoch": 1.652578435503995e-05,
"eval_loss": 1.7071666717529297,
"eval_runtime": 711.2494,
"eval_samples_per_second": 35.823,
"eval_steps_per_second": 17.912,
"step": 1
},
{
"epoch": 3.30515687100799e-05,
"grad_norm": 1.5581204891204834,
"learning_rate": 4e-05,
"loss": 8.3603,
"step": 2
},
{
"epoch": 4.9577353065119854e-05,
"grad_norm": 1.373064637184143,
"learning_rate": 6e-05,
"loss": 9.0353,
"step": 3
},
{
"epoch": 4.9577353065119854e-05,
"eval_loss": 1.706829309463501,
"eval_runtime": 708.6623,
"eval_samples_per_second": 35.954,
"eval_steps_per_second": 17.978,
"step": 3
},
{
"epoch": 6.61031374201598e-05,
"grad_norm": 1.8199676275253296,
"learning_rate": 8e-05,
"loss": 7.9109,
"step": 4
},
{
"epoch": 8.262892177519976e-05,
"grad_norm": 2.0034425258636475,
"learning_rate": 0.0001,
"loss": 8.509,
"step": 5
},
{
"epoch": 9.915470613023971e-05,
"grad_norm": 1.5793169736862183,
"learning_rate": 0.00012,
"loss": 7.16,
"step": 6
},
{
"epoch": 9.915470613023971e-05,
"eval_loss": 1.6756184101104736,
"eval_runtime": 708.1939,
"eval_samples_per_second": 35.977,
"eval_steps_per_second": 17.989,
"step": 6
},
{
"epoch": 0.00011568049048527966,
"grad_norm": 2.367051362991333,
"learning_rate": 0.00014,
"loss": 6.6789,
"step": 7
},
{
"epoch": 0.0001322062748403196,
"grad_norm": 3.2180848121643066,
"learning_rate": 0.00016,
"loss": 6.5946,
"step": 8
},
{
"epoch": 0.00014873205919535955,
"grad_norm": 5.034686088562012,
"learning_rate": 0.00018,
"loss": 6.5378,
"step": 9
},
{
"epoch": 0.00014873205919535955,
"eval_loss": 1.4698232412338257,
"eval_runtime": 710.1519,
"eval_samples_per_second": 35.878,
"eval_steps_per_second": 17.94,
"step": 9
},
{
"epoch": 0.00016525784355039951,
"grad_norm": 5.680718421936035,
"learning_rate": 0.0002,
"loss": 7.4161,
"step": 10
}
],
"logging_steps": 1,
"max_steps": 10,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 3,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1632478030725120.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}