{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 7.861635220125786,
"eval_steps": 500,
"global_step": 2500,
"is_hyper_param_search": true,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.0,
"eval_accuracy": 0.5958064516129032,
"eval_loss": 0.1949465274810791,
"eval_runtime": 1.357,
"eval_samples_per_second": 2284.501,
"eval_steps_per_second": 47.901,
"step": 318
},
{
"epoch": 1.57,
"learning_rate": 1.606918238993711e-05,
"loss": 0.3113,
"step": 500
},
{
"epoch": 2.0,
"eval_accuracy": 0.8164516129032258,
"eval_loss": 0.09700000286102295,
"eval_runtime": 1.3602,
"eval_samples_per_second": 2279.158,
"eval_steps_per_second": 47.789,
"step": 636
},
{
"epoch": 3.0,
"eval_accuracy": 0.8754838709677419,
"eval_loss": 0.0647290050983429,
"eval_runtime": 1.3748,
"eval_samples_per_second": 2254.826,
"eval_steps_per_second": 47.279,
"step": 954
},
{
"epoch": 3.14,
"learning_rate": 1.2138364779874214e-05,
"loss": 0.1105,
"step": 1000
},
{
"epoch": 4.0,
"eval_accuracy": 0.8951612903225806,
"eval_loss": 0.05031874030828476,
"eval_runtime": 1.3686,
"eval_samples_per_second": 2265.046,
"eval_steps_per_second": 47.493,
"step": 1272
},
{
"epoch": 4.72,
"learning_rate": 8.207547169811321e-06,
"loss": 0.0722,
"step": 1500
},
{
"epoch": 5.0,
"eval_accuracy": 0.9035483870967742,
"eval_loss": 0.04259829595685005,
"eval_runtime": 1.3733,
"eval_samples_per_second": 2257.258,
"eval_steps_per_second": 47.33,
"step": 1590
},
{
"epoch": 6.0,
"eval_accuracy": 0.91,
"eval_loss": 0.03827530890703201,
"eval_runtime": 1.3751,
"eval_samples_per_second": 2254.357,
"eval_steps_per_second": 47.269,
"step": 1908
},
{
"epoch": 6.29,
"learning_rate": 4.276729559748428e-06,
"loss": 0.058,
"step": 2000
},
{
"epoch": 7.0,
"eval_accuracy": 0.9119354838709678,
"eval_loss": 0.035722751170396805,
"eval_runtime": 1.3775,
"eval_samples_per_second": 2250.492,
"eval_steps_per_second": 47.188,
"step": 2226
},
{
"epoch": 7.86,
"learning_rate": 3.459119496855346e-07,
"loss": 0.0523,
"step": 2500
}
],
"logging_steps": 500,
"max_steps": 2544,
"num_input_tokens_seen": 0,
"num_train_epochs": 8,
"save_steps": 500,
"total_flos": 649131947564688.0,
"train_batch_size": 48,
"trial_name": null,
"trial_params": {
"alpha": 0.05314446157998587,
"num_train_epochs": 8,
"temperature": 17
}
}