{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9333333333333336,
  "eval_steps": 5,
  "global_step": 33,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09,
      "learning_rate": 1.25e-05,
      "loss": 1.9699,
      "step": 1
    },
    {
      "epoch": 0.18,
      "learning_rate": 2.5e-05,
      "loss": 1.9491,
      "step": 2
    },
    {
      "epoch": 0.36,
      "learning_rate": 5e-05,
      "loss": 1.7319,
      "step": 4
    },
    {
      "epoch": 0.44,
      "eval_loss": 1.5441049337387085,
      "eval_runtime": 8.6083,
      "eval_samples_per_second": 18.122,
      "eval_steps_per_second": 0.581,
      "step": 5
    },
    {
      "epoch": 0.53,
      "learning_rate": 4.941551389275217e-05,
      "loss": 1.5341,
      "step": 6
    },
    {
      "epoch": 0.71,
      "learning_rate": 4.768938549177393e-05,
      "loss": 1.4557,
      "step": 8
    },
    {
      "epoch": 0.89,
      "learning_rate": 4.4902326642641095e-05,
      "loss": 1.3425,
      "step": 10
    },
    {
      "epoch": 0.89,
      "eval_loss": 1.2967758178710938,
      "eval_runtime": 8.8757,
      "eval_samples_per_second": 17.576,
      "eval_steps_per_second": 0.563,
      "step": 10
    },
    {
      "epoch": 1.07,
      "learning_rate": 4.118465711954569e-05,
      "loss": 1.2552,
      "step": 12
    },
    {
      "epoch": 1.24,
      "learning_rate": 3.671021101749476e-05,
      "loss": 1.1709,
      "step": 14
    },
    {
      "epoch": 1.33,
      "eval_loss": 1.2151137590408325,
      "eval_runtime": 8.1047,
      "eval_samples_per_second": 19.248,
      "eval_steps_per_second": 0.617,
      "step": 15
    },
    {
      "epoch": 1.42,
      "learning_rate": 3.168820846323053e-05,
      "loss": 1.1595,
      "step": 16
    },
    {
      "epoch": 1.6,
      "learning_rate": 2.635347271463544e-05,
      "loss": 1.1061,
      "step": 18
    },
    {
      "epoch": 1.78,
      "learning_rate": 2.0955450086180882e-05,
      "loss": 1.0994,
      "step": 20
    },
    {
      "epoch": 1.78,
      "eval_loss": 1.160514235496521,
      "eval_runtime": 7.964,
      "eval_samples_per_second": 19.588,
      "eval_steps_per_second": 0.628,
      "step": 20
    },
    {
      "epoch": 1.96,
      "learning_rate": 1.574654611650214e-05,
      "loss": 1.0804,
      "step": 22
    },
    {
      "epoch": 2.13,
      "learning_rate": 1.0970323365940444e-05,
      "loss": 1.0287,
      "step": 24
    },
    {
      "epoch": 2.22,
      "eval_loss": 1.1381587982177734,
      "eval_runtime": 7.9285,
      "eval_samples_per_second": 19.676,
      "eval_steps_per_second": 0.631,
      "step": 25
    },
    {
      "epoch": 2.31,
      "learning_rate": 6.8501127019217346e-06,
      "loss": 1.0266,
      "step": 26
    },
    {
      "epoch": 2.49,
      "learning_rate": 3.578570595810274e-06,
      "loss": 1.0038,
      "step": 28
    },
    {
      "epoch": 2.67,
      "learning_rate": 1.3086707204299414e-06,
      "loss": 1.0303,
      "step": 30
    },
    {
      "epoch": 2.67,
      "eval_loss": 1.1314786672592163,
      "eval_runtime": 7.9656,
      "eval_samples_per_second": 19.584,
      "eval_steps_per_second": 0.628,
      "step": 30
    },
    {
      "epoch": 2.84,
      "learning_rate": 1.4655107114101007e-07,
      "loss": 1.0081,
      "step": 32
    },
    {
      "epoch": 2.93,
      "step": 33,
      "total_flos": 17934172815360.0,
      "train_loss": 1.2418649593989055,
      "train_runtime": 753.4405,
      "train_samples_per_second": 5.626,
      "train_steps_per_second": 0.044
    }
  ],
  "logging_steps": 2,
  "max_steps": 33,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 17934172815360.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}