{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0475020475020473,
  "eval_steps": 500,
  "global_step": 10000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1,
      "learning_rate": 4.8293748293748295e-05,
      "loss": 4.3223,
      "step": 500
    },
    {
      "epoch": 0.2,
      "learning_rate": 4.658749658749659e-05,
      "loss": 4.2808,
      "step": 1000
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.4881244881244886e-05,
      "loss": 4.2576,
      "step": 1500
    },
    {
      "epoch": 0.41,
      "learning_rate": 4.317499317499318e-05,
      "loss": 4.1952,
      "step": 2000
    },
    {
      "epoch": 0.51,
      "learning_rate": 4.146874146874147e-05,
      "loss": 4.133,
      "step": 2500
    },
    {
      "epoch": 0.61,
      "learning_rate": 3.976248976248976e-05,
      "loss": 4.0789,
      "step": 3000
    },
    {
      "epoch": 0.72,
      "learning_rate": 3.8056238056238055e-05,
      "loss": 4.0146,
      "step": 3500
    },
    {
      "epoch": 0.82,
      "learning_rate": 3.6349986349986354e-05,
      "loss": 3.9639,
      "step": 4000
    },
    {
      "epoch": 0.92,
      "learning_rate": 3.4643734643734647e-05,
      "loss": 3.9444,
      "step": 4500
    },
    {
      "epoch": 1.02,
      "learning_rate": 3.293748293748294e-05,
      "loss": 3.8477,
      "step": 5000
    },
    {
      "epoch": 1.13,
      "learning_rate": 3.123123123123123e-05,
      "loss": 3.7137,
      "step": 5500
    },
    {
      "epoch": 1.23,
      "learning_rate": 2.952497952497953e-05,
      "loss": 3.6799,
      "step": 6000
    },
    {
      "epoch": 1.33,
      "learning_rate": 2.781872781872782e-05,
      "loss": 3.674,
      "step": 6500
    },
    {
      "epoch": 1.43,
      "learning_rate": 2.6112476112476115e-05,
      "loss": 3.6476,
      "step": 7000
    },
    {
      "epoch": 1.54,
      "learning_rate": 2.4406224406224407e-05,
      "loss": 3.6145,
      "step": 7500
    },
    {
      "epoch": 1.64,
      "learning_rate": 2.2699972699972703e-05,
      "loss": 3.5609,
      "step": 8000
    },
    {
      "epoch": 1.74,
      "learning_rate": 2.0993720993720995e-05,
      "loss": 3.5179,
      "step": 8500
    },
    {
      "epoch": 1.84,
      "learning_rate": 1.928746928746929e-05,
      "loss": 3.5089,
      "step": 9000
    },
    {
      "epoch": 1.95,
      "learning_rate": 1.7581217581217583e-05,
      "loss": 3.5192,
      "step": 9500
    },
    {
      "epoch": 2.05,
      "learning_rate": 1.5874965874965875e-05,
      "loss": 3.39,
      "step": 10000
    }
  ],
  "logging_steps": 500,
  "max_steps": 14652,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 10000,
  "total_flos": 1.0451158695936e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}