{
  "best_metric": 0.00012142229388700798,
  "best_model_checkpoint": "data/further_pretraining/roberta-large/checkpoint-9356",
  "epoch": 2.0,
  "global_step": 9356,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.11,
      "learning_rate": 1.9786233433091067e-05,
      "loss": 0.0753,
      "step": 500
    },
    {
      "epoch": 0.21,
      "learning_rate": 1.9572466866182133e-05,
      "loss": 0.0589,
      "step": 1000
    },
    {
      "epoch": 0.32,
      "learning_rate": 1.9358700299273195e-05,
      "loss": 0.0558,
      "step": 1500
    },
    {
      "epoch": 0.43,
      "learning_rate": 1.914493373236426e-05,
      "loss": 0.0551,
      "step": 2000
    },
    {
      "epoch": 0.53,
      "learning_rate": 1.8931167165455325e-05,
      "loss": 0.0541,
      "step": 2500
    },
    {
      "epoch": 0.64,
      "learning_rate": 1.8717400598546387e-05,
      "loss": 0.0385,
      "step": 3000
    },
    {
      "epoch": 0.75,
      "learning_rate": 1.8503634031637453e-05,
      "loss": 0.0123,
      "step": 3500
    },
    {
      "epoch": 0.86,
      "learning_rate": 1.828986746472852e-05,
      "loss": 0.0037,
      "step": 4000
    },
    {
      "epoch": 0.96,
      "learning_rate": 1.807610089781958e-05,
      "loss": 0.0029,
      "step": 4500
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.0009945582132786512,
      "eval_runtime": 755.7373,
      "eval_samples_per_second": 99.032,
      "eval_steps_per_second": 6.19,
      "step": 4678
    },
    {
      "epoch": 1.07,
      "learning_rate": 1.7862334330910646e-05,
      "loss": 0.0017,
      "step": 5000
    },
    {
      "epoch": 1.18,
      "learning_rate": 1.764856776400171e-05,
      "loss": 0.0011,
      "step": 5500
    },
    {
      "epoch": 1.28,
      "learning_rate": 1.7434801197092777e-05,
      "loss": 0.0136,
      "step": 6000
    },
    {
      "epoch": 1.39,
      "learning_rate": 1.7221034630183842e-05,
      "loss": 0.001,
      "step": 6500
    },
    {
      "epoch": 1.5,
      "learning_rate": 1.7007268063274904e-05,
      "loss": 0.0009,
      "step": 7000
    },
    {
      "epoch": 1.6,
      "learning_rate": 1.679350149636597e-05,
      "loss": 0.0007,
      "step": 7500
    },
    {
      "epoch": 1.71,
      "learning_rate": 1.6579734929457035e-05,
      "loss": 0.0007,
      "step": 8000
    },
    {
      "epoch": 1.82,
      "learning_rate": 1.63659683625481e-05,
      "loss": 0.0013,
      "step": 8500
    },
    {
      "epoch": 1.92,
      "learning_rate": 1.6152201795639163e-05,
      "loss": 0.0012,
      "step": 9000
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.00012142229388700798,
      "eval_runtime": 758.5152,
      "eval_samples_per_second": 98.669,
      "eval_steps_per_second": 6.167,
      "step": 9356
    }
  ],
  "max_steps": 46780,
  "num_train_epochs": 10,
  "total_flos": 3.4879581646470144e+16,
  "trial_name": null,
  "trial_params": null
}