{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9979978438318189,
  "eval_steps": 500,
  "global_step": 405,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 9.523809523809524e-05,
      "loss": 2.2532,
      "step": 10
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00019047619047619048,
      "loss": 2.0208,
      "step": 20
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00019972904566786903,
      "loss": 1.4376,
      "step": 30
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00019879429836063226,
      "loss": 1.1412,
      "step": 40
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00019719866448212795,
      "loss": 1.0641,
      "step": 50
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00019495281805930367,
      "loss": 1.0379,
      "step": 60
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.0001920717827298248,
      "loss": 1.0215,
      "step": 70
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00018857483124112907,
      "loss": 1.0256,
      "step": 80
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00018448535652497073,
      "loss": 1.016,
      "step": 90
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00017983071520990315,
      "loss": 0.9956,
      "step": 100
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00017464204461852738,
      "loss": 0.9963,
      "step": 110
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.0001689540544737067,
      "loss": 1.0188,
      "step": 120
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00016280479470713344,
      "loss": 0.9989,
      "step": 130
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00015623540092349732,
      "loss": 0.9916,
      "step": 140
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00014928981922297842,
      "loss": 0.9849,
      "step": 150
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00014201451222287025,
      "loss": 0.9778,
      "step": 160
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00013445814824490805,
      "loss": 0.9728,
      "step": 170
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00012667127574748986,
      "loss": 0.9669,
      "step": 180
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.0001187059851806832,
      "loss": 0.9786,
      "step": 190
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00011061556052604578,
      "loss": 0.9766,
      "step": 200
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.00010245412285229124,
      "loss": 0.9884,
      "step": 210
    },
    {
      "epoch": 0.54,
      "learning_rate": 9.427626827124317e-05,
      "loss": 0.9762,
      "step": 220
    },
    {
      "epoch": 0.57,
      "learning_rate": 8.613670271597733e-05,
      "loss": 0.9811,
      "step": 230
    },
    {
      "epoch": 0.59,
      "learning_rate": 7.808987598431303e-05,
      "loss": 0.9803,
      "step": 240
    },
    {
      "epoch": 0.62,
      "learning_rate": 7.018961749572604e-05,
      "loss": 0.9752,
      "step": 250
    },
    {
      "epoch": 0.64,
      "learning_rate": 6.248877619829619e-05,
      "loss": 0.9642,
      "step": 260
    },
    {
      "epoch": 0.67,
      "learning_rate": 5.503886703453933e-05,
      "loss": 0.9635,
      "step": 270
    },
    {
      "epoch": 0.69,
      "learning_rate": 4.7889726331097686e-05,
      "loss": 0.9708,
      "step": 280
    },
    {
      "epoch": 0.71,
      "learning_rate": 4.1089178417567164e-05,
      "loss": 0.9692,
      "step": 290
    },
    {
      "epoch": 0.74,
      "learning_rate": 3.468271570462235e-05,
      "loss": 0.9752,
      "step": 300
    },
    {
      "epoch": 0.76,
      "learning_rate": 2.8713194361562036e-05,
      "loss": 0.9734,
      "step": 310
    },
    {
      "epoch": 0.79,
      "learning_rate": 2.3220547629048796e-05,
      "loss": 0.9758,
      "step": 320
    },
    {
      "epoch": 0.81,
      "learning_rate": 1.824151868484164e-05,
      "loss": 0.9668,
      "step": 330
    },
    {
      "epoch": 0.84,
      "learning_rate": 1.3809414849522584e-05,
      "loss": 0.9736,
      "step": 340
    },
    {
      "epoch": 0.86,
      "learning_rate": 9.953884776463652e-06,
      "loss": 0.9818,
      "step": 350
    },
    {
      "epoch": 0.89,
      "learning_rate": 6.700720116526116e-06,
      "loss": 0.9776,
      "step": 360
    },
    {
      "epoch": 0.91,
      "learning_rate": 4.071682984260638e-06,
      "loss": 0.9626,
      "step": 370
    },
    {
      "epoch": 0.94,
      "learning_rate": 2.0843603797766287e-06,
      "loss": 0.9745,
      "step": 380
    },
    {
      "epoch": 0.96,
      "learning_rate": 7.520465401290033e-07,
      "loss": 0.9609,
      "step": 390
    },
    {
      "epoch": 0.99,
      "learning_rate": 8.365400723512328e-08,
      "loss": 0.9675,
      "step": 400
    }
  ],
  "logging_steps": 10,
  "max_steps": 405,
  "num_train_epochs": 1,
  "save_steps": 50,
  "total_flos": 2.0624848885598822e+18,
  "trial_name": null,
  "trial_params": null
}