{
  "best_metric": 0.90346360206604,
  "best_model_checkpoint": "outputs/checkpoint-231",
  "epoch": 11.983805668016194,
  "eval_steps": 500,
  "global_step": 555,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.4318488529014845,
      "grad_norm": 0.5469953417778015,
      "learning_rate": 2e-05,
      "loss": 1.9745,
      "step": 20
    },
    {
      "epoch": 0.863697705802969,
      "grad_norm": 0.48892971873283386,
      "learning_rate": 4e-05,
      "loss": 1.8762,
      "step": 40
    },
    {
      "epoch": 0.9932523616734144,
      "eval_loss": 1.6047619581222534,
      "eval_runtime": 12.7198,
      "eval_samples_per_second": 29.246,
      "eval_steps_per_second": 3.695,
      "step": 46
    },
    {
      "epoch": 1.2955465587044535,
      "grad_norm": 0.5274947285652161,
      "learning_rate": 6e-05,
      "loss": 1.7913,
      "step": 60
    },
    {
      "epoch": 1.7273954116059378,
      "grad_norm": 0.6796801686286926,
      "learning_rate": 8e-05,
      "loss": 1.6087,
      "step": 80
    },
    {
      "epoch": 1.9865047233468287,
      "eval_loss": 1.2794835567474365,
      "eval_runtime": 12.7229,
      "eval_samples_per_second": 29.239,
      "eval_steps_per_second": 3.694,
      "step": 92
    },
    {
      "epoch": 2.1592442645074224,
      "grad_norm": 0.9777641892433167,
      "learning_rate": 0.0001,
      "loss": 1.5142,
      "step": 100
    },
    {
      "epoch": 2.591093117408907,
      "grad_norm": 1.0962361097335815,
      "learning_rate": 9.985329005918702e-05,
      "loss": 1.3395,
      "step": 120
    },
    {
      "epoch": 2.979757085020243,
      "eval_loss": 1.0361310243606567,
      "eval_runtime": 12.6733,
      "eval_samples_per_second": 29.353,
      "eval_steps_per_second": 3.709,
      "step": 138
    },
    {
      "epoch": 3.0229419703103915,
      "grad_norm": 1.30574369430542,
      "learning_rate": 9.941402118901744e-05,
      "loss": 1.2941,
      "step": 140
    },
    {
      "epoch": 3.454790823211876,
      "grad_norm": 1.4931496381759644,
      "learning_rate": 9.868477119388896e-05,
      "loss": 1.1092,
      "step": 160
    },
    {
      "epoch": 3.8866396761133606,
      "grad_norm": 1.4002817869186401,
      "learning_rate": 9.766981960274653e-05,
      "loss": 1.0864,
      "step": 180
    },
    {
      "epoch": 3.9946018893387314,
      "eval_loss": 0.932435154914856,
      "eval_runtime": 12.7232,
      "eval_samples_per_second": 29.238,
      "eval_steps_per_second": 3.694,
      "step": 185
    },
    {
      "epoch": 4.318488529014845,
      "grad_norm": 1.869638442993164,
      "learning_rate": 9.637512255510475e-05,
      "loss": 0.9429,
      "step": 200
    },
    {
      "epoch": 4.75033738191633,
      "grad_norm": 1.8662426471710205,
      "learning_rate": 9.480827784805278e-05,
      "loss": 0.9713,
      "step": 220
    },
    {
      "epoch": 4.987854251012146,
      "eval_loss": 0.90346360206604,
      "eval_runtime": 12.6762,
      "eval_samples_per_second": 29.346,
      "eval_steps_per_second": 3.708,
      "step": 231
    },
    {
      "epoch": 5.182186234817814,
      "grad_norm": 1.7677403688430786,
      "learning_rate": 9.297848034936006e-05,
      "loss": 0.9134,
      "step": 240
    },
    {
      "epoch": 5.614035087719298,
      "grad_norm": 1.6769498586654663,
      "learning_rate": 9.089646803833589e-05,
      "loss": 0.8171,
      "step": 260
    },
    {
      "epoch": 5.98110661268556,
      "eval_loss": 0.9262576103210449,
      "eval_runtime": 12.7331,
      "eval_samples_per_second": 29.215,
      "eval_steps_per_second": 3.691,
      "step": 277
    },
    {
      "epoch": 6.045883940620783,
      "grad_norm": 1.4469077587127686,
      "learning_rate": 8.857445899109715e-05,
      "loss": 0.7771,
      "step": 280
    },
    {
      "epoch": 6.477732793522267,
      "grad_norm": 1.5637768507003784,
      "learning_rate": 8.602607968003935e-05,
      "loss": 0.6662,
      "step": 300
    },
    {
      "epoch": 6.909581646423752,
      "grad_norm": 1.5962625741958618,
      "learning_rate": 8.326628500827826e-05,
      "loss": 0.736,
      "step": 320
    },
    {
      "epoch": 6.995951417004049,
      "eval_loss": 1.029656171798706,
      "eval_runtime": 12.6598,
      "eval_samples_per_second": 29.384,
      "eval_steps_per_second": 3.713,
      "step": 324
    },
    {
      "epoch": 7.341430499325236,
      "grad_norm": 2.1342411041259766,
      "learning_rate": 8.03112705483319e-05,
      "loss": 0.6183,
      "step": 340
    },
    {
      "epoch": 7.77327935222672,
      "grad_norm": 1.800087809562683,
      "learning_rate": 7.717837750006106e-05,
      "loss": 0.6515,
      "step": 360
    },
    {
      "epoch": 7.989203778677463,
      "eval_loss": 1.0591753721237183,
      "eval_runtime": 12.6625,
      "eval_samples_per_second": 29.378,
      "eval_steps_per_second": 3.712,
      "step": 370
    },
    {
      "epoch": 8.205128205128204,
      "grad_norm": 1.779543399810791,
      "learning_rate": 7.388599092561315e-05,
      "loss": 0.5783,
      "step": 380
    },
    {
      "epoch": 8.63697705802969,
      "grad_norm": 2.137484550476074,
      "learning_rate": 7.045343185856701e-05,
      "loss": 0.5429,
      "step": 400
    },
    {
      "epoch": 8.982456140350877,
      "eval_loss": 1.1770082712173462,
      "eval_runtime": 12.6696,
      "eval_samples_per_second": 29.362,
      "eval_steps_per_second": 3.71,
      "step": 416
    },
    {
      "epoch": 9.068825910931174,
      "grad_norm": 1.5784409046173096,
      "learning_rate": 6.690084392042513e-05,
      "loss": 0.5434,
      "step": 420
    },
    {
      "epoch": 9.50067476383266,
      "grad_norm": 1.5823142528533936,
      "learning_rate": 6.32490751098331e-05,
      "loss": 0.4707,
      "step": 440
    },
    {
      "epoch": 9.932523616734143,
      "grad_norm": 1.8034058809280396,
      "learning_rate": 5.951955545823342e-05,
      "loss": 0.4917,
      "step": 460
    },
    {
      "epoch": 9.997300944669366,
      "eval_loss": 1.2135237455368042,
      "eval_runtime": 12.6734,
      "eval_samples_per_second": 29.353,
      "eval_steps_per_second": 3.709,
      "step": 463
    },
    {
      "epoch": 10.364372469635628,
      "grad_norm": 1.660635232925415,
      "learning_rate": 5.573417126992003e-05,
      "loss": 0.4104,
      "step": 480
    },
    {
      "epoch": 10.796221322537113,
      "grad_norm": 1.7648595571517944,
      "learning_rate": 5.191513668450178e-05,
      "loss": 0.4114,
      "step": 500
    },
    {
      "epoch": 10.99055330634278,
      "eval_loss": 1.3011534214019775,
      "eval_runtime": 12.6994,
      "eval_samples_per_second": 29.293,
      "eval_steps_per_second": 3.701,
      "step": 509
    },
    {
      "epoch": 11.228070175438596,
      "grad_norm": 1.8220391273498535,
      "learning_rate": 4.8084863315498234e-05,
      "loss": 0.4249,
      "step": 520
    },
    {
      "epoch": 11.65991902834008,
      "grad_norm": 1.9576756954193115,
      "learning_rate": 4.4265828730079987e-05,
      "loss": 0.386,
      "step": 540
    },
    {
      "epoch": 11.983805668016194,
      "eval_loss": 1.4017869234085083,
      "eval_runtime": 12.6911,
      "eval_samples_per_second": 29.312,
      "eval_steps_per_second": 3.703,
      "step": 555
    }
  ],
  "logging_steps": 20,
  "max_steps": 920,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 500,
  "total_flos": 4.280269211892941e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}