{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 252,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.11904761904761904,
      "grad_norm": 8.739304513169607,
      "learning_rate": 5.263157894736842e-07,
      "loss": 0.8721,
      "step": 10
    },
    {
      "epoch": 0.23809523809523808,
      "grad_norm": 3.049775296771708,
      "learning_rate": 1.0526315789473683e-06,
      "loss": 0.7667,
      "step": 20
    },
    {
      "epoch": 0.35714285714285715,
      "grad_norm": 1.4030875344298006,
      "learning_rate": 1.5789473684210526e-06,
      "loss": 0.6794,
      "step": 30
    },
    {
      "epoch": 0.47619047619047616,
      "grad_norm": 1.3785968299043319,
      "learning_rate": 1.9995690062269984e-06,
      "loss": 0.6273,
      "step": 40
    },
    {
      "epoch": 0.5952380952380952,
      "grad_norm": 2.874856490919847,
      "learning_rate": 1.9845231970029773e-06,
      "loss": 0.5962,
      "step": 50
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 1.7727339118671337,
      "learning_rate": 1.9482977734962752e-06,
      "loss": 0.5778,
      "step": 60
    },
    {
      "epoch": 0.8333333333333334,
      "grad_norm": 1.8345865399680614,
      "learning_rate": 1.8916720373012423e-06,
      "loss": 0.5651,
      "step": 70
    },
    {
      "epoch": 0.9523809523809523,
      "grad_norm": 1.230639934977736,
      "learning_rate": 1.8158641529616236e-06,
      "loss": 0.5532,
      "step": 80
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.06842269003391266,
      "eval_runtime": 89.8783,
      "eval_samples_per_second": 201.417,
      "eval_steps_per_second": 0.401,
      "step": 84
    },
    {
      "epoch": 1.0714285714285714,
      "grad_norm": 1.6345511825327514,
      "learning_rate": 1.7225049421328022e-06,
      "loss": 0.5392,
      "step": 90
    },
    {
      "epoch": 1.1904761904761905,
      "grad_norm": 1.2835677157215188,
      "learning_rate": 1.613602800433194e-06,
      "loss": 0.5279,
      "step": 100
    },
    {
      "epoch": 1.3095238095238095,
      "grad_norm": 1.6803587292841158,
      "learning_rate": 1.4915004917131344e-06,
      "loss": 0.5234,
      "step": 110
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 1.2882453085170966,
      "learning_rate": 1.3588247492071359e-06,
      "loss": 0.518,
      "step": 120
    },
    {
      "epoch": 1.5476190476190477,
      "grad_norm": 2.32875469479684,
      "learning_rate": 1.2184297677777462e-06,
      "loss": 0.5122,
      "step": 130
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 1.7960563320982275,
      "learning_rate": 1.073335802877504e-06,
      "loss": 0.5096,
      "step": 140
    },
    {
      "epoch": 1.7857142857142856,
      "grad_norm": 1.6933464680489778,
      "learning_rate": 9.266641971224962e-07,
      "loss": 0.5068,
      "step": 150
    },
    {
      "epoch": 1.9047619047619047,
      "grad_norm": 0.9423206924941796,
      "learning_rate": 7.815702322222537e-07,
      "loss": 0.5032,
      "step": 160
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.06419934332370758,
      "eval_runtime": 89.5881,
      "eval_samples_per_second": 202.069,
      "eval_steps_per_second": 0.402,
      "step": 168
    },
    {
      "epoch": 2.0238095238095237,
      "grad_norm": 1.0705067821333696,
      "learning_rate": 6.411752507928641e-07,
      "loss": 0.4998,
      "step": 170
    },
    {
      "epoch": 2.142857142857143,
      "grad_norm": 0.8500177067973432,
      "learning_rate": 5.084995082868657e-07,
      "loss": 0.4907,
      "step": 180
    },
    {
      "epoch": 2.261904761904762,
      "grad_norm": 0.7596030935231589,
      "learning_rate": 3.8639719956680615e-07,
      "loss": 0.4892,
      "step": 190
    },
    {
      "epoch": 2.380952380952381,
      "grad_norm": 0.5822861939530503,
      "learning_rate": 2.7749505786719795e-07,
      "loss": 0.4856,
      "step": 200
    },
    {
      "epoch": 2.5,
      "grad_norm": 0.5090518617223921,
      "learning_rate": 1.8413584703837615e-07,
      "loss": 0.4846,
      "step": 210
    },
    {
      "epoch": 2.619047619047619,
      "grad_norm": 0.4618727718359669,
      "learning_rate": 1.0832796269875755e-07,
      "loss": 0.4871,
      "step": 220
    },
    {
      "epoch": 2.738095238095238,
      "grad_norm": 0.45695002516772665,
      "learning_rate": 5.170222650372469e-08,
      "loss": 0.4861,
      "step": 230
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 0.4368551467326881,
      "learning_rate": 1.547680299702281e-08,
      "loss": 0.4855,
      "step": 240
    },
    {
      "epoch": 2.9761904761904763,
      "grad_norm": 0.42274290113964946,
      "learning_rate": 4.309937730015978e-10,
      "loss": 0.4835,
      "step": 250
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.06357227265834808,
      "eval_runtime": 88.8886,
      "eval_samples_per_second": 203.659,
      "eval_steps_per_second": 0.405,
      "step": 252
    },
    {
      "epoch": 3.0,
      "step": 252,
      "total_flos": 3375200049561600.0,
      "train_loss": 0.5500972129049755,
      "train_runtime": 15199.4807,
      "train_samples_per_second": 67.888,
      "train_steps_per_second": 0.017
    }
  ],
  "logging_steps": 10,
  "max_steps": 252,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3375200049561600.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}