{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.979683972911964,
  "eval_steps": 500,
  "global_step": 165,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.18058690744920994,
      "grad_norm": 4.922645091392012,
      "learning_rate": 5e-06,
      "loss": 0.7687,
      "step": 10
    },
    {
      "epoch": 0.3611738148984199,
      "grad_norm": 0.9395652150073383,
      "learning_rate": 5e-06,
      "loss": 0.671,
      "step": 20
    },
    {
      "epoch": 0.5417607223476298,
      "grad_norm": 1.046038701193381,
      "learning_rate": 5e-06,
      "loss": 0.6367,
      "step": 30
    },
    {
      "epoch": 0.7223476297968398,
      "grad_norm": 0.594532259113549,
      "learning_rate": 5e-06,
      "loss": 0.6256,
      "step": 40
    },
    {
      "epoch": 0.9029345372460497,
      "grad_norm": 0.5730093717758814,
      "learning_rate": 5e-06,
      "loss": 0.6154,
      "step": 50
    },
    {
      "epoch": 0.9932279909706546,
      "eval_loss": 0.5993303656578064,
      "eval_runtime": 39.1462,
      "eval_samples_per_second": 38.062,
      "eval_steps_per_second": 0.613,
      "step": 55
    },
    {
      "epoch": 1.0835214446952597,
      "grad_norm": 0.7543972397193625,
      "learning_rate": 5e-06,
      "loss": 0.6293,
      "step": 60
    },
    {
      "epoch": 1.2641083521444696,
      "grad_norm": 0.6738473289304389,
      "learning_rate": 5e-06,
      "loss": 0.5656,
      "step": 70
    },
    {
      "epoch": 1.4446952595936795,
      "grad_norm": 0.7714560381826885,
      "learning_rate": 5e-06,
      "loss": 0.5588,
      "step": 80
    },
    {
      "epoch": 1.6252821670428894,
      "grad_norm": 0.5914735318576527,
      "learning_rate": 5e-06,
      "loss": 0.561,
      "step": 90
    },
    {
      "epoch": 1.8058690744920993,
      "grad_norm": 0.6021441110951579,
      "learning_rate": 5e-06,
      "loss": 0.5526,
      "step": 100
    },
    {
      "epoch": 1.9864559819413092,
      "grad_norm": 0.6858905012136822,
      "learning_rate": 5e-06,
      "loss": 0.5525,
      "step": 110
    },
    {
      "epoch": 1.9864559819413092,
      "eval_loss": 0.5813493132591248,
      "eval_runtime": 39.0789,
      "eval_samples_per_second": 38.128,
      "eval_steps_per_second": 0.614,
      "step": 110
    },
    {
      "epoch": 2.1670428893905194,
      "grad_norm": 0.8968088455298097,
      "learning_rate": 5e-06,
      "loss": 0.549,
      "step": 120
    },
    {
      "epoch": 2.3476297968397293,
      "grad_norm": 1.110817374207432,
      "learning_rate": 5e-06,
      "loss": 0.5085,
      "step": 130
    },
    {
      "epoch": 2.528216704288939,
      "grad_norm": 0.6217391415622974,
      "learning_rate": 5e-06,
      "loss": 0.5079,
      "step": 140
    },
    {
      "epoch": 2.708803611738149,
      "grad_norm": 0.7055441296859828,
      "learning_rate": 5e-06,
      "loss": 0.5073,
      "step": 150
    },
    {
      "epoch": 2.889390519187359,
      "grad_norm": 0.7396133060004412,
      "learning_rate": 5e-06,
      "loss": 0.5138,
      "step": 160
    },
    {
      "epoch": 2.979683972911964,
      "eval_loss": 0.5825901627540588,
      "eval_runtime": 38.2401,
      "eval_samples_per_second": 38.964,
      "eval_steps_per_second": 0.628,
      "step": 165
    },
    {
      "epoch": 2.979683972911964,
      "step": 165,
      "total_flos": 276171765841920.0,
      "train_loss": 0.5809369983095112,
      "train_runtime": 5706.7095,
      "train_samples_per_second": 14.877,
      "train_steps_per_second": 0.029
    }
  ],
  "logging_steps": 10,
  "max_steps": 165,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 276171765841920.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}