{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.256410256410255,
  "eval_steps": 100,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.5128205128205128,
      "grad_norm": 485.6934509277344,
      "learning_rate": 9.9907e-06,
      "loss": 3.7897,
      "step": 100
    },
    {
      "epoch": 0.5128205128205128,
      "eval_loss": 2.913217067718506,
      "eval_runtime": 36.1065,
      "eval_samples_per_second": 10.884,
      "eval_steps_per_second": 1.385,
      "step": 100
    },
    {
      "epoch": 1.0256410256410255,
      "grad_norm": 35.502872467041016,
      "learning_rate": 9.980700000000001e-06,
      "loss": 2.6449,
      "step": 200
    },
    {
      "epoch": 1.0256410256410255,
      "eval_loss": 2.1936488151550293,
      "eval_runtime": 35.8618,
      "eval_samples_per_second": 10.959,
      "eval_steps_per_second": 1.394,
      "step": 200
    },
    {
      "epoch": 1.5384615384615383,
      "grad_norm": 44.5572624206543,
      "learning_rate": 9.970700000000001e-06,
      "loss": 2.123,
      "step": 300
    },
    {
      "epoch": 1.5384615384615383,
      "eval_loss": 1.9975601434707642,
      "eval_runtime": 36.0033,
      "eval_samples_per_second": 10.916,
      "eval_steps_per_second": 1.389,
      "step": 300
    },
    {
      "epoch": 2.051282051282051,
      "grad_norm": 91.06848907470703,
      "learning_rate": 9.960700000000001e-06,
      "loss": 1.9616,
      "step": 400
    },
    {
      "epoch": 2.051282051282051,
      "eval_loss": 1.9457911252975464,
      "eval_runtime": 35.8591,
      "eval_samples_per_second": 10.96,
      "eval_steps_per_second": 1.394,
      "step": 400
    },
    {
      "epoch": 2.564102564102564,
      "grad_norm": 62.52458953857422,
      "learning_rate": 9.950700000000001e-06,
      "loss": 1.9329,
      "step": 500
    },
    {
      "epoch": 2.564102564102564,
      "eval_loss": 1.8983820676803589,
      "eval_runtime": 35.7607,
      "eval_samples_per_second": 10.99,
      "eval_steps_per_second": 1.398,
      "step": 500
    },
    {
      "epoch": 3.076923076923077,
      "grad_norm": 46.58191680908203,
      "learning_rate": 9.940700000000001e-06,
      "loss": 1.8998,
      "step": 600
    },
    {
      "epoch": 3.076923076923077,
      "eval_loss": 1.8464109897613525,
      "eval_runtime": 35.5376,
      "eval_samples_per_second": 11.059,
      "eval_steps_per_second": 1.407,
      "step": 600
    },
    {
      "epoch": 3.58974358974359,
      "grad_norm": 43.859432220458984,
      "learning_rate": 9.930700000000001e-06,
      "loss": 1.8632,
      "step": 700
    },
    {
      "epoch": 3.58974358974359,
      "eval_loss": 1.8351906538009644,
      "eval_runtime": 35.8606,
      "eval_samples_per_second": 10.959,
      "eval_steps_per_second": 1.394,
      "step": 700
    },
    {
      "epoch": 4.102564102564102,
      "grad_norm": 36.12472152709961,
      "learning_rate": 9.920700000000001e-06,
      "loss": 1.8202,
      "step": 800
    },
    {
      "epoch": 4.102564102564102,
      "eval_loss": 1.7837625741958618,
      "eval_runtime": 35.8551,
      "eval_samples_per_second": 10.961,
      "eval_steps_per_second": 1.395,
      "step": 800
    },
    {
      "epoch": 4.615384615384615,
      "grad_norm": 56.74811553955078,
      "learning_rate": 9.910700000000002e-06,
      "loss": 1.7767,
      "step": 900
    },
    {
      "epoch": 4.615384615384615,
      "eval_loss": 1.777076005935669,
      "eval_runtime": 35.652,
      "eval_samples_per_second": 11.023,
      "eval_steps_per_second": 1.402,
      "step": 900
    },
    {
      "epoch": 5.128205128205128,
      "grad_norm": 97.7017822265625,
      "learning_rate": 9.900700000000002e-06,
      "loss": 1.7757,
      "step": 1000
    },
    {
      "epoch": 5.128205128205128,
      "eval_loss": 1.7516826391220093,
      "eval_runtime": 35.6867,
      "eval_samples_per_second": 11.013,
      "eval_steps_per_second": 1.401,
      "step": 1000
    },
    {
      "epoch": 5.641025641025641,
      "grad_norm": 21.942626953125,
      "learning_rate": 9.8907e-06,
      "loss": 1.7592,
      "step": 1100
    },
    {
      "epoch": 5.641025641025641,
      "eval_loss": 1.7459185123443604,
      "eval_runtime": 35.7882,
      "eval_samples_per_second": 10.981,
      "eval_steps_per_second": 1.397,
      "step": 1100
    },
    {
      "epoch": 6.153846153846154,
      "grad_norm": 31.696691513061523,
      "learning_rate": 9.8807e-06,
      "loss": 1.8098,
      "step": 1200
    },
    {
      "epoch": 6.153846153846154,
      "eval_loss": 1.7705851793289185,
      "eval_runtime": 35.6241,
      "eval_samples_per_second": 11.032,
      "eval_steps_per_second": 1.404,
      "step": 1200
    },
    {
      "epoch": 6.666666666666667,
      "grad_norm": 69.51777648925781,
      "learning_rate": 9.8707e-06,
      "loss": 1.6903,
      "step": 1300
    },
    {
      "epoch": 6.666666666666667,
      "eval_loss": 1.717505693435669,
      "eval_runtime": 35.7343,
      "eval_samples_per_second": 10.998,
      "eval_steps_per_second": 1.399,
      "step": 1300
    },
    {
      "epoch": 7.17948717948718,
      "grad_norm": 159.8116455078125,
      "learning_rate": 9.8607e-06,
      "loss": 1.7428,
      "step": 1400
    },
    {
      "epoch": 7.17948717948718,
      "eval_loss": 1.7039239406585693,
      "eval_runtime": 35.6477,
      "eval_samples_per_second": 11.025,
      "eval_steps_per_second": 1.403,
      "step": 1400
    },
    {
      "epoch": 7.6923076923076925,
      "grad_norm": 52.08290100097656,
      "learning_rate": 9.8507e-06,
      "loss": 1.6913,
      "step": 1500
    },
    {
      "epoch": 7.6923076923076925,
      "eval_loss": 1.6931686401367188,
      "eval_runtime": 35.6627,
      "eval_samples_per_second": 11.02,
      "eval_steps_per_second": 1.402,
      "step": 1500
    },
    {
      "epoch": 8.205128205128204,
      "grad_norm": 67.04603576660156,
      "learning_rate": 9.8407e-06,
      "loss": 1.7072,
      "step": 1600
    },
    {
      "epoch": 8.205128205128204,
      "eval_loss": 1.6624616384506226,
      "eval_runtime": 35.7439,
      "eval_samples_per_second": 10.995,
      "eval_steps_per_second": 1.399,
      "step": 1600
    },
    {
      "epoch": 8.717948717948717,
      "grad_norm": 22.715839385986328,
      "learning_rate": 9.8307e-06,
      "loss": 1.6593,
      "step": 1700
    },
    {
      "epoch": 8.717948717948717,
      "eval_loss": 1.6604528427124023,
      "eval_runtime": 35.8353,
      "eval_samples_per_second": 10.967,
      "eval_steps_per_second": 1.395,
      "step": 1700
    },
    {
      "epoch": 9.23076923076923,
      "grad_norm": 36.652496337890625,
      "learning_rate": 9.8207e-06,
      "loss": 1.7015,
      "step": 1800
    },
    {
      "epoch": 9.23076923076923,
      "eval_loss": 1.691542148590088,
      "eval_runtime": 35.7641,
      "eval_samples_per_second": 10.989,
      "eval_steps_per_second": 1.398,
      "step": 1800
    },
    {
      "epoch": 9.743589743589745,
      "grad_norm": 55.04361343383789,
      "learning_rate": 9.8107e-06,
      "loss": 1.6452,
      "step": 1900
    },
    {
      "epoch": 9.743589743589745,
      "eval_loss": 1.64271080493927,
      "eval_runtime": 35.6018,
      "eval_samples_per_second": 11.039,
      "eval_steps_per_second": 1.404,
      "step": 1900
    },
    {
      "epoch": 10.256410256410255,
      "grad_norm": 48.84671401977539,
      "learning_rate": 9.800700000000001e-06,
      "loss": 1.6642,
      "step": 2000
    },
    {
      "epoch": 10.256410256410255,
      "eval_loss": 1.6221907138824463,
      "eval_runtime": 35.7515,
      "eval_samples_per_second": 10.993,
      "eval_steps_per_second": 1.399,
      "step": 2000
    }
  ],
  "logging_steps": 100,
  "max_steps": 100000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 513,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 7.64486332416e+18,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}