{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9399232396020991,
  "eval_steps": 100,
  "global_step": 3000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "grad_norm": 7.659268856048584,
      "learning_rate": 4.000000000000001e-06,
      "loss": 1.3919,
      "step": 100
    },
    {
      "epoch": 0.03,
      "eval_loss": 1.1893327236175537,
      "eval_runtime": 192.797,
      "eval_samples_per_second": 5.187,
      "eval_steps_per_second": 5.187,
      "step": 100
    },
    {
      "epoch": 0.06,
      "grad_norm": 3.593693971633911,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.7649,
      "step": 200
    },
    {
      "epoch": 0.06,
      "eval_loss": 0.37141919136047363,
      "eval_runtime": 193.0277,
      "eval_samples_per_second": 5.181,
      "eval_steps_per_second": 5.181,
      "step": 200
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.2718026638031006,
      "learning_rate": 1.2e-05,
      "loss": 0.2492,
      "step": 300
    },
    {
      "epoch": 0.09,
      "eval_loss": 0.1235867515206337,
      "eval_runtime": 192.4574,
      "eval_samples_per_second": 5.196,
      "eval_steps_per_second": 5.196,
      "step": 300
    },
    {
      "epoch": 0.13,
      "grad_norm": 0.3505817651748657,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 0.1111,
      "step": 400
    },
    {
      "epoch": 0.13,
      "eval_loss": 0.06019064038991928,
      "eval_runtime": 193.0151,
      "eval_samples_per_second": 5.181,
      "eval_steps_per_second": 5.181,
      "step": 400
    },
    {
      "epoch": 0.16,
      "grad_norm": 2.2099411487579346,
      "learning_rate": 2e-05,
      "loss": 0.1213,
      "step": 500
    },
    {
      "epoch": 0.16,
      "eval_loss": 0.05992453545331955,
      "eval_runtime": 193.5019,
      "eval_samples_per_second": 5.168,
      "eval_steps_per_second": 5.168,
      "step": 500
    },
    {
      "epoch": 0.19,
      "grad_norm": 0.006523499730974436,
      "learning_rate": 1.9555555555555557e-05,
      "loss": 0.1161,
      "step": 600
    },
    {
      "epoch": 0.19,
      "eval_loss": 0.03172233700752258,
      "eval_runtime": 192.8437,
      "eval_samples_per_second": 5.186,
      "eval_steps_per_second": 5.186,
      "step": 600
    },
    {
      "epoch": 0.22,
      "grad_norm": 0.0743604227900505,
      "learning_rate": 1.9111111111111113e-05,
      "loss": 0.0853,
      "step": 700
    },
    {
      "epoch": 0.22,
      "eval_loss": 0.03372843191027641,
      "eval_runtime": 193.2798,
      "eval_samples_per_second": 5.174,
      "eval_steps_per_second": 5.174,
      "step": 700
    },
    {
      "epoch": 0.25,
      "grad_norm": 2.5127480030059814,
      "learning_rate": 1.866666666666667e-05,
      "loss": 0.0801,
      "step": 800
    },
    {
      "epoch": 0.25,
      "eval_loss": 0.02089008316397667,
      "eval_runtime": 193.1941,
      "eval_samples_per_second": 5.176,
      "eval_steps_per_second": 5.176,
      "step": 800
    },
    {
      "epoch": 0.28,
      "grad_norm": 2.219334125518799,
      "learning_rate": 1.8222222222222224e-05,
      "loss": 0.0619,
      "step": 900
    },
    {
      "epoch": 0.28,
      "eval_loss": 0.03212675824761391,
      "eval_runtime": 192.8222,
      "eval_samples_per_second": 5.186,
      "eval_steps_per_second": 5.186,
      "step": 900
    },
    {
      "epoch": 0.31,
      "grad_norm": 0.01359077449887991,
      "learning_rate": 1.7777777777777777e-05,
      "loss": 0.0707,
      "step": 1000
    },
    {
      "epoch": 0.31,
      "eval_loss": 0.0244381632655859,
      "eval_runtime": 192.3716,
      "eval_samples_per_second": 5.198,
      "eval_steps_per_second": 5.198,
      "step": 1000
    },
    {
      "epoch": 0.34,
      "grad_norm": 0.007412207778543234,
      "learning_rate": 1.7333333333333336e-05,
      "loss": 0.0631,
      "step": 1100
    },
    {
      "epoch": 0.34,
      "eval_loss": 0.025782734155654907,
      "eval_runtime": 192.5677,
      "eval_samples_per_second": 5.193,
      "eval_steps_per_second": 5.193,
      "step": 1100
    },
    {
      "epoch": 0.38,
      "grad_norm": 0.004010587465018034,
      "learning_rate": 1.688888888888889e-05,
      "loss": 0.0645,
      "step": 1200
    },
    {
      "epoch": 0.38,
      "eval_loss": 0.026380345225334167,
      "eval_runtime": 192.1928,
      "eval_samples_per_second": 5.203,
      "eval_steps_per_second": 5.203,
      "step": 1200
    },
    {
      "epoch": 0.41,
      "grad_norm": 0.0805005207657814,
      "learning_rate": 1.6444444444444444e-05,
      "loss": 0.0419,
      "step": 1300
    },
    {
      "epoch": 0.41,
      "eval_loss": 0.027041926980018616,
      "eval_runtime": 192.535,
      "eval_samples_per_second": 5.194,
      "eval_steps_per_second": 5.194,
      "step": 1300
    },
    {
      "epoch": 0.44,
      "grad_norm": 0.010087325237691402,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 0.0612,
      "step": 1400
    },
    {
      "epoch": 0.44,
      "eval_loss": 0.018431425094604492,
      "eval_runtime": 192.3449,
      "eval_samples_per_second": 5.199,
      "eval_steps_per_second": 5.199,
      "step": 1400
    },
    {
      "epoch": 0.47,
      "grad_norm": 0.6794092655181885,
      "learning_rate": 1.555555555555556e-05,
      "loss": 0.0569,
      "step": 1500
    },
    {
      "epoch": 0.47,
      "eval_loss": 0.017885498702526093,
      "eval_runtime": 192.6531,
      "eval_samples_per_second": 5.191,
      "eval_steps_per_second": 5.191,
      "step": 1500
    },
    {
      "epoch": 0.5,
      "grad_norm": 1.6970280408859253,
      "learning_rate": 1.5111111111111112e-05,
      "loss": 0.0546,
      "step": 1600
    },
    {
      "epoch": 0.5,
      "eval_loss": 0.022140460088849068,
      "eval_runtime": 192.8968,
      "eval_samples_per_second": 5.184,
      "eval_steps_per_second": 5.184,
      "step": 1600
    },
    {
      "epoch": 0.53,
      "grad_norm": 0.013943832367658615,
      "learning_rate": 1.4666666666666666e-05,
      "loss": 0.069,
      "step": 1700
    },
    {
      "epoch": 0.53,
      "eval_loss": 0.020227737724781036,
      "eval_runtime": 193.1829,
      "eval_samples_per_second": 5.176,
      "eval_steps_per_second": 5.176,
      "step": 1700
    },
    {
      "epoch": 0.56,
      "grad_norm": 2.067197322845459,
      "learning_rate": 1.4222222222222224e-05,
      "loss": 0.0509,
      "step": 1800
    },
    {
      "epoch": 0.56,
      "eval_loss": 0.01612325944006443,
      "eval_runtime": 192.6314,
      "eval_samples_per_second": 5.191,
      "eval_steps_per_second": 5.191,
      "step": 1800
    },
    {
      "epoch": 0.6,
      "grad_norm": 2.2480263710021973,
      "learning_rate": 1.377777777777778e-05,
      "loss": 0.0495,
      "step": 1900
    },
    {
      "epoch": 0.6,
      "eval_loss": 0.01796303130686283,
      "eval_runtime": 192.3154,
      "eval_samples_per_second": 5.2,
      "eval_steps_per_second": 5.2,
      "step": 1900
    },
    {
      "epoch": 0.63,
      "grad_norm": 0.0029044542461633682,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 0.0444,
      "step": 2000
    },
    {
      "epoch": 0.63,
      "eval_loss": 0.02335376851260662,
      "eval_runtime": 192.3608,
      "eval_samples_per_second": 5.199,
      "eval_steps_per_second": 5.199,
      "step": 2000
    },
    {
      "epoch": 0.66,
      "grad_norm": 1.1315308809280396,
      "learning_rate": 1.288888888888889e-05,
      "loss": 0.0523,
      "step": 2100
    },
    {
      "epoch": 0.66,
      "eval_loss": 0.019370460882782936,
      "eval_runtime": 192.4625,
      "eval_samples_per_second": 5.196,
      "eval_steps_per_second": 5.196,
      "step": 2100
    },
    {
      "epoch": 0.69,
      "grad_norm": 0.0026583941653370857,
      "learning_rate": 1.2444444444444446e-05,
      "loss": 0.0553,
      "step": 2200
    },
    {
      "epoch": 0.69,
      "eval_loss": 0.01725778914988041,
      "eval_runtime": 192.3261,
      "eval_samples_per_second": 5.2,
      "eval_steps_per_second": 5.2,
      "step": 2200
    },
    {
      "epoch": 0.72,
      "grad_norm": 0.004530389327555895,
      "learning_rate": 1.2e-05,
      "loss": 0.0538,
      "step": 2300
    },
    {
      "epoch": 0.72,
      "eval_loss": 0.022032534703612328,
      "eval_runtime": 192.3926,
      "eval_samples_per_second": 5.198,
      "eval_steps_per_second": 5.198,
      "step": 2300
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.0024464745074510574,
      "learning_rate": 1.1555555555555556e-05,
      "loss": 0.0456,
      "step": 2400
    },
    {
      "epoch": 0.75,
      "eval_loss": 0.030184298753738403,
      "eval_runtime": 192.2035,
      "eval_samples_per_second": 5.203,
      "eval_steps_per_second": 5.203,
      "step": 2400
    },
    {
      "epoch": 0.78,
      "grad_norm": 0.0019896693993359804,
      "learning_rate": 1.1111111111111113e-05,
      "loss": 0.0499,
      "step": 2500
    },
    {
      "epoch": 0.78,
      "eval_loss": 0.01628696359694004,
      "eval_runtime": 192.4383,
      "eval_samples_per_second": 5.196,
      "eval_steps_per_second": 5.196,
      "step": 2500
    },
    {
      "epoch": 0.81,
      "grad_norm": 0.001884658238850534,
      "learning_rate": 1.0666666666666667e-05,
      "loss": 0.0493,
      "step": 2600
    },
    {
      "epoch": 0.81,
      "eval_loss": 0.025292817503213882,
      "eval_runtime": 192.2059,
      "eval_samples_per_second": 5.203,
      "eval_steps_per_second": 5.203,
      "step": 2600
    },
    {
      "epoch": 0.85,
      "grad_norm": 0.04386008903384209,
      "learning_rate": 1.0222222222222223e-05,
      "loss": 0.0452,
      "step": 2700
    },
    {
      "epoch": 0.85,
      "eval_loss": 0.022591974586248398,
      "eval_runtime": 192.8163,
      "eval_samples_per_second": 5.186,
      "eval_steps_per_second": 5.186,
      "step": 2700
    },
    {
      "epoch": 0.88,
      "grad_norm": 0.0019288246985524893,
      "learning_rate": 9.777777777777779e-06,
      "loss": 0.0438,
      "step": 2800
    },
    {
      "epoch": 0.88,
      "eval_loss": 0.0226399265229702,
      "eval_runtime": 192.6394,
      "eval_samples_per_second": 5.191,
      "eval_steps_per_second": 5.191,
      "step": 2800
    },
    {
      "epoch": 0.91,
      "grad_norm": 0.056311335414648056,
      "learning_rate": 9.333333333333334e-06,
      "loss": 0.0579,
      "step": 2900
    },
    {
      "epoch": 0.91,
      "eval_loss": 0.02137078531086445,
      "eval_runtime": 192.3652,
      "eval_samples_per_second": 5.198,
      "eval_steps_per_second": 5.198,
      "step": 2900
    },
    {
      "epoch": 0.94,
      "grad_norm": 0.003287563333287835,
      "learning_rate": 8.888888888888888e-06,
      "loss": 0.036,
      "step": 3000
    },
    {
      "epoch": 0.94,
      "eval_loss": 0.015787875279784203,
      "eval_runtime": 192.7182,
      "eval_samples_per_second": 5.189,
      "eval_steps_per_second": 5.189,
      "step": 3000
    }
  ],
  "logging_steps": 100,
  "max_steps": 5000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 1000,
  "total_flos": 4.8306377981952e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}