{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.5665387326701654,
  "eval_steps": 100,
  "global_step": 5000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "grad_norm": 7.659268856048584,
      "learning_rate": 4.000000000000001e-06,
      "loss": 1.3919,
      "step": 100
    },
    {
      "epoch": 0.03,
      "eval_loss": 1.1893327236175537,
      "eval_runtime": 192.797,
      "eval_samples_per_second": 5.187,
      "eval_steps_per_second": 5.187,
      "step": 100
    },
    {
      "epoch": 0.06,
      "grad_norm": 3.593693971633911,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.7649,
      "step": 200
    },
    {
      "epoch": 0.06,
      "eval_loss": 0.37141919136047363,
      "eval_runtime": 193.0277,
      "eval_samples_per_second": 5.181,
      "eval_steps_per_second": 5.181,
      "step": 200
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.2718026638031006,
      "learning_rate": 1.2e-05,
      "loss": 0.2492,
      "step": 300
    },
    {
      "epoch": 0.09,
      "eval_loss": 0.1235867515206337,
      "eval_runtime": 192.4574,
      "eval_samples_per_second": 5.196,
      "eval_steps_per_second": 5.196,
      "step": 300
    },
    {
      "epoch": 0.13,
      "grad_norm": 0.3505817651748657,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 0.1111,
      "step": 400
    },
    {
      "epoch": 0.13,
      "eval_loss": 0.06019064038991928,
      "eval_runtime": 193.0151,
      "eval_samples_per_second": 5.181,
      "eval_steps_per_second": 5.181,
      "step": 400
    },
    {
      "epoch": 0.16,
      "grad_norm": 2.2099411487579346,
      "learning_rate": 2e-05,
      "loss": 0.1213,
      "step": 500
    },
    {
      "epoch": 0.16,
      "eval_loss": 0.05992453545331955,
      "eval_runtime": 193.5019,
      "eval_samples_per_second": 5.168,
      "eval_steps_per_second": 5.168,
      "step": 500
    },
    {
      "epoch": 0.19,
      "grad_norm": 0.006523499730974436,
      "learning_rate": 1.9555555555555557e-05,
      "loss": 0.1161,
      "step": 600
    },
    {
      "epoch": 0.19,
      "eval_loss": 0.03172233700752258,
      "eval_runtime": 192.8437,
      "eval_samples_per_second": 5.186,
      "eval_steps_per_second": 5.186,
      "step": 600
    },
    {
      "epoch": 0.22,
      "grad_norm": 0.0743604227900505,
      "learning_rate": 1.9111111111111113e-05,
      "loss": 0.0853,
      "step": 700
    },
    {
      "epoch": 0.22,
      "eval_loss": 0.03372843191027641,
      "eval_runtime": 193.2798,
      "eval_samples_per_second": 5.174,
      "eval_steps_per_second": 5.174,
      "step": 700
    },
    {
      "epoch": 0.25,
      "grad_norm": 2.5127480030059814,
      "learning_rate": 1.866666666666667e-05,
      "loss": 0.0801,
      "step": 800
    },
    {
      "epoch": 0.25,
      "eval_loss": 0.02089008316397667,
      "eval_runtime": 193.1941,
      "eval_samples_per_second": 5.176,
      "eval_steps_per_second": 5.176,
      "step": 800
    },
    {
      "epoch": 0.28,
      "grad_norm": 2.219334125518799,
      "learning_rate": 1.8222222222222224e-05,
      "loss": 0.0619,
      "step": 900
    },
    {
      "epoch": 0.28,
      "eval_loss": 0.03212675824761391,
      "eval_runtime": 192.8222,
      "eval_samples_per_second": 5.186,
      "eval_steps_per_second": 5.186,
      "step": 900
    },
    {
      "epoch": 0.31,
      "grad_norm": 0.01359077449887991,
      "learning_rate": 1.7777777777777777e-05,
      "loss": 0.0707,
      "step": 1000
    },
    {
      "epoch": 0.31,
      "eval_loss": 0.0244381632655859,
      "eval_runtime": 192.3716,
      "eval_samples_per_second": 5.198,
      "eval_steps_per_second": 5.198,
      "step": 1000
    },
    {
      "epoch": 0.34,
      "grad_norm": 0.007412207778543234,
      "learning_rate": 1.7333333333333336e-05,
      "loss": 0.0631,
      "step": 1100
    },
    {
      "epoch": 0.34,
      "eval_loss": 0.025782734155654907,
      "eval_runtime": 192.5677,
      "eval_samples_per_second": 5.193,
      "eval_steps_per_second": 5.193,
      "step": 1100
    },
    {
      "epoch": 0.38,
      "grad_norm": 0.004010587465018034,
      "learning_rate": 1.688888888888889e-05,
      "loss": 0.0645,
      "step": 1200
    },
    {
      "epoch": 0.38,
      "eval_loss": 0.026380345225334167,
      "eval_runtime": 192.1928,
      "eval_samples_per_second": 5.203,
      "eval_steps_per_second": 5.203,
      "step": 1200
    },
    {
      "epoch": 0.41,
      "grad_norm": 0.0805005207657814,
      "learning_rate": 1.6444444444444444e-05,
      "loss": 0.0419,
      "step": 1300
    },
    {
      "epoch": 0.41,
      "eval_loss": 0.027041926980018616,
      "eval_runtime": 192.535,
      "eval_samples_per_second": 5.194,
      "eval_steps_per_second": 5.194,
      "step": 1300
    },
    {
      "epoch": 0.44,
      "grad_norm": 0.010087325237691402,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 0.0612,
      "step": 1400
    },
    {
      "epoch": 0.44,
      "eval_loss": 0.018431425094604492,
      "eval_runtime": 192.3449,
      "eval_samples_per_second": 5.199,
      "eval_steps_per_second": 5.199,
      "step": 1400
    },
    {
      "epoch": 0.47,
      "grad_norm": 0.6794092655181885,
      "learning_rate": 1.555555555555556e-05,
      "loss": 0.0569,
      "step": 1500
    },
    {
      "epoch": 0.47,
      "eval_loss": 0.017885498702526093,
      "eval_runtime": 192.6531,
      "eval_samples_per_second": 5.191,
      "eval_steps_per_second": 5.191,
      "step": 1500
    },
    {
      "epoch": 0.5,
      "grad_norm": 1.6970280408859253,
      "learning_rate": 1.5111111111111112e-05,
      "loss": 0.0546,
      "step": 1600
    },
    {
      "epoch": 0.5,
      "eval_loss": 0.022140460088849068,
      "eval_runtime": 192.8968,
      "eval_samples_per_second": 5.184,
      "eval_steps_per_second": 5.184,
      "step": 1600
    },
    {
      "epoch": 0.53,
      "grad_norm": 0.013943832367658615,
      "learning_rate": 1.4666666666666666e-05,
      "loss": 0.069,
      "step": 1700
    },
    {
      "epoch": 0.53,
      "eval_loss": 0.020227737724781036,
      "eval_runtime": 193.1829,
      "eval_samples_per_second": 5.176,
      "eval_steps_per_second": 5.176,
      "step": 1700
    },
    {
      "epoch": 0.56,
      "grad_norm": 2.067197322845459,
      "learning_rate": 1.4222222222222224e-05,
      "loss": 0.0509,
      "step": 1800
    },
    {
      "epoch": 0.56,
      "eval_loss": 0.01612325944006443,
      "eval_runtime": 192.6314,
      "eval_samples_per_second": 5.191,
      "eval_steps_per_second": 5.191,
      "step": 1800
    },
    {
      "epoch": 0.6,
      "grad_norm": 2.2480263710021973,
      "learning_rate": 1.377777777777778e-05,
      "loss": 0.0495,
      "step": 1900
    },
    {
      "epoch": 0.6,
      "eval_loss": 0.01796303130686283,
      "eval_runtime": 192.3154,
      "eval_samples_per_second": 5.2,
      "eval_steps_per_second": 5.2,
      "step": 1900
    },
    {
      "epoch": 0.63,
      "grad_norm": 0.0029044542461633682,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 0.0444,
      "step": 2000
    },
    {
      "epoch": 0.63,
      "eval_loss": 0.02335376851260662,
      "eval_runtime": 192.3608,
      "eval_samples_per_second": 5.199,
      "eval_steps_per_second": 5.199,
      "step": 2000
    },
    {
      "epoch": 0.66,
      "grad_norm": 1.1315308809280396,
      "learning_rate": 1.288888888888889e-05,
      "loss": 0.0523,
      "step": 2100
    },
    {
      "epoch": 0.66,
      "eval_loss": 0.019370460882782936,
      "eval_runtime": 192.4625,
      "eval_samples_per_second": 5.196,
      "eval_steps_per_second": 5.196,
      "step": 2100
    },
    {
      "epoch": 0.69,
      "grad_norm": 0.0026583941653370857,
      "learning_rate": 1.2444444444444446e-05,
      "loss": 0.0553,
      "step": 2200
    },
    {
      "epoch": 0.69,
      "eval_loss": 0.01725778914988041,
      "eval_runtime": 192.3261,
      "eval_samples_per_second": 5.2,
      "eval_steps_per_second": 5.2,
      "step": 2200
    },
    {
      "epoch": 0.72,
      "grad_norm": 0.004530389327555895,
      "learning_rate": 1.2e-05,
      "loss": 0.0538,
      "step": 2300
    },
    {
      "epoch": 0.72,
      "eval_loss": 0.022032534703612328,
      "eval_runtime": 192.3926,
      "eval_samples_per_second": 5.198,
      "eval_steps_per_second": 5.198,
      "step": 2300
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.0024464745074510574,
      "learning_rate": 1.1555555555555556e-05,
      "loss": 0.0456,
      "step": 2400
    },
    {
      "epoch": 0.75,
      "eval_loss": 0.030184298753738403,
      "eval_runtime": 192.2035,
      "eval_samples_per_second": 5.203,
      "eval_steps_per_second": 5.203,
      "step": 2400
    },
    {
      "epoch": 0.78,
      "grad_norm": 0.0019896693993359804,
      "learning_rate": 1.1111111111111113e-05,
      "loss": 0.0499,
      "step": 2500
    },
    {
      "epoch": 0.78,
      "eval_loss": 0.01628696359694004,
      "eval_runtime": 192.4383,
      "eval_samples_per_second": 5.196,
      "eval_steps_per_second": 5.196,
      "step": 2500
    },
    {
      "epoch": 0.81,
      "grad_norm": 0.001884658238850534,
      "learning_rate": 1.0666666666666667e-05,
      "loss": 0.0493,
      "step": 2600
    },
    {
      "epoch": 0.81,
      "eval_loss": 0.025292817503213882,
      "eval_runtime": 192.2059,
      "eval_samples_per_second": 5.203,
      "eval_steps_per_second": 5.203,
      "step": 2600
    },
    {
      "epoch": 0.85,
      "grad_norm": 0.04386008903384209,
      "learning_rate": 1.0222222222222223e-05,
      "loss": 0.0452,
      "step": 2700
    },
    {
      "epoch": 0.85,
      "eval_loss": 0.022591974586248398,
      "eval_runtime": 192.8163,
      "eval_samples_per_second": 5.186,
      "eval_steps_per_second": 5.186,
      "step": 2700
    },
    {
      "epoch": 0.88,
      "grad_norm": 0.0019288246985524893,
      "learning_rate": 9.777777777777779e-06,
      "loss": 0.0438,
      "step": 2800
    },
    {
      "epoch": 0.88,
      "eval_loss": 0.0226399265229702,
      "eval_runtime": 192.6394,
      "eval_samples_per_second": 5.191,
      "eval_steps_per_second": 5.191,
      "step": 2800
    },
    {
      "epoch": 0.91,
      "grad_norm": 0.056311335414648056,
      "learning_rate": 9.333333333333334e-06,
      "loss": 0.0579,
      "step": 2900
    },
    {
      "epoch": 0.91,
      "eval_loss": 0.02137078531086445,
      "eval_runtime": 192.3652,
      "eval_samples_per_second": 5.198,
      "eval_steps_per_second": 5.198,
      "step": 2900
    },
    {
      "epoch": 0.94,
      "grad_norm": 0.003287563333287835,
      "learning_rate": 8.888888888888888e-06,
      "loss": 0.036,
      "step": 3000
    },
    {
      "epoch": 0.94,
      "eval_loss": 0.015787875279784203,
      "eval_runtime": 192.7182,
      "eval_samples_per_second": 5.189,
      "eval_steps_per_second": 5.189,
      "step": 3000
    },
    {
      "epoch": 0.97,
      "grad_norm": 0.0015150770777836442,
      "learning_rate": 8.444444444444446e-06,
      "loss": 0.055,
      "step": 3100
    },
    {
      "epoch": 0.97,
      "eval_loss": 0.020349696278572083,
      "eval_runtime": 192.752,
      "eval_samples_per_second": 5.188,
      "eval_steps_per_second": 5.188,
      "step": 3100
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.8284673690795898,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.0424,
      "step": 3200
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.011587778106331825,
      "eval_runtime": 192.4082,
      "eval_samples_per_second": 5.197,
      "eval_steps_per_second": 5.197,
      "step": 3200
    },
    {
      "epoch": 1.03,
      "grad_norm": 0.00324226007796824,
      "learning_rate": 7.555555555555556e-06,
      "loss": 0.0232,
      "step": 3300
    },
    {
      "epoch": 1.03,
      "eval_loss": 0.02541309781372547,
      "eval_runtime": 192.4753,
      "eval_samples_per_second": 5.195,
      "eval_steps_per_second": 5.195,
      "step": 3300
    },
    {
      "epoch": 1.07,
      "grad_norm": 0.0018368299352005124,
      "learning_rate": 7.111111111111112e-06,
      "loss": 0.0391,
      "step": 3400
    },
    {
      "epoch": 1.07,
      "eval_loss": 0.019817600026726723,
      "eval_runtime": 192.2908,
      "eval_samples_per_second": 5.2,
      "eval_steps_per_second": 5.2,
      "step": 3400
    },
    {
      "epoch": 1.1,
      "grad_norm": 0.001375267980620265,
      "learning_rate": 6.666666666666667e-06,
      "loss": 0.029,
      "step": 3500
    },
    {
      "epoch": 1.1,
      "eval_loss": 0.014760646037757397,
      "eval_runtime": 192.1713,
      "eval_samples_per_second": 5.204,
      "eval_steps_per_second": 5.204,
      "step": 3500
    },
    {
      "epoch": 1.13,
      "grad_norm": 1.2048271894454956,
      "learning_rate": 6.222222222222223e-06,
      "loss": 0.028,
      "step": 3600
    },
    {
      "epoch": 1.13,
      "eval_loss": 0.024096647277474403,
      "eval_runtime": 191.7597,
      "eval_samples_per_second": 5.215,
      "eval_steps_per_second": 5.215,
      "step": 3600
    },
    {
      "epoch": 1.16,
      "grad_norm": 1.7010436058044434,
      "learning_rate": 5.777777777777778e-06,
      "loss": 0.033,
      "step": 3700
    },
    {
      "epoch": 1.16,
      "eval_loss": 0.024101875722408295,
      "eval_runtime": 191.6566,
      "eval_samples_per_second": 5.218,
      "eval_steps_per_second": 5.218,
      "step": 3700
    },
    {
      "epoch": 1.19,
      "grad_norm": 0.4044632613658905,
      "learning_rate": 5.333333333333334e-06,
      "loss": 0.0411,
      "step": 3800
    },
    {
      "epoch": 1.19,
      "eval_loss": 0.020846880972385406,
      "eval_runtime": 192.1837,
      "eval_samples_per_second": 5.203,
      "eval_steps_per_second": 5.203,
      "step": 3800
    },
    {
      "epoch": 1.22,
      "grad_norm": 0.47499576210975647,
      "learning_rate": 4.888888888888889e-06,
      "loss": 0.0233,
      "step": 3900
    },
    {
      "epoch": 1.22,
      "eval_loss": 0.010989435017108917,
      "eval_runtime": 192.4351,
      "eval_samples_per_second": 5.197,
      "eval_steps_per_second": 5.197,
      "step": 3900
    },
    {
      "epoch": 1.25,
      "grad_norm": 0.017001571133732796,
      "learning_rate": 4.444444444444444e-06,
      "loss": 0.0204,
      "step": 4000
    },
    {
      "epoch": 1.25,
      "eval_loss": 0.02116994932293892,
      "eval_runtime": 192.8643,
      "eval_samples_per_second": 5.185,
      "eval_steps_per_second": 5.185,
      "step": 4000
    },
    {
      "epoch": 1.28,
      "grad_norm": 0.0578785166144371,
      "learning_rate": 4.004444444444445e-06,
      "loss": 0.0378,
      "step": 4100
    },
    {
      "epoch": 1.28,
      "eval_loss": 0.018333839252591133,
      "eval_runtime": 192.9576,
      "eval_samples_per_second": 5.182,
      "eval_steps_per_second": 5.182,
      "step": 4100
    },
    {
      "epoch": 1.32,
      "grad_norm": 0.0014218598371371627,
      "learning_rate": 3.5600000000000002e-06,
      "loss": 0.0289,
      "step": 4200
    },
    {
      "epoch": 1.32,
      "eval_loss": 0.02419031597673893,
      "eval_runtime": 192.6293,
      "eval_samples_per_second": 5.191,
      "eval_steps_per_second": 5.191,
      "step": 4200
    },
    {
      "epoch": 1.35,
      "grad_norm": 0.0013137555215507746,
      "learning_rate": 3.1155555555555555e-06,
      "loss": 0.0298,
      "step": 4300
    },
    {
      "epoch": 1.35,
      "eval_loss": 0.02638879045844078,
      "eval_runtime": 192.8273,
      "eval_samples_per_second": 5.186,
      "eval_steps_per_second": 5.186,
      "step": 4300
    },
    {
      "epoch": 1.38,
      "grad_norm": 0.35259732604026794,
      "learning_rate": 2.6711111111111116e-06,
      "loss": 0.0382,
      "step": 4400
    },
    {
      "epoch": 1.38,
      "eval_loss": 0.01607164740562439,
      "eval_runtime": 192.7408,
      "eval_samples_per_second": 5.188,
      "eval_steps_per_second": 5.188,
      "step": 4400
    },
    {
      "epoch": 1.41,
      "grad_norm": 0.0020605421159416437,
      "learning_rate": 2.226666666666667e-06,
      "loss": 0.0339,
      "step": 4500
    },
    {
      "epoch": 1.41,
      "eval_loss": 0.014907135628163815,
      "eval_runtime": 192.8289,
      "eval_samples_per_second": 5.186,
      "eval_steps_per_second": 5.186,
      "step": 4500
    },
    {
      "epoch": 1.44,
      "grad_norm": 0.0019016048172488809,
      "learning_rate": 1.7822222222222225e-06,
      "loss": 0.0195,
      "step": 4600
    },
    {
      "epoch": 1.44,
      "eval_loss": 0.015925556421279907,
      "eval_runtime": 192.4215,
      "eval_samples_per_second": 5.197,
      "eval_steps_per_second": 5.197,
      "step": 4600
    },
    {
      "epoch": 1.47,
      "grad_norm": 0.734219491481781,
      "learning_rate": 1.337777777777778e-06,
      "loss": 0.0328,
      "step": 4700
    },
    {
      "epoch": 1.47,
      "eval_loss": 0.014890914782881737,
      "eval_runtime": 192.4767,
      "eval_samples_per_second": 5.195,
      "eval_steps_per_second": 5.195,
      "step": 4700
    },
    {
      "epoch": 1.5,
      "grad_norm": 2.157243251800537,
      "learning_rate": 8.933333333333334e-07,
      "loss": 0.0429,
      "step": 4800
    },
    {
      "epoch": 1.5,
      "eval_loss": 0.01486047450453043,
      "eval_runtime": 191.7644,
      "eval_samples_per_second": 5.215,
      "eval_steps_per_second": 5.215,
      "step": 4800
    },
    {
      "epoch": 1.54,
      "grad_norm": 0.00161929486785084,
      "learning_rate": 4.488888888888889e-07,
      "loss": 0.0312,
      "step": 4900
    },
    {
      "epoch": 1.54,
      "eval_loss": 0.02127786912024021,
      "eval_runtime": 191.9957,
      "eval_samples_per_second": 5.208,
      "eval_steps_per_second": 5.208,
      "step": 4900
    },
    {
      "epoch": 1.57,
      "grad_norm": 0.001571273198351264,
      "learning_rate": 4.444444444444445e-09,
      "loss": 0.0364,
      "step": 5000
    },
    {
      "epoch": 1.57,
      "eval_loss": 0.01901436597108841,
      "eval_runtime": 191.5985,
      "eval_samples_per_second": 5.219,
      "eval_steps_per_second": 5.219,
      "step": 5000
    }
  ],
  "logging_steps": 100,
  "max_steps": 5000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 1000,
  "total_flos": 8.051062996992e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}