{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.5380710659898478,
  "eval_steps": 500,
  "global_step": 1000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.025380710659898477,
      "grad_norm": 2.13912296295166,
      "learning_rate": 4.957698815566836e-05,
      "loss": 0.4596,
      "step": 10
    },
    {
      "epoch": 0.050761421319796954,
      "grad_norm": 1.5265041589736938,
      "learning_rate": 4.9153976311336716e-05,
      "loss": 0.5119,
      "step": 20
    },
    {
      "epoch": 0.07614213197969544,
      "grad_norm": 3.402026891708374,
      "learning_rate": 4.873096446700508e-05,
      "loss": 0.5408,
      "step": 30
    },
    {
      "epoch": 0.10152284263959391,
      "grad_norm": 1.8265776634216309,
      "learning_rate": 4.8307952622673436e-05,
      "loss": 0.5605,
      "step": 40
    },
    {
      "epoch": 0.12690355329949238,
      "grad_norm": 1.4988059997558594,
      "learning_rate": 4.7884940778341796e-05,
      "loss": 0.5919,
      "step": 50
    },
    {
      "epoch": 0.15228426395939088,
      "grad_norm": 1.6790786981582642,
      "learning_rate": 4.746192893401015e-05,
      "loss": 0.5605,
      "step": 60
    },
    {
      "epoch": 0.17766497461928935,
      "grad_norm": 2.108412742614746,
      "learning_rate": 4.7038917089678517e-05,
      "loss": 0.5024,
      "step": 70
    },
    {
      "epoch": 0.20304568527918782,
      "grad_norm": 1.2667797803878784,
      "learning_rate": 4.661590524534687e-05,
      "loss": 0.6062,
      "step": 80
    },
    {
      "epoch": 0.22842639593908629,
      "grad_norm": 1.2963333129882812,
      "learning_rate": 4.619289340101523e-05,
      "loss": 0.5623,
      "step": 90
    },
    {
      "epoch": 0.25380710659898476,
      "grad_norm": 4.575770378112793,
      "learning_rate": 4.576988155668359e-05,
      "loss": 0.5304,
      "step": 100
    },
    {
      "epoch": 0.27918781725888325,
      "grad_norm": 1.6193996667861938,
      "learning_rate": 4.534686971235195e-05,
      "loss": 0.5276,
      "step": 110
    },
    {
      "epoch": 0.30456852791878175,
      "grad_norm": 1.5291751623153687,
      "learning_rate": 4.492385786802031e-05,
      "loss": 0.4513,
      "step": 120
    },
    {
      "epoch": 0.3299492385786802,
      "grad_norm": 2.916069984436035,
      "learning_rate": 4.4500846023688664e-05,
      "loss": 0.4674,
      "step": 130
    },
    {
      "epoch": 0.3553299492385787,
      "grad_norm": 2.216944456100464,
      "learning_rate": 4.4077834179357024e-05,
      "loss": 0.455,
      "step": 140
    },
    {
      "epoch": 0.38071065989847713,
      "grad_norm": 2.572436571121216,
      "learning_rate": 4.365482233502538e-05,
      "loss": 0.5409,
      "step": 150
    },
    {
      "epoch": 0.40609137055837563,
      "grad_norm": 2.5171351432800293,
      "learning_rate": 4.3231810490693744e-05,
      "loss": 0.5328,
      "step": 160
    },
    {
      "epoch": 0.43147208121827413,
      "grad_norm": 2.056858777999878,
      "learning_rate": 4.2808798646362104e-05,
      "loss": 0.515,
      "step": 170
    },
    {
      "epoch": 0.45685279187817257,
      "grad_norm": 1.575071930885315,
      "learning_rate": 4.238578680203046e-05,
      "loss": 0.5201,
      "step": 180
    },
    {
      "epoch": 0.48223350253807107,
      "grad_norm": 3.0955426692962646,
      "learning_rate": 4.196277495769882e-05,
      "loss": 0.4181,
      "step": 190
    },
    {
      "epoch": 0.5076142131979695,
      "grad_norm": 2.798530340194702,
      "learning_rate": 4.153976311336718e-05,
      "loss": 0.3292,
      "step": 200
    },
    {
      "epoch": 0.5329949238578681,
      "grad_norm": 2.3137614727020264,
      "learning_rate": 4.111675126903554e-05,
      "loss": 0.6242,
      "step": 210
    },
    {
      "epoch": 0.5583756345177665,
      "grad_norm": 1.7211440801620483,
      "learning_rate": 4.069373942470389e-05,
      "loss": 0.5459,
      "step": 220
    },
    {
      "epoch": 0.583756345177665,
      "grad_norm": 1.9022053480148315,
      "learning_rate": 4.027072758037225e-05,
      "loss": 0.4379,
      "step": 230
    },
    {
      "epoch": 0.6091370558375635,
      "grad_norm": 1.4369803667068481,
      "learning_rate": 3.9847715736040605e-05,
      "loss": 0.5092,
      "step": 240
    },
    {
      "epoch": 0.6345177664974619,
      "grad_norm": 1.8029989004135132,
      "learning_rate": 3.942470389170897e-05,
      "loss": 0.5678,
      "step": 250
    },
    {
      "epoch": 0.6598984771573604,
      "grad_norm": 2.690417528152466,
      "learning_rate": 3.900169204737733e-05,
      "loss": 0.4386,
      "step": 260
    },
    {
      "epoch": 0.6852791878172588,
      "grad_norm": 3.036839485168457,
      "learning_rate": 3.8578680203045685e-05,
      "loss": 0.5936,
      "step": 270
    },
    {
      "epoch": 0.7106598984771574,
      "grad_norm": 1.6595145463943481,
      "learning_rate": 3.8155668358714046e-05,
      "loss": 0.5406,
      "step": 280
    },
    {
      "epoch": 0.7360406091370558,
      "grad_norm": 4.433804035186768,
      "learning_rate": 3.7732656514382406e-05,
      "loss": 0.4874,
      "step": 290
    },
    {
      "epoch": 0.7614213197969543,
      "grad_norm": 2.043555498123169,
      "learning_rate": 3.7309644670050766e-05,
      "loss": 0.4603,
      "step": 300
    },
    {
      "epoch": 0.7868020304568528,
      "grad_norm": 2.553321123123169,
      "learning_rate": 3.688663282571912e-05,
      "loss": 0.5858,
      "step": 310
    },
    {
      "epoch": 0.8121827411167513,
      "grad_norm": 2.677241563796997,
      "learning_rate": 3.646362098138748e-05,
      "loss": 0.4095,
      "step": 320
    },
    {
      "epoch": 0.8375634517766497,
      "grad_norm": 3.408170461654663,
      "learning_rate": 3.604060913705584e-05,
      "loss": 0.6228,
      "step": 330
    },
    {
      "epoch": 0.8629441624365483,
      "grad_norm": 3.3228750228881836,
      "learning_rate": 3.56175972927242e-05,
      "loss": 0.4735,
      "step": 340
    },
    {
      "epoch": 0.8883248730964467,
      "grad_norm": 4.007043838500977,
      "learning_rate": 3.519458544839256e-05,
      "loss": 0.51,
      "step": 350
    },
    {
      "epoch": 0.9137055837563451,
      "grad_norm": 1.5816287994384766,
      "learning_rate": 3.477157360406091e-05,
      "loss": 0.5176,
      "step": 360
    },
    {
      "epoch": 0.9390862944162437,
      "grad_norm": 1.4616796970367432,
      "learning_rate": 3.434856175972927e-05,
      "loss": 0.286,
      "step": 370
    },
    {
      "epoch": 0.9644670050761421,
      "grad_norm": 4.507604122161865,
      "learning_rate": 3.3925549915397633e-05,
      "loss": 0.6015,
      "step": 380
    },
    {
      "epoch": 0.9898477157360406,
      "grad_norm": 1.7000436782836914,
      "learning_rate": 3.3502538071065994e-05,
      "loss": 0.3593,
      "step": 390
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.512408971786499,
      "eval_runtime": 8.5129,
      "eval_samples_per_second": 92.565,
      "eval_steps_per_second": 11.629,
      "step": 394
    },
    {
      "epoch": 1.015228426395939,
      "grad_norm": 3.039973497390747,
      "learning_rate": 3.307952622673435e-05,
      "loss": 0.5087,
      "step": 400
    },
    {
      "epoch": 1.0406091370558375,
      "grad_norm": 2.5285613536834717,
      "learning_rate": 3.265651438240271e-05,
      "loss": 0.4335,
      "step": 410
    },
    {
      "epoch": 1.0659898477157361,
      "grad_norm": 1.5140420198440552,
      "learning_rate": 3.223350253807107e-05,
      "loss": 0.3759,
      "step": 420
    },
    {
      "epoch": 1.0913705583756346,
      "grad_norm": 2.0796151161193848,
      "learning_rate": 3.181049069373943e-05,
      "loss": 0.6431,
      "step": 430
    },
    {
      "epoch": 1.116751269035533,
      "grad_norm": 3.370028018951416,
      "learning_rate": 3.138747884940779e-05,
      "loss": 0.4801,
      "step": 440
    },
    {
      "epoch": 1.1421319796954315,
      "grad_norm": 7.150363445281982,
      "learning_rate": 3.096446700507614e-05,
      "loss": 0.4856,
      "step": 450
    },
    {
      "epoch": 1.16751269035533,
      "grad_norm": 4.6761860847473145,
      "learning_rate": 3.05414551607445e-05,
      "loss": 0.5973,
      "step": 460
    },
    {
      "epoch": 1.1928934010152283,
      "grad_norm": 1.6876248121261597,
      "learning_rate": 3.0118443316412858e-05,
      "loss": 0.4265,
      "step": 470
    },
    {
      "epoch": 1.218274111675127,
      "grad_norm": 2.2751359939575195,
      "learning_rate": 2.969543147208122e-05,
      "loss": 0.4307,
      "step": 480
    },
    {
      "epoch": 1.2436548223350254,
      "grad_norm": 4.351472854614258,
      "learning_rate": 2.927241962774958e-05,
      "loss": 0.4978,
      "step": 490
    },
    {
      "epoch": 1.2690355329949239,
      "grad_norm": 3.3015713691711426,
      "learning_rate": 2.8849407783417938e-05,
      "loss": 0.4159,
      "step": 500
    },
    {
      "epoch": 1.2944162436548223,
      "grad_norm": 1.4904918670654297,
      "learning_rate": 2.84263959390863e-05,
      "loss": 0.512,
      "step": 510
    },
    {
      "epoch": 1.3197969543147208,
      "grad_norm": 2.306755542755127,
      "learning_rate": 2.800338409475465e-05,
      "loss": 0.5745,
      "step": 520
    },
    {
      "epoch": 1.3451776649746192,
      "grad_norm": 3.0485565662384033,
      "learning_rate": 2.7580372250423015e-05,
      "loss": 0.4243,
      "step": 530
    },
    {
      "epoch": 1.3705583756345177,
      "grad_norm": 5.009148597717285,
      "learning_rate": 2.715736040609137e-05,
      "loss": 0.5187,
      "step": 540
    },
    {
      "epoch": 1.3959390862944163,
      "grad_norm": 3.1525375843048096,
      "learning_rate": 2.6734348561759732e-05,
      "loss": 0.4693,
      "step": 550
    },
    {
      "epoch": 1.4213197969543148,
      "grad_norm": 2.2495410442352295,
      "learning_rate": 2.6311336717428085e-05,
      "loss": 0.5011,
      "step": 560
    },
    {
      "epoch": 1.4467005076142132,
      "grad_norm": 2.390629291534424,
      "learning_rate": 2.588832487309645e-05,
      "loss": 0.6348,
      "step": 570
    },
    {
      "epoch": 1.4720812182741116,
      "grad_norm": 1.7083393335342407,
      "learning_rate": 2.546531302876481e-05,
      "loss": 0.4737,
      "step": 580
    },
    {
      "epoch": 1.49746192893401,
      "grad_norm": 2.4492344856262207,
      "learning_rate": 2.5042301184433166e-05,
      "loss": 0.5164,
      "step": 590
    },
    {
      "epoch": 1.5228426395939088,
      "grad_norm": 8.137948989868164,
      "learning_rate": 2.4619289340101523e-05,
      "loss": 0.4653,
      "step": 600
    },
    {
      "epoch": 1.548223350253807,
      "grad_norm": 2.3943252563476562,
      "learning_rate": 2.4196277495769883e-05,
      "loss": 0.4019,
      "step": 610
    },
    {
      "epoch": 1.5736040609137056,
      "grad_norm": 1.8179367780685425,
      "learning_rate": 2.3773265651438243e-05,
      "loss": 0.3428,
      "step": 620
    },
    {
      "epoch": 1.598984771573604,
      "grad_norm": 2.3452229499816895,
      "learning_rate": 2.33502538071066e-05,
      "loss": 0.5092,
      "step": 630
    },
    {
      "epoch": 1.6243654822335025,
      "grad_norm": 1.6915607452392578,
      "learning_rate": 2.292724196277496e-05,
      "loss": 0.5964,
      "step": 640
    },
    {
      "epoch": 1.649746192893401,
      "grad_norm": 3.0646092891693115,
      "learning_rate": 2.2504230118443317e-05,
      "loss": 0.4237,
      "step": 650
    },
    {
      "epoch": 1.6751269035532994,
      "grad_norm": 2.2048497200012207,
      "learning_rate": 2.2081218274111677e-05,
      "loss": 0.3713,
      "step": 660
    },
    {
      "epoch": 1.700507614213198,
      "grad_norm": 3.50895094871521,
      "learning_rate": 2.1658206429780033e-05,
      "loss": 0.4798,
      "step": 670
    },
    {
      "epoch": 1.7258883248730963,
      "grad_norm": 2.0043256282806396,
      "learning_rate": 2.1235194585448394e-05,
      "loss": 0.583,
      "step": 680
    },
    {
      "epoch": 1.751269035532995,
      "grad_norm": 1.845831274986267,
      "learning_rate": 2.0812182741116754e-05,
      "loss": 0.5429,
      "step": 690
    },
    {
      "epoch": 1.7766497461928934,
      "grad_norm": 1.7165807485580444,
      "learning_rate": 2.038917089678511e-05,
      "loss": 0.4464,
      "step": 700
    },
    {
      "epoch": 1.8020304568527918,
      "grad_norm": 1.361329436302185,
      "learning_rate": 1.996615905245347e-05,
      "loss": 0.4246,
      "step": 710
    },
    {
      "epoch": 1.8274111675126905,
      "grad_norm": 2.407137870788574,
      "learning_rate": 1.9543147208121827e-05,
      "loss": 0.4645,
      "step": 720
    },
    {
      "epoch": 1.8527918781725887,
      "grad_norm": 2.324209213256836,
      "learning_rate": 1.9120135363790187e-05,
      "loss": 0.5234,
      "step": 730
    },
    {
      "epoch": 1.8781725888324874,
      "grad_norm": 3.296576738357544,
      "learning_rate": 1.8697123519458544e-05,
      "loss": 0.557,
      "step": 740
    },
    {
      "epoch": 1.9035532994923858,
      "grad_norm": 2.2372303009033203,
      "learning_rate": 1.8274111675126904e-05,
      "loss": 0.4279,
      "step": 750
    },
    {
      "epoch": 1.9289340101522843,
      "grad_norm": 4.05513334274292,
      "learning_rate": 1.785109983079526e-05,
      "loss": 0.4958,
      "step": 760
    },
    {
      "epoch": 1.9543147208121827,
      "grad_norm": 1.2267847061157227,
      "learning_rate": 1.7428087986463625e-05,
      "loss": 0.4711,
      "step": 770
    },
    {
      "epoch": 1.9796954314720812,
      "grad_norm": 2.452728033065796,
      "learning_rate": 1.700507614213198e-05,
      "loss": 0.4696,
      "step": 780
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.47782018780708313,
      "eval_runtime": 8.5194,
      "eval_samples_per_second": 92.495,
      "eval_steps_per_second": 11.621,
      "step": 788
    },
    {
      "epoch": 2.00507614213198,
      "grad_norm": 1.9264096021652222,
      "learning_rate": 1.658206429780034e-05,
      "loss": 0.4061,
      "step": 790
    },
    {
      "epoch": 2.030456852791878,
      "grad_norm": 1.578764796257019,
      "learning_rate": 1.6159052453468698e-05,
      "loss": 0.4168,
      "step": 800
    },
    {
      "epoch": 2.0558375634517767,
      "grad_norm": 5.699263095855713,
      "learning_rate": 1.5736040609137055e-05,
      "loss": 0.4339,
      "step": 810
    },
    {
      "epoch": 2.081218274111675,
      "grad_norm": 3.1477041244506836,
      "learning_rate": 1.5313028764805415e-05,
      "loss": 0.3623,
      "step": 820
    },
    {
      "epoch": 2.1065989847715736,
      "grad_norm": 2.8351192474365234,
      "learning_rate": 1.4890016920473774e-05,
      "loss": 0.446,
      "step": 830
    },
    {
      "epoch": 2.1319796954314723,
      "grad_norm": 6.005784511566162,
      "learning_rate": 1.4467005076142132e-05,
      "loss": 0.3802,
      "step": 840
    },
    {
      "epoch": 2.1573604060913705,
      "grad_norm": 3.5085108280181885,
      "learning_rate": 1.404399323181049e-05,
      "loss": 0.4335,
      "step": 850
    },
    {
      "epoch": 2.182741116751269,
      "grad_norm": 2.457703113555908,
      "learning_rate": 1.362098138747885e-05,
      "loss": 0.3866,
      "step": 860
    },
    {
      "epoch": 2.2081218274111674,
      "grad_norm": 6.848071098327637,
      "learning_rate": 1.3197969543147209e-05,
      "loss": 0.3146,
      "step": 870
    },
    {
      "epoch": 2.233502538071066,
      "grad_norm": 2.1162843704223633,
      "learning_rate": 1.2774957698815568e-05,
      "loss": 0.3747,
      "step": 880
    },
    {
      "epoch": 2.2588832487309647,
      "grad_norm": 2.40061354637146,
      "learning_rate": 1.2351945854483926e-05,
      "loss": 0.5355,
      "step": 890
    },
    {
      "epoch": 2.284263959390863,
      "grad_norm": 12.88298225402832,
      "learning_rate": 1.1928934010152284e-05,
      "loss": 0.3908,
      "step": 900
    },
    {
      "epoch": 2.3096446700507616,
      "grad_norm": 6.1054463386535645,
      "learning_rate": 1.1505922165820643e-05,
      "loss": 0.3276,
      "step": 910
    },
    {
      "epoch": 2.33502538071066,
      "grad_norm": 16.432769775390625,
      "learning_rate": 1.1082910321489003e-05,
      "loss": 0.2436,
      "step": 920
    },
    {
      "epoch": 2.3604060913705585,
      "grad_norm": 9.989404678344727,
      "learning_rate": 1.0659898477157361e-05,
      "loss": 0.5455,
      "step": 930
    },
    {
      "epoch": 2.3857868020304567,
      "grad_norm": 3.592336893081665,
      "learning_rate": 1.023688663282572e-05,
      "loss": 0.467,
      "step": 940
    },
    {
      "epoch": 2.4111675126903553,
      "grad_norm": 4.606255531311035,
      "learning_rate": 9.813874788494078e-06,
      "loss": 0.4826,
      "step": 950
    },
    {
      "epoch": 2.436548223350254,
      "grad_norm": 2.08891224861145,
      "learning_rate": 9.390862944162437e-06,
      "loss": 0.2995,
      "step": 960
    },
    {
      "epoch": 2.4619289340101522,
      "grad_norm": 4.196601867675781,
      "learning_rate": 8.967851099830795e-06,
      "loss": 0.5598,
      "step": 970
    },
    {
      "epoch": 2.487309644670051,
      "grad_norm": 2.653444290161133,
      "learning_rate": 8.544839255499154e-06,
      "loss": 0.4471,
      "step": 980
    },
    {
      "epoch": 2.512690355329949,
      "grad_norm": 1.8868201971054077,
      "learning_rate": 8.121827411167512e-06,
      "loss": 0.4616,
      "step": 990
    },
    {
      "epoch": 2.5380710659898478,
      "grad_norm": 5.2379045486450195,
      "learning_rate": 7.698815566835872e-06,
      "loss": 0.2389,
      "step": 1000
    }
  ],
  "logging_steps": 10,
  "max_steps": 1182,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1059739189248000.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}