|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9991537376586743,
  "eval_steps": 500,
  "global_step": 1329,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.022566995768688293,
      "grad_norm": 1.1096520316172054,
      "learning_rate": 5e-06,
      "loss": 0.744,
      "step": 10
    },
    {
      "epoch": 0.045133991537376586,
      "grad_norm": 0.9122978446333387,
      "learning_rate": 5e-06,
      "loss": 0.6807,
      "step": 20
    },
    {
      "epoch": 0.06770098730606489,
      "grad_norm": 0.6346075490211303,
      "learning_rate": 5e-06,
      "loss": 0.6632,
      "step": 30
    },
    {
      "epoch": 0.09026798307475317,
      "grad_norm": 0.644158557259213,
      "learning_rate": 5e-06,
      "loss": 0.6406,
      "step": 40
    },
    {
      "epoch": 0.11283497884344147,
      "grad_norm": 0.7926751486847664,
      "learning_rate": 5e-06,
      "loss": 0.634,
      "step": 50
    },
    {
      "epoch": 0.13540197461212977,
      "grad_norm": 0.6355148794198717,
      "learning_rate": 5e-06,
      "loss": 0.6334,
      "step": 60
    },
    {
      "epoch": 0.15796897038081806,
      "grad_norm": 0.7184290150899074,
      "learning_rate": 5e-06,
      "loss": 0.629,
      "step": 70
    },
    {
      "epoch": 0.18053596614950634,
      "grad_norm": 0.8485255099309241,
      "learning_rate": 5e-06,
      "loss": 0.6308,
      "step": 80
    },
    {
      "epoch": 0.20310296191819463,
      "grad_norm": 0.907415069912097,
      "learning_rate": 5e-06,
      "loss": 0.6284,
      "step": 90
    },
    {
      "epoch": 0.22566995768688294,
      "grad_norm": 0.6791580454456693,
      "learning_rate": 5e-06,
      "loss": 0.6275,
      "step": 100
    },
    {
      "epoch": 0.24823695345557123,
      "grad_norm": 0.5868952135190455,
      "learning_rate": 5e-06,
      "loss": 0.6181,
      "step": 110
    },
    {
      "epoch": 0.27080394922425954,
      "grad_norm": 0.7185283136864035,
      "learning_rate": 5e-06,
      "loss": 0.6174,
      "step": 120
    },
    {
      "epoch": 0.2933709449929478,
      "grad_norm": 0.6793470563217162,
      "learning_rate": 5e-06,
      "loss": 0.6187,
      "step": 130
    },
    {
      "epoch": 0.3159379407616361,
      "grad_norm": 0.6866370437838811,
      "learning_rate": 5e-06,
      "loss": 0.6136,
      "step": 140
    },
    {
      "epoch": 0.3385049365303244,
      "grad_norm": 0.7624542009212121,
      "learning_rate": 5e-06,
      "loss": 0.6139,
      "step": 150
    },
    {
      "epoch": 0.3610719322990127,
      "grad_norm": 0.5670049736454741,
      "learning_rate": 5e-06,
      "loss": 0.6158,
      "step": 160
    },
    {
      "epoch": 0.383638928067701,
      "grad_norm": 0.674515645964626,
      "learning_rate": 5e-06,
      "loss": 0.612,
      "step": 170
    },
    {
      "epoch": 0.40620592383638926,
      "grad_norm": 0.6395111816709952,
      "learning_rate": 5e-06,
      "loss": 0.6087,
      "step": 180
    },
    {
      "epoch": 0.4287729196050776,
      "grad_norm": 0.7228729780890316,
      "learning_rate": 5e-06,
      "loss": 0.6034,
      "step": 190
    },
    {
      "epoch": 0.4513399153737659,
      "grad_norm": 0.6359367979025581,
      "learning_rate": 5e-06,
      "loss": 0.6099,
      "step": 200
    },
    {
      "epoch": 0.47390691114245415,
      "grad_norm": 0.5529928726958753,
      "learning_rate": 5e-06,
      "loss": 0.6063,
      "step": 210
    },
    {
      "epoch": 0.49647390691114246,
      "grad_norm": 0.7203121657077105,
      "learning_rate": 5e-06,
      "loss": 0.5981,
      "step": 220
    },
    {
      "epoch": 0.5190409026798307,
      "grad_norm": 0.5884566138797194,
      "learning_rate": 5e-06,
      "loss": 0.6069,
      "step": 230
    },
    {
      "epoch": 0.5416078984485191,
      "grad_norm": 0.5680258676280858,
      "learning_rate": 5e-06,
      "loss": 0.6057,
      "step": 240
    },
    {
      "epoch": 0.5641748942172073,
      "grad_norm": 0.5976334720348693,
      "learning_rate": 5e-06,
      "loss": 0.5999,
      "step": 250
    },
    {
      "epoch": 0.5867418899858956,
      "grad_norm": 0.7322668494196893,
      "learning_rate": 5e-06,
      "loss": 0.6026,
      "step": 260
    },
    {
      "epoch": 0.609308885754584,
      "grad_norm": 0.5581559480474937,
      "learning_rate": 5e-06,
      "loss": 0.598,
      "step": 270
    },
    {
      "epoch": 0.6318758815232722,
      "grad_norm": 0.5458883179876004,
      "learning_rate": 5e-06,
      "loss": 0.6045,
      "step": 280
    },
    {
      "epoch": 0.6544428772919605,
      "grad_norm": 0.6056045571233394,
      "learning_rate": 5e-06,
      "loss": 0.598,
      "step": 290
    },
    {
      "epoch": 0.6770098730606487,
      "grad_norm": 0.6222844032859247,
      "learning_rate": 5e-06,
      "loss": 0.6041,
      "step": 300
    },
    {
      "epoch": 0.6995768688293371,
      "grad_norm": 0.5534363061428031,
      "learning_rate": 5e-06,
      "loss": 0.5973,
      "step": 310
    },
    {
      "epoch": 0.7221438645980254,
      "grad_norm": 0.6988575619337959,
      "learning_rate": 5e-06,
      "loss": 0.6006,
      "step": 320
    },
    {
      "epoch": 0.7447108603667136,
      "grad_norm": 0.5794083927351209,
      "learning_rate": 5e-06,
      "loss": 0.5963,
      "step": 330
    },
    {
      "epoch": 0.767277856135402,
      "grad_norm": 0.6122372451743079,
      "learning_rate": 5e-06,
      "loss": 0.5957,
      "step": 340
    },
    {
      "epoch": 0.7898448519040903,
      "grad_norm": 0.7493879285669082,
      "learning_rate": 5e-06,
      "loss": 0.597,
      "step": 350
    },
    {
      "epoch": 0.8124118476727785,
      "grad_norm": 0.5603389574893285,
      "learning_rate": 5e-06,
      "loss": 0.596,
      "step": 360
    },
    {
      "epoch": 0.8349788434414669,
      "grad_norm": 0.5972337008615317,
      "learning_rate": 5e-06,
      "loss": 0.6031,
      "step": 370
    },
    {
      "epoch": 0.8575458392101551,
      "grad_norm": 0.6488471089146787,
      "learning_rate": 5e-06,
      "loss": 0.5999,
      "step": 380
    },
    {
      "epoch": 0.8801128349788434,
      "grad_norm": 0.5813626386687312,
      "learning_rate": 5e-06,
      "loss": 0.5938,
      "step": 390
    },
    {
      "epoch": 0.9026798307475318,
      "grad_norm": 0.5316271715903418,
      "learning_rate": 5e-06,
      "loss": 0.5961,
      "step": 400
    },
    {
      "epoch": 0.92524682651622,
      "grad_norm": 0.6048061064813547,
      "learning_rate": 5e-06,
      "loss": 0.5941,
      "step": 410
    },
    {
      "epoch": 0.9478138222849083,
      "grad_norm": 0.5936532217112329,
      "learning_rate": 5e-06,
      "loss": 0.5947,
      "step": 420
    },
    {
      "epoch": 0.9703808180535967,
      "grad_norm": 0.574531708859424,
      "learning_rate": 5e-06,
      "loss": 0.5888,
      "step": 430
    },
    {
      "epoch": 0.9929478138222849,
      "grad_norm": 0.6711323560467252,
      "learning_rate": 5e-06,
      "loss": 0.5936,
      "step": 440
    },
    {
      "epoch": 0.9997179125528914,
      "eval_loss": 0.5913681387901306,
      "eval_runtime": 697.6116,
      "eval_samples_per_second": 17.117,
      "eval_steps_per_second": 0.536,
      "step": 443
    },
    {
      "epoch": 1.0155148095909732,
      "grad_norm": 0.7268447078847582,
      "learning_rate": 5e-06,
      "loss": 0.6006,
      "step": 450
    },
    {
      "epoch": 1.0380818053596614,
      "grad_norm": 0.5732383250485011,
      "learning_rate": 5e-06,
      "loss": 0.5274,
      "step": 460
    },
    {
      "epoch": 1.0606488011283497,
      "grad_norm": 0.6051041162305182,
      "learning_rate": 5e-06,
      "loss": 0.5421,
      "step": 470
    },
    {
      "epoch": 1.0832157968970382,
      "grad_norm": 0.5731582423193028,
      "learning_rate": 5e-06,
      "loss": 0.53,
      "step": 480
    },
    {
      "epoch": 1.1057827926657264,
      "grad_norm": 0.6128630592519895,
      "learning_rate": 5e-06,
      "loss": 0.5401,
      "step": 490
    },
    {
      "epoch": 1.1283497884344147,
      "grad_norm": 0.5904141940083983,
      "learning_rate": 5e-06,
      "loss": 0.5387,
      "step": 500
    },
    {
      "epoch": 1.150916784203103,
      "grad_norm": 0.7022917090539815,
      "learning_rate": 5e-06,
      "loss": 0.5399,
      "step": 510
    },
    {
      "epoch": 1.1734837799717912,
      "grad_norm": 0.5731738347454584,
      "learning_rate": 5e-06,
      "loss": 0.5324,
      "step": 520
    },
    {
      "epoch": 1.1960507757404795,
      "grad_norm": 0.7246031496920289,
      "learning_rate": 5e-06,
      "loss": 0.5387,
      "step": 530
    },
    {
      "epoch": 1.2186177715091677,
      "grad_norm": 0.5696614848674469,
      "learning_rate": 5e-06,
      "loss": 0.536,
      "step": 540
    },
    {
      "epoch": 1.2411847672778562,
      "grad_norm": 0.6542521164361487,
      "learning_rate": 5e-06,
      "loss": 0.5408,
      "step": 550
    },
    {
      "epoch": 1.2637517630465445,
      "grad_norm": 0.6706655302808069,
      "learning_rate": 5e-06,
      "loss": 0.5427,
      "step": 560
    },
    {
      "epoch": 1.2863187588152327,
      "grad_norm": 0.5713802367676806,
      "learning_rate": 5e-06,
      "loss": 0.54,
      "step": 570
    },
    {
      "epoch": 1.308885754583921,
      "grad_norm": 0.5720250086292773,
      "learning_rate": 5e-06,
      "loss": 0.5398,
      "step": 580
    },
    {
      "epoch": 1.3314527503526092,
      "grad_norm": 0.5519092322076451,
      "learning_rate": 5e-06,
      "loss": 0.54,
      "step": 590
    },
    {
      "epoch": 1.3540197461212977,
      "grad_norm": 0.6061004913438247,
      "learning_rate": 5e-06,
      "loss": 0.5453,
      "step": 600
    },
    {
      "epoch": 1.376586741889986,
      "grad_norm": 0.54008499279403,
      "learning_rate": 5e-06,
      "loss": 0.538,
      "step": 610
    },
    {
      "epoch": 1.3991537376586742,
      "grad_norm": 0.5739979207854969,
      "learning_rate": 5e-06,
      "loss": 0.5422,
      "step": 620
    },
    {
      "epoch": 1.4217207334273625,
      "grad_norm": 0.5474321427386326,
      "learning_rate": 5e-06,
      "loss": 0.5404,
      "step": 630
    },
    {
      "epoch": 1.4442877291960508,
      "grad_norm": 0.6486717244236897,
      "learning_rate": 5e-06,
      "loss": 0.54,
      "step": 640
    },
    {
      "epoch": 1.466854724964739,
      "grad_norm": 0.5674328769717695,
      "learning_rate": 5e-06,
      "loss": 0.5409,
      "step": 650
    },
    {
      "epoch": 1.4894217207334273,
      "grad_norm": 0.6437778353374325,
      "learning_rate": 5e-06,
      "loss": 0.5447,
      "step": 660
    },
    {
      "epoch": 1.5119887165021155,
      "grad_norm": 0.5740743217989274,
      "learning_rate": 5e-06,
      "loss": 0.5393,
      "step": 670
    },
    {
      "epoch": 1.5345557122708038,
      "grad_norm": 0.5530367304846456,
      "learning_rate": 5e-06,
      "loss": 0.5336,
      "step": 680
    },
    {
      "epoch": 1.5571227080394923,
      "grad_norm": 0.5896556944460669,
      "learning_rate": 5e-06,
      "loss": 0.5357,
      "step": 690
    },
    {
      "epoch": 1.5796897038081805,
      "grad_norm": 0.5702119466199734,
      "learning_rate": 5e-06,
      "loss": 0.5399,
      "step": 700
    },
    {
      "epoch": 1.6022566995768688,
      "grad_norm": 0.5457633328629294,
      "learning_rate": 5e-06,
      "loss": 0.5427,
      "step": 710
    },
    {
      "epoch": 1.6248236953455573,
      "grad_norm": 0.5813985653049328,
      "learning_rate": 5e-06,
      "loss": 0.5507,
      "step": 720
    },
    {
      "epoch": 1.6473906911142455,
      "grad_norm": 0.5773592052172303,
      "learning_rate": 5e-06,
      "loss": 0.5339,
      "step": 730
    },
    {
      "epoch": 1.6699576868829338,
      "grad_norm": 0.7024422394795109,
      "learning_rate": 5e-06,
      "loss": 0.5498,
      "step": 740
    },
    {
      "epoch": 1.692524682651622,
      "grad_norm": 0.6198371019281352,
      "learning_rate": 5e-06,
      "loss": 0.5412,
      "step": 750
    },
    {
      "epoch": 1.7150916784203103,
      "grad_norm": 0.6206766376942175,
      "learning_rate": 5e-06,
      "loss": 0.5402,
      "step": 760
    },
    {
      "epoch": 1.7376586741889986,
      "grad_norm": 0.8876798469818571,
      "learning_rate": 5e-06,
      "loss": 0.5414,
      "step": 770
    },
    {
      "epoch": 1.7602256699576868,
      "grad_norm": 0.6454735529692549,
      "learning_rate": 5e-06,
      "loss": 0.5367,
      "step": 780
    },
    {
      "epoch": 1.782792665726375,
      "grad_norm": 0.5640065520091834,
      "learning_rate": 5e-06,
      "loss": 0.54,
      "step": 790
    },
    {
      "epoch": 1.8053596614950633,
      "grad_norm": 0.6437842162195886,
      "learning_rate": 5e-06,
      "loss": 0.5351,
      "step": 800
    },
    {
      "epoch": 1.8279266572637518,
      "grad_norm": 0.5364569494187976,
      "learning_rate": 5e-06,
      "loss": 0.5458,
      "step": 810
    },
    {
      "epoch": 1.85049365303244,
      "grad_norm": 0.6372467429532644,
      "learning_rate": 5e-06,
      "loss": 0.5332,
      "step": 820
    },
    {
      "epoch": 1.8730606488011283,
      "grad_norm": 0.5667642300890294,
      "learning_rate": 5e-06,
      "loss": 0.5453,
      "step": 830
    },
    {
      "epoch": 1.8956276445698168,
      "grad_norm": 0.6292324542663285,
      "learning_rate": 5e-06,
      "loss": 0.5454,
      "step": 840
    },
    {
      "epoch": 1.918194640338505,
      "grad_norm": 0.5359738824194854,
      "learning_rate": 5e-06,
      "loss": 0.5334,
      "step": 850
    },
    {
      "epoch": 1.9407616361071933,
      "grad_norm": 0.6110789865963517,
      "learning_rate": 5e-06,
      "loss": 0.5384,
      "step": 860
    },
    {
      "epoch": 1.9633286318758816,
      "grad_norm": 0.6818169983817923,
      "learning_rate": 5e-06,
      "loss": 0.5449,
      "step": 870
    },
    {
      "epoch": 1.9858956276445698,
      "grad_norm": 0.607679292904791,
      "learning_rate": 5e-06,
      "loss": 0.5431,
      "step": 880
    },
    {
      "epoch": 1.9994358251057829,
      "eval_loss": 0.587314784526825,
      "eval_runtime": 695.4072,
      "eval_samples_per_second": 17.171,
      "eval_steps_per_second": 0.538,
      "step": 886
    },
    {
      "epoch": 2.008462623413258,
      "grad_norm": 1.0551195477645454,
      "learning_rate": 5e-06,
      "loss": 0.567,
      "step": 890
    },
    {
      "epoch": 2.0310296191819464,
      "grad_norm": 0.6627990985792488,
      "learning_rate": 5e-06,
      "loss": 0.4782,
      "step": 900
    },
    {
      "epoch": 2.0535966149506346,
      "grad_norm": 0.6383666257677532,
      "learning_rate": 5e-06,
      "loss": 0.4798,
      "step": 910
    },
    {
      "epoch": 2.076163610719323,
      "grad_norm": 0.6389368523448007,
      "learning_rate": 5e-06,
      "loss": 0.4721,
      "step": 920
    },
    {
      "epoch": 2.098730606488011,
      "grad_norm": 0.5722750118689505,
      "learning_rate": 5e-06,
      "loss": 0.4792,
      "step": 930
    },
    {
      "epoch": 2.1212976022566994,
      "grad_norm": 0.6088961879653472,
      "learning_rate": 5e-06,
      "loss": 0.4833,
      "step": 940
    },
    {
      "epoch": 2.143864598025388,
      "grad_norm": 0.6394884138596829,
      "learning_rate": 5e-06,
      "loss": 0.485,
      "step": 950
    },
    {
      "epoch": 2.1664315937940763,
      "grad_norm": 0.6101358628155096,
      "learning_rate": 5e-06,
      "loss": 0.4811,
      "step": 960
    },
    {
      "epoch": 2.1889985895627646,
      "grad_norm": 0.7097822402886351,
      "learning_rate": 5e-06,
      "loss": 0.4792,
      "step": 970
    },
    {
      "epoch": 2.211565585331453,
      "grad_norm": 0.6513094055573179,
      "learning_rate": 5e-06,
      "loss": 0.4741,
      "step": 980
    },
    {
      "epoch": 2.234132581100141,
      "grad_norm": 0.6185979196999992,
      "learning_rate": 5e-06,
      "loss": 0.4741,
      "step": 990
    },
    {
      "epoch": 2.2566995768688294,
      "grad_norm": 0.5898104504895565,
      "learning_rate": 5e-06,
      "loss": 0.4829,
      "step": 1000
    },
    {
      "epoch": 2.2792665726375176,
      "grad_norm": 0.6044370773906275,
      "learning_rate": 5e-06,
      "loss": 0.4892,
      "step": 1010
    },
    {
      "epoch": 2.301833568406206,
      "grad_norm": 0.7060015814845905,
      "learning_rate": 5e-06,
      "loss": 0.4835,
      "step": 1020
    },
    {
      "epoch": 2.324400564174894,
      "grad_norm": 0.6277702807897904,
      "learning_rate": 5e-06,
      "loss": 0.4877,
      "step": 1030
    },
    {
      "epoch": 2.3469675599435824,
      "grad_norm": 0.7014468153576923,
      "learning_rate": 5e-06,
      "loss": 0.4857,
      "step": 1040
    },
    {
      "epoch": 2.3695345557122707,
      "grad_norm": 0.5979537329375223,
      "learning_rate": 5e-06,
      "loss": 0.4836,
      "step": 1050
    },
    {
      "epoch": 2.392101551480959,
      "grad_norm": 0.5724278822524301,
      "learning_rate": 5e-06,
      "loss": 0.4827,
      "step": 1060
    },
    {
      "epoch": 2.414668547249647,
      "grad_norm": 0.6094804305902596,
      "learning_rate": 5e-06,
      "loss": 0.4839,
      "step": 1070
    },
    {
      "epoch": 2.4372355430183354,
      "grad_norm": 0.6584799733332577,
      "learning_rate": 5e-06,
      "loss": 0.4872,
      "step": 1080
    },
    {
      "epoch": 2.459802538787024,
      "grad_norm": 0.577947147177415,
      "learning_rate": 5e-06,
      "loss": 0.4853,
      "step": 1090
    },
    {
      "epoch": 2.4823695345557124,
      "grad_norm": 0.5998433634691931,
      "learning_rate": 5e-06,
      "loss": 0.4884,
      "step": 1100
    },
    {
      "epoch": 2.5049365303244007,
      "grad_norm": 0.5965762031241623,
      "learning_rate": 5e-06,
      "loss": 0.4918,
      "step": 1110
    },
    {
      "epoch": 2.527503526093089,
      "grad_norm": 0.6554403444343672,
      "learning_rate": 5e-06,
      "loss": 0.4825,
      "step": 1120
    },
    {
      "epoch": 2.550070521861777,
      "grad_norm": 0.574534309523368,
      "learning_rate": 5e-06,
      "loss": 0.4875,
      "step": 1130
    },
    {
      "epoch": 2.5726375176304654,
      "grad_norm": 0.5681388329823351,
      "learning_rate": 5e-06,
      "loss": 0.4857,
      "step": 1140
    },
    {
      "epoch": 2.5952045133991537,
      "grad_norm": 0.7384496157662809,
      "learning_rate": 5e-06,
      "loss": 0.4866,
      "step": 1150
    },
    {
      "epoch": 2.617771509167842,
      "grad_norm": 0.6076094430353052,
      "learning_rate": 5e-06,
      "loss": 0.4902,
      "step": 1160
    },
    {
      "epoch": 2.64033850493653,
      "grad_norm": 0.576931484268167,
      "learning_rate": 5e-06,
      "loss": 0.4792,
      "step": 1170
    },
    {
      "epoch": 2.6629055007052185,
      "grad_norm": 0.6130340243205236,
      "learning_rate": 5e-06,
      "loss": 0.4902,
      "step": 1180
    },
    {
      "epoch": 2.685472496473907,
      "grad_norm": 0.5574302117676829,
      "learning_rate": 5e-06,
      "loss": 0.4908,
      "step": 1190
    },
    {
      "epoch": 2.7080394922425954,
      "grad_norm": 0.6775339732075475,
      "learning_rate": 5e-06,
      "loss": 0.4894,
      "step": 1200
    },
    {
      "epoch": 2.7306064880112837,
      "grad_norm": 0.5758452111087213,
      "learning_rate": 5e-06,
      "loss": 0.4907,
      "step": 1210
    },
    {
      "epoch": 2.753173483779972,
      "grad_norm": 0.5991383407736889,
      "learning_rate": 5e-06,
      "loss": 0.4874,
      "step": 1220
    },
    {
      "epoch": 2.77574047954866,
      "grad_norm": 0.6337617318240264,
      "learning_rate": 5e-06,
      "loss": 0.4939,
      "step": 1230
    },
    {
      "epoch": 2.7983074753173485,
      "grad_norm": 0.6226058340602839,
      "learning_rate": 5e-06,
      "loss": 0.4921,
      "step": 1240
    },
    {
      "epoch": 2.8208744710860367,
      "grad_norm": 0.5635539209906445,
      "learning_rate": 5e-06,
      "loss": 0.4923,
      "step": 1250
    },
    {
      "epoch": 2.843441466854725,
      "grad_norm": 0.5836376386084916,
      "learning_rate": 5e-06,
      "loss": 0.4942,
      "step": 1260
    },
    {
      "epoch": 2.8660084626234132,
      "grad_norm": 0.5834760483019259,
      "learning_rate": 5e-06,
      "loss": 0.4912,
      "step": 1270
    },
    {
      "epoch": 2.8885754583921015,
      "grad_norm": 0.5453775847159935,
      "learning_rate": 5e-06,
      "loss": 0.4891,
      "step": 1280
    },
    {
      "epoch": 2.9111424541607898,
      "grad_norm": 0.5928177413547667,
      "learning_rate": 5e-06,
      "loss": 0.493,
      "step": 1290
    },
    {
      "epoch": 2.933709449929478,
      "grad_norm": 0.579440975363755,
      "learning_rate": 5e-06,
      "loss": 0.4886,
      "step": 1300
    },
    {
      "epoch": 2.9562764456981663,
      "grad_norm": 0.6369568326270619,
      "learning_rate": 5e-06,
      "loss": 0.4925,
      "step": 1310
    },
    {
      "epoch": 2.9788434414668545,
      "grad_norm": 0.635755019164249,
      "learning_rate": 5e-06,
      "loss": 0.489,
      "step": 1320
    },
    {
      "epoch": 2.9991537376586743,
      "eval_loss": 0.6002675294876099,
      "eval_runtime": 696.4593,
      "eval_samples_per_second": 17.145,
      "eval_steps_per_second": 0.537,
      "step": 1329
    },
    {
      "epoch": 2.9991537376586743,
      "step": 1329,
      "total_flos": 5064195066298368.0,
      "train_loss": 0.5472425596439542,
      "train_runtime": 121923.5756,
      "train_samples_per_second": 5.582,
      "train_steps_per_second": 0.011
    }
  ],
  "logging_steps": 10,
  "max_steps": 1329,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5064195066298368.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}