{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 200,
  "global_step": 2193,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.013679890560875513,
      "grad_norm": 4.306338197381437,
      "learning_rate": 4.5454545454545457e-07,
      "loss": 0.9255,
      "step": 10
    },
    {
      "epoch": 0.027359781121751026,
      "grad_norm": 2.40394571117872,
      "learning_rate": 9.090909090909091e-07,
      "loss": 0.863,
      "step": 20
    },
    {
      "epoch": 0.04103967168262654,
      "grad_norm": 1.8078902930615164,
      "learning_rate": 1.3636363636363636e-06,
      "loss": 0.7599,
      "step": 30
    },
    {
      "epoch": 0.05471956224350205,
      "grad_norm": 1.4220385901238113,
      "learning_rate": 1.8181818181818183e-06,
      "loss": 0.7184,
      "step": 40
    },
    {
      "epoch": 0.06839945280437756,
      "grad_norm": 1.3537605726648354,
      "learning_rate": 2.2727272727272728e-06,
      "loss": 0.6981,
      "step": 50
    },
    {
      "epoch": 0.08207934336525308,
      "grad_norm": 1.2344983469823567,
      "learning_rate": 2.7272727272727272e-06,
      "loss": 0.6943,
      "step": 60
    },
    {
      "epoch": 0.09575923392612859,
      "grad_norm": 1.1119101183337694,
      "learning_rate": 3.181818181818182e-06,
      "loss": 0.6687,
      "step": 70
    },
    {
      "epoch": 0.1094391244870041,
      "grad_norm": 1.2164492298241776,
      "learning_rate": 3.6363636363636366e-06,
      "loss": 0.6694,
      "step": 80
    },
    {
      "epoch": 0.12311901504787962,
      "grad_norm": 1.3446219804481927,
      "learning_rate": 4.0909090909090915e-06,
      "loss": 0.6602,
      "step": 90
    },
    {
      "epoch": 0.13679890560875513,
      "grad_norm": 1.2505229649313154,
      "learning_rate": 4.5454545454545455e-06,
      "loss": 0.6514,
      "step": 100
    },
    {
      "epoch": 0.15047879616963064,
      "grad_norm": 1.1279331262953187,
      "learning_rate": 5e-06,
      "loss": 0.6413,
      "step": 110
    },
    {
      "epoch": 0.16415868673050615,
      "grad_norm": 1.246045877795659,
      "learning_rate": 5.4545454545454545e-06,
      "loss": 0.6584,
      "step": 120
    },
    {
      "epoch": 0.17783857729138167,
      "grad_norm": 1.1785589469428788,
      "learning_rate": 5.90909090909091e-06,
      "loss": 0.6292,
      "step": 130
    },
    {
      "epoch": 0.19151846785225718,
      "grad_norm": 1.219408849788821,
      "learning_rate": 6.363636363636364e-06,
      "loss": 0.6487,
      "step": 140
    },
    {
      "epoch": 0.2051983584131327,
      "grad_norm": 1.1571538455846238,
      "learning_rate": 6.818181818181818e-06,
      "loss": 0.6294,
      "step": 150
    },
    {
      "epoch": 0.2188782489740082,
      "grad_norm": 1.3558018996617476,
      "learning_rate": 7.272727272727273e-06,
      "loss": 0.6443,
      "step": 160
    },
    {
      "epoch": 0.23255813953488372,
      "grad_norm": 1.5617060190415994,
      "learning_rate": 7.727272727272727e-06,
      "loss": 0.6411,
      "step": 170
    },
    {
      "epoch": 0.24623803009575923,
      "grad_norm": 1.235429384148413,
      "learning_rate": 8.181818181818183e-06,
      "loss": 0.6385,
      "step": 180
    },
    {
      "epoch": 0.25991792065663477,
      "grad_norm": 1.2646278028286053,
      "learning_rate": 8.636363636363637e-06,
      "loss": 0.6466,
      "step": 190
    },
    {
      "epoch": 0.27359781121751026,
      "grad_norm": 1.3092856333044969,
      "learning_rate": 9.090909090909091e-06,
      "loss": 0.6185,
      "step": 200
    },
    {
      "epoch": 0.27359781121751026,
      "eval_loss": 0.6430878043174744,
      "eval_runtime": 168.6747,
      "eval_samples_per_second": 30.817,
      "eval_steps_per_second": 3.854,
      "step": 200
    },
    {
      "epoch": 0.2872777017783858,
      "grad_norm": 1.1897854638522036,
      "learning_rate": 9.545454545454547e-06,
      "loss": 0.6235,
      "step": 210
    },
    {
      "epoch": 0.3009575923392613,
      "grad_norm": 1.249148897978773,
      "learning_rate": 1e-05,
      "loss": 0.6305,
      "step": 220
    },
    {
      "epoch": 0.3146374829001368,
      "grad_norm": 1.1885808643667615,
      "learning_rate": 9.99936616472178e-06,
      "loss": 0.6308,
      "step": 230
    },
    {
      "epoch": 0.3283173734610123,
      "grad_norm": 1.250733802249208,
      "learning_rate": 9.997464819585985e-06,
      "loss": 0.6331,
      "step": 240
    },
    {
      "epoch": 0.34199726402188785,
      "grad_norm": 1.1199745780436605,
      "learning_rate": 9.994296446648463e-06,
      "loss": 0.6319,
      "step": 250
    },
    {
      "epoch": 0.35567715458276333,
      "grad_norm": 1.1180646470383142,
      "learning_rate": 9.989861849199833e-06,
      "loss": 0.6368,
      "step": 260
    },
    {
      "epoch": 0.3693570451436389,
      "grad_norm": 1.1411785528368101,
      "learning_rate": 9.984162151561814e-06,
      "loss": 0.628,
      "step": 270
    },
    {
      "epoch": 0.38303693570451436,
      "grad_norm": 1.0640206569253472,
      "learning_rate": 9.977198798802186e-06,
      "loss": 0.6191,
      "step": 280
    },
    {
      "epoch": 0.3967168262653899,
      "grad_norm": 1.2307131395813051,
      "learning_rate": 9.9689735563684e-06,
      "loss": 0.6311,
      "step": 290
    },
    {
      "epoch": 0.4103967168262654,
      "grad_norm": 1.1751145352265429,
      "learning_rate": 9.959488509639986e-06,
      "loss": 0.6204,
      "step": 300
    },
    {
      "epoch": 0.4240766073871409,
      "grad_norm": 1.1381135969095364,
      "learning_rate": 9.948746063399836e-06,
      "loss": 0.6185,
      "step": 310
    },
    {
      "epoch": 0.4377564979480164,
      "grad_norm": 1.2008146993612843,
      "learning_rate": 9.936748941224514e-06,
      "loss": 0.63,
      "step": 320
    },
    {
      "epoch": 0.45143638850889195,
      "grad_norm": 1.0906550245462099,
      "learning_rate": 9.923500184793728e-06,
      "loss": 0.6325,
      "step": 330
    },
    {
      "epoch": 0.46511627906976744,
      "grad_norm": 1.2687828199372913,
      "learning_rate": 9.909003153119165e-06,
      "loss": 0.6202,
      "step": 340
    },
    {
      "epoch": 0.478796169630643,
      "grad_norm": 1.2051295685227352,
      "learning_rate": 9.893261521692865e-06,
      "loss": 0.6157,
      "step": 350
    },
    {
      "epoch": 0.49247606019151846,
      "grad_norm": 1.1391781521048607,
      "learning_rate": 9.876279281555363e-06,
      "loss": 0.6244,
      "step": 360
    },
    {
      "epoch": 0.506155950752394,
      "grad_norm": 1.0911712689339368,
      "learning_rate": 9.858060738283819e-06,
      "loss": 0.6209,
      "step": 370
    },
    {
      "epoch": 0.5198358413132695,
      "grad_norm": 1.1210123313032032,
      "learning_rate": 9.838610510900412e-06,
      "loss": 0.6168,
      "step": 380
    },
    {
      "epoch": 0.533515731874145,
      "grad_norm": 1.2189445592306896,
      "learning_rate": 9.817933530701258e-06,
      "loss": 0.6273,
      "step": 390
    },
    {
      "epoch": 0.5471956224350205,
      "grad_norm": 1.0483395986247415,
      "learning_rate": 9.79603504000615e-06,
      "loss": 0.6088,
      "step": 400
    },
    {
      "epoch": 0.5471956224350205,
      "eval_loss": 0.6271325945854187,
      "eval_runtime": 167.7353,
      "eval_samples_per_second": 30.989,
      "eval_steps_per_second": 3.875,
      "step": 400
    },
    {
      "epoch": 0.560875512995896,
      "grad_norm": 1.0660522163415433,
      "learning_rate": 9.772920590829471e-06,
      "loss": 0.6137,
      "step": 410
    },
    {
      "epoch": 0.5745554035567716,
      "grad_norm": 1.2363091668613064,
      "learning_rate": 9.748596043472547e-06,
      "loss": 0.6143,
      "step": 420
    },
    {
      "epoch": 0.5882352941176471,
      "grad_norm": 1.0981082723618094,
      "learning_rate": 9.723067565037874e-06,
      "loss": 0.6068,
      "step": 430
    },
    {
      "epoch": 0.6019151846785226,
      "grad_norm": 1.1847772259009521,
      "learning_rate": 9.696341627865548e-06,
      "loss": 0.6263,
      "step": 440
    },
    {
      "epoch": 0.615595075239398,
      "grad_norm": 1.179605990278435,
      "learning_rate": 9.668425007892298e-06,
      "loss": 0.624,
      "step": 450
    },
    {
      "epoch": 0.6292749658002736,
      "grad_norm": 1.1327982969979666,
      "learning_rate": 9.639324782933556e-06,
      "loss": 0.6115,
      "step": 460
    },
    {
      "epoch": 0.6429548563611491,
      "grad_norm": 1.1092926273561299,
      "learning_rate": 9.609048330888996e-06,
      "loss": 0.6231,
      "step": 470
    },
    {
      "epoch": 0.6566347469220246,
      "grad_norm": 1.2023856216186377,
      "learning_rate": 9.577603327871983e-06,
      "loss": 0.6087,
      "step": 480
    },
    {
      "epoch": 0.6703146374829001,
      "grad_norm": 1.140079893396528,
      "learning_rate": 9.544997746263409e-06,
      "loss": 0.6207,
      "step": 490
    },
    {
      "epoch": 0.6839945280437757,
      "grad_norm": 1.0956555350394377,
      "learning_rate": 9.511239852690429e-06,
      "loss": 0.5912,
      "step": 500
    },
    {
      "epoch": 0.6976744186046512,
      "grad_norm": 1.0991687302711715,
      "learning_rate": 9.476338205930593e-06,
      "loss": 0.6084,
      "step": 510
    },
    {
      "epoch": 0.7113543091655267,
      "grad_norm": 1.0969007693966542,
      "learning_rate": 9.440301654741889e-06,
      "loss": 0.6132,
      "step": 520
    },
    {
      "epoch": 0.7250341997264022,
      "grad_norm": 1.1125573118122185,
      "learning_rate": 9.403139335619302e-06,
      "loss": 0.6194,
      "step": 530
    },
    {
      "epoch": 0.7387140902872777,
      "grad_norm": 1.1809078700930535,
      "learning_rate": 9.36486067047838e-06,
      "loss": 0.5951,
      "step": 540
    },
    {
      "epoch": 0.7523939808481532,
      "grad_norm": 1.1864929076204318,
      "learning_rate": 9.325475364266474e-06,
      "loss": 0.6007,
      "step": 550
    },
    {
      "epoch": 0.7660738714090287,
      "grad_norm": 1.26583782519778,
      "learning_rate": 9.28499340250219e-06,
      "loss": 0.6146,
      "step": 560
    },
    {
      "epoch": 0.7797537619699042,
      "grad_norm": 1.076257354571428,
      "learning_rate": 9.24342504874373e-06,
      "loss": 0.6077,
      "step": 570
    },
    {
      "epoch": 0.7934336525307798,
      "grad_norm": 1.0622000039574993,
      "learning_rate": 9.200780841986717e-06,
      "loss": 0.6001,
      "step": 580
    },
    {
      "epoch": 0.8071135430916553,
      "grad_norm": 1.188474169737116,
      "learning_rate": 9.157071593992217e-06,
      "loss": 0.6289,
      "step": 590
    },
    {
      "epoch": 0.8207934336525308,
      "grad_norm": 1.1602582650897655,
      "learning_rate": 9.112308386545574e-06,
      "loss": 0.6083,
      "step": 600
    },
    {
      "epoch": 0.8207934336525308,
      "eval_loss": 0.6138648390769958,
      "eval_runtime": 169.037,
      "eval_samples_per_second": 30.751,
      "eval_steps_per_second": 3.845,
      "step": 600
    },
    {
      "epoch": 0.8344733242134063,
      "grad_norm": 1.1422692672202526,
      "learning_rate": 9.066502568646805e-06,
      "loss": 0.6028,
      "step": 610
    },
    {
      "epoch": 0.8481532147742818,
      "grad_norm": 1.1671002032829694,
      "learning_rate": 9.019665753633247e-06,
      "loss": 0.6025,
      "step": 620
    },
    {
      "epoch": 0.8618331053351573,
      "grad_norm": 1.1006027336870072,
      "learning_rate": 8.971809816235164e-06,
      "loss": 0.6072,
      "step": 630
    },
    {
      "epoch": 0.8755129958960328,
      "grad_norm": 1.14419096908491,
      "learning_rate": 8.922946889565119e-06,
      "loss": 0.6014,
      "step": 640
    },
    {
      "epoch": 0.8891928864569083,
      "grad_norm": 1.1499692013955807,
      "learning_rate": 8.873089362041797e-06,
      "loss": 0.584,
      "step": 650
    },
    {
      "epoch": 0.9028727770177839,
      "grad_norm": 1.0137383119426882,
      "learning_rate": 8.822249874249131e-06,
      "loss": 0.5929,
      "step": 660
    },
    {
      "epoch": 0.9165526675786594,
      "grad_norm": 1.0739771517494396,
      "learning_rate": 8.770441315731477e-06,
      "loss": 0.5954,
      "step": 670
    },
    {
      "epoch": 0.9302325581395349,
      "grad_norm": 1.066521535106681,
      "learning_rate": 8.717676821725679e-06,
      "loss": 0.5876,
      "step": 680
    },
    {
      "epoch": 0.9439124487004104,
      "grad_norm": 1.1050537240337106,
      "learning_rate": 8.663969769830824e-06,
      "loss": 0.6006,
      "step": 690
    },
    {
      "epoch": 0.957592339261286,
      "grad_norm": 1.0834708122941283,
      "learning_rate": 8.609333776616592e-06,
      "loss": 0.5974,
      "step": 700
    },
    {
      "epoch": 0.9712722298221614,
      "grad_norm": 1.2256953791044325,
      "learning_rate": 8.553782694170963e-06,
      "loss": 0.6043,
      "step": 710
    },
    {
      "epoch": 0.9849521203830369,
      "grad_norm": 1.0369686902126203,
      "learning_rate": 8.497330606588257e-06,
      "loss": 0.5779,
      "step": 720
    },
    {
      "epoch": 0.9986320109439124,
      "grad_norm": 1.0669076543876443,
      "learning_rate": 8.439991826398331e-06,
      "loss": 0.6108,
      "step": 730
    },
    {
      "epoch": 1.012311901504788,
      "grad_norm": 1.38284058642527,
      "learning_rate": 8.38178089093786e-06,
      "loss": 0.4662,
      "step": 740
    },
    {
      "epoch": 1.0259917920656634,
      "grad_norm": 1.36236398170643,
      "learning_rate": 8.322712558664635e-06,
      "loss": 0.4296,
      "step": 750
    },
    {
      "epoch": 1.039671682626539,
      "grad_norm": 1.2100527044799618,
      "learning_rate": 8.262801805415783e-06,
      "loss": 0.4295,
      "step": 760
    },
    {
      "epoch": 1.0533515731874146,
      "grad_norm": 1.1588309328691597,
      "learning_rate": 8.202063820610887e-06,
      "loss": 0.4244,
      "step": 770
    },
    {
      "epoch": 1.06703146374829,
      "grad_norm": 1.1590821782151508,
      "learning_rate": 8.140514003400945e-06,
      "loss": 0.4257,
      "step": 780
    },
    {
      "epoch": 1.0807113543091655,
      "grad_norm": 1.1515662295268354,
      "learning_rate": 8.078167958764162e-06,
      "loss": 0.4325,
      "step": 790
    },
    {
      "epoch": 1.094391244870041,
      "grad_norm": 1.1972938041742296,
      "learning_rate": 8.015041493549562e-06,
      "loss": 0.4314,
      "step": 800
    },
    {
      "epoch": 1.094391244870041,
      "eval_loss": 0.6188511848449707,
      "eval_runtime": 168.4076,
      "eval_samples_per_second": 30.866,
      "eval_steps_per_second": 3.86,
      "step": 800
    },
    {
      "epoch": 1.1080711354309165,
      "grad_norm": 1.2310788704520765,
      "learning_rate": 7.951150612469396e-06,
      "loss": 0.4335,
      "step": 810
    },
    {
      "epoch": 1.121751025991792,
      "grad_norm": 1.2391064990548075,
      "learning_rate": 7.886511514041422e-06,
      "loss": 0.4271,
      "step": 820
    },
    {
      "epoch": 1.1354309165526675,
      "grad_norm": 1.1957485922230724,
      "learning_rate": 7.821140586482013e-06,
      "loss": 0.4367,
      "step": 830
    },
    {
      "epoch": 1.1491108071135432,
      "grad_norm": 1.2204287898575044,
      "learning_rate": 7.755054403551191e-06,
      "loss": 0.4359,
      "step": 840
    },
    {
      "epoch": 1.1627906976744187,
      "grad_norm": 1.2398786083740503,
      "learning_rate": 7.688269720350616e-06,
      "loss": 0.433,
      "step": 850
    },
    {
      "epoch": 1.1764705882352942,
      "grad_norm": 1.1869051225812688,
      "learning_rate": 7.620803469075588e-06,
      "loss": 0.4387,
      "step": 860
    },
    {
      "epoch": 1.1901504787961696,
      "grad_norm": 1.1936581208347872,
      "learning_rate": 7.552672754722169e-06,
      "loss": 0.4198,
      "step": 870
    },
    {
      "epoch": 1.2038303693570451,
      "grad_norm": 1.153245583578447,
      "learning_rate": 7.483894850750473e-06,
      "loss": 0.4356,
      "step": 880
    },
    {
      "epoch": 1.2175102599179206,
      "grad_norm": 1.2121786191778063,
      "learning_rate": 7.414487194705258e-06,
      "loss": 0.4334,
      "step": 890
    },
    {
      "epoch": 1.231190150478796,
      "grad_norm": 1.1865361362733824,
      "learning_rate": 7.344467383794917e-06,
      "loss": 0.4335,
      "step": 900
    },
    {
      "epoch": 1.2448700410396718,
      "grad_norm": 1.2578535517364966,
      "learning_rate": 7.2738531704299845e-06,
      "loss": 0.4338,
      "step": 910
    },
    {
      "epoch": 1.2585499316005473,
      "grad_norm": 1.208342728404754,
      "learning_rate": 7.2026624577222855e-06,
      "loss": 0.4506,
      "step": 920
    },
    {
      "epoch": 1.2722298221614228,
      "grad_norm": 1.2293841459842745,
      "learning_rate": 7.1309132949459e-06,
      "loss": 0.4356,
      "step": 930
    },
    {
      "epoch": 1.2859097127222983,
      "grad_norm": 1.1995897552412818,
      "learning_rate": 7.058623872961051e-06,
      "loss": 0.4293,
      "step": 940
    },
    {
      "epoch": 1.2995896032831737,
      "grad_norm": 1.3990655945335009,
      "learning_rate": 6.985812519602094e-06,
      "loss": 0.4268,
      "step": 950
    },
    {
      "epoch": 1.3132694938440492,
      "grad_norm": 1.2252593246536305,
      "learning_rate": 6.912497695030796e-06,
      "loss": 0.4286,
      "step": 960
    },
    {
      "epoch": 1.3269493844049247,
      "grad_norm": 1.1833071160287538,
      "learning_rate": 6.838697987056046e-06,
      "loss": 0.4313,
      "step": 970
    },
    {
      "epoch": 1.3406292749658002,
      "grad_norm": 1.1637401442465505,
      "learning_rate": 6.764432106421223e-06,
      "loss": 0.4279,
      "step": 980
    },
    {
      "epoch": 1.3543091655266757,
      "grad_norm": 1.2180322469819673,
      "learning_rate": 6.68971888206037e-06,
      "loss": 0.4305,
      "step": 990
    },
    {
      "epoch": 1.3679890560875512,
      "grad_norm": 1.2229086904650939,
      "learning_rate": 6.614577256324426e-06,
      "loss": 0.4322,
      "step": 1000
    },
    {
      "epoch": 1.3679890560875512,
      "eval_loss": 0.617301881313324,
      "eval_runtime": 168.0881,
      "eval_samples_per_second": 30.924,
      "eval_steps_per_second": 3.867,
      "step": 1000
    },
    {
      "epoch": 1.3816689466484269,
      "grad_norm": 1.1947852132614785,
      "learning_rate": 6.539026280178695e-06,
      "loss": 0.4279,
      "step": 1010
    },
    {
      "epoch": 1.3953488372093024,
      "grad_norm": 1.247192723450866,
      "learning_rate": 6.46308510837277e-06,
      "loss": 0.4309,
      "step": 1020
    },
    {
      "epoch": 1.4090287277701778,
      "grad_norm": 1.185576441473233,
      "learning_rate": 6.386772994584156e-06,
      "loss": 0.4349,
      "step": 1030
    },
    {
      "epoch": 1.4227086183310533,
      "grad_norm": 1.129361008569495,
      "learning_rate": 6.310109286536801e-06,
      "loss": 0.4232,
      "step": 1040
    },
    {
      "epoch": 1.4363885088919288,
      "grad_norm": 1.1431132187777882,
      "learning_rate": 6.233113421095794e-06,
      "loss": 0.4355,
      "step": 1050
    },
    {
      "epoch": 1.4500683994528043,
      "grad_norm": 1.1747059710212222,
      "learning_rate": 6.155804919339453e-06,
      "loss": 0.4286,
      "step": 1060
    },
    {
      "epoch": 1.46374829001368,
      "grad_norm": 1.151624752794334,
      "learning_rate": 6.078203381610064e-06,
      "loss": 0.4314,
      "step": 1070
    },
    {
      "epoch": 1.4774281805745555,
      "grad_norm": 1.2141807720724713,
      "learning_rate": 6.000328482544532e-06,
      "loss": 0.4292,
      "step": 1080
    },
    {
      "epoch": 1.491108071135431,
      "grad_norm": 1.2395901378501935,
      "learning_rate": 5.92219996608618e-06,
      "loss": 0.4295,
      "step": 1090
    },
    {
      "epoch": 1.5047879616963065,
      "grad_norm": 1.3241796863033826,
      "learning_rate": 5.843837640479e-06,
      "loss": 0.4399,
      "step": 1100
    },
    {
      "epoch": 1.518467852257182,
      "grad_norm": 1.1677488632090005,
      "learning_rate": 5.765261373245567e-06,
      "loss": 0.424,
      "step": 1110
    },
    {
      "epoch": 1.5321477428180574,
      "grad_norm": 1.2084792827131878,
      "learning_rate": 5.686491086149965e-06,
      "loss": 0.4285,
      "step": 1120
    },
    {
      "epoch": 1.545827633378933,
      "grad_norm": 1.2057361531766408,
      "learning_rate": 5.60754675014693e-06,
      "loss": 0.4397,
      "step": 1130
    },
    {
      "epoch": 1.5595075239398084,
      "grad_norm": 1.0459853203349079,
      "learning_rate": 5.528448380318532e-06,
      "loss": 0.4184,
      "step": 1140
    },
    {
      "epoch": 1.573187414500684,
      "grad_norm": 1.1565826426536214,
      "learning_rate": 5.449216030799667e-06,
      "loss": 0.4199,
      "step": 1150
    },
    {
      "epoch": 1.5868673050615594,
      "grad_norm": 1.337080485300612,
      "learning_rate": 5.369869789693656e-06,
      "loss": 0.4392,
      "step": 1160
    },
    {
      "epoch": 1.6005471956224349,
      "grad_norm": 1.2455984858098026,
      "learning_rate": 5.290429773979224e-06,
      "loss": 0.4337,
      "step": 1170
    },
    {
      "epoch": 1.6142270861833106,
      "grad_norm": 1.218208739067707,
      "learning_rate": 5.2109161244101545e-06,
      "loss": 0.4448,
      "step": 1180
    },
    {
      "epoch": 1.627906976744186,
      "grad_norm": 1.1285789923935337,
      "learning_rate": 5.1313490004089265e-06,
      "loss": 0.4286,
      "step": 1190
    },
    {
      "epoch": 1.6415868673050615,
      "grad_norm": 1.2264365996543631,
      "learning_rate": 5.05174857495561e-06,
      "loss": 0.4368,
      "step": 1200
    },
    {
      "epoch": 1.6415868673050615,
      "eval_loss": 0.608180046081543,
      "eval_runtime": 168.5858,
      "eval_samples_per_second": 30.833,
      "eval_steps_per_second": 3.856,
      "step": 1200
    },
    {
      "epoch": 1.655266757865937,
      "grad_norm": 1.243465209947156,
      "learning_rate": 4.972135029473333e-06,
      "loss": 0.4276,
      "step": 1210
    },
    {
      "epoch": 1.6689466484268127,
      "grad_norm": 1.1806815681626925,
      "learning_rate": 4.892528548711594e-06,
      "loss": 0.4334,
      "step": 1220
    },
    {
      "epoch": 1.6826265389876882,
      "grad_norm": 1.1380113337270097,
      "learning_rate": 4.812949315628746e-06,
      "loss": 0.4272,
      "step": 1230
    },
    {
      "epoch": 1.6963064295485637,
      "grad_norm": 1.1677234792880147,
      "learning_rate": 4.7334175062749275e-06,
      "loss": 0.4164,
      "step": 1240
    },
    {
      "epoch": 1.7099863201094392,
      "grad_norm": 1.1552072694954474,
      "learning_rate": 4.653953284676737e-06,
      "loss": 0.4415,
      "step": 1250
    },
    {
      "epoch": 1.7236662106703147,
      "grad_norm": 1.2021058595481928,
      "learning_rate": 4.574576797724984e-06,
      "loss": 0.4161,
      "step": 1260
    },
    {
      "epoch": 1.7373461012311902,
      "grad_norm": 1.3115148564903278,
      "learning_rate": 4.495308170066739e-06,
      "loss": 0.4326,
      "step": 1270
    },
    {
      "epoch": 1.7510259917920656,
      "grad_norm": 1.2648526427702413,
      "learning_rate": 4.4161674990030715e-06,
      "loss": 0.4426,
      "step": 1280
    },
    {
      "epoch": 1.7647058823529411,
      "grad_norm": 1.137931826008329,
      "learning_rate": 4.3371748493936834e-06,
      "loss": 0.4263,
      "step": 1290
    },
    {
      "epoch": 1.7783857729138166,
      "grad_norm": 1.203799366153618,
      "learning_rate": 4.2583502485697945e-06,
      "loss": 0.422,
      "step": 1300
    },
    {
      "epoch": 1.792065663474692,
      "grad_norm": 1.1215051415533788,
      "learning_rate": 4.179713681256523e-06,
      "loss": 0.4278,
      "step": 1310
    },
    {
      "epoch": 1.8057455540355676,
      "grad_norm": 1.1919550892429331,
      "learning_rate": 4.101285084506075e-06,
      "loss": 0.4126,
      "step": 1320
    },
    {
      "epoch": 1.819425444596443,
      "grad_norm": 1.2602652902830995,
      "learning_rate": 4.023084342643026e-06,
      "loss": 0.4302,
      "step": 1330
    },
    {
      "epoch": 1.8331053351573188,
      "grad_norm": 1.1856280985422172,
      "learning_rate": 3.94513128222297e-06,
      "loss": 0.4308,
      "step": 1340
    },
    {
      "epoch": 1.8467852257181943,
      "grad_norm": 1.1188116484677932,
      "learning_rate": 3.867445667005801e-06,
      "loss": 0.4343,
      "step": 1350
    },
    {
      "epoch": 1.8604651162790697,
      "grad_norm": 1.1588258315903728,
      "learning_rate": 3.790047192944931e-06,
      "loss": 0.4301,
      "step": 1360
    },
    {
      "epoch": 1.8741450068399452,
      "grad_norm": 1.1459817706332724,
      "learning_rate": 3.7129554831936998e-06,
      "loss": 0.4175,
      "step": 1370
    },
    {
      "epoch": 1.887824897400821,
      "grad_norm": 1.104555953617468,
      "learning_rate": 3.6361900831302264e-06,
      "loss": 0.4175,
      "step": 1380
    },
    {
      "epoch": 1.9015047879616964,
      "grad_norm": 1.1942887584277926,
      "learning_rate": 3.5597704554019907e-06,
      "loss": 0.4211,
      "step": 1390
    },
    {
      "epoch": 1.915184678522572,
      "grad_norm": 1.0668265437929074,
      "learning_rate": 3.4837159749913944e-06,
      "loss": 0.4195,
      "step": 1400
    },
    {
      "epoch": 1.915184678522572,
      "eval_loss": 0.6006618142127991,
      "eval_runtime": 167.6616,
      "eval_samples_per_second": 31.003,
      "eval_steps_per_second": 3.877,
      "step": 1400
    },
    {
      "epoch": 1.9288645690834474,
      "grad_norm": 1.088376315283852,
      "learning_rate": 3.40804592430354e-06,
      "loss": 0.4205,
      "step": 1410
    },
    {
      "epoch": 1.9425444596443229,
      "grad_norm": 1.1227517978501422,
      "learning_rate": 3.3327794882774785e-06,
      "loss": 0.4295,
      "step": 1420
    },
    {
      "epoch": 1.9562243502051984,
      "grad_norm": 1.1732094471787033,
      "learning_rate": 3.2579357495221786e-06,
      "loss": 0.4104,
      "step": 1430
    },
    {
      "epoch": 1.9699042407660738,
      "grad_norm": 1.2162059196521249,
      "learning_rate": 3.183533683478427e-06,
      "loss": 0.4091,
      "step": 1440
    },
    {
      "epoch": 1.9835841313269493,
      "grad_norm": 1.108696878534993,
      "learning_rate": 3.1095921536079215e-06,
      "loss": 0.43,
      "step": 1450
    },
    {
      "epoch": 1.9972640218878248,
      "grad_norm": 1.148281960391559,
      "learning_rate": 3.0361299066107225e-06,
      "loss": 0.4146,
      "step": 1460
    },
    {
      "epoch": 2.0109439124487003,
      "grad_norm": 1.853284154156804,
      "learning_rate": 2.9631655676723358e-06,
      "loss": 0.3057,
      "step": 1470
    },
    {
      "epoch": 2.024623803009576,
      "grad_norm": 1.2698401561195134,
      "learning_rate": 2.890717635741589e-06,
      "loss": 0.2671,
      "step": 1480
    },
    {
      "epoch": 2.0383036935704513,
      "grad_norm": 1.1953162644205662,
      "learning_rate": 2.81880447884052e-06,
      "loss": 0.2676,
      "step": 1490
    },
    {
      "epoch": 2.0519835841313268,
      "grad_norm": 1.120927117498952,
      "learning_rate": 2.7474443294074537e-06,
      "loss": 0.2593,
      "step": 1500
    },
    {
      "epoch": 2.0656634746922027,
      "grad_norm": 1.324110443129841,
      "learning_rate": 2.6766552796744583e-06,
      "loss": 0.2703,
      "step": 1510
    },
    {
      "epoch": 2.079343365253078,
      "grad_norm": 1.3413599300031922,
      "learning_rate": 2.606455277080346e-06,
      "loss": 0.2708,
      "step": 1520
    },
    {
      "epoch": 2.0930232558139537,
      "grad_norm": 1.1788852376372527,
      "learning_rate": 2.536862119720387e-06,
      "loss": 0.2664,
      "step": 1530
    },
    {
      "epoch": 2.106703146374829,
      "grad_norm": 1.1789132415176646,
      "learning_rate": 2.467893451833884e-06,
      "loss": 0.2673,
      "step": 1540
    },
    {
      "epoch": 2.1203830369357046,
      "grad_norm": 1.2137095943380327,
      "learning_rate": 2.3995667593307563e-06,
      "loss": 0.2622,
      "step": 1550
    },
    {
      "epoch": 2.13406292749658,
      "grad_norm": 1.2537108019590875,
      "learning_rate": 2.331899365358266e-06,
      "loss": 0.2695,
      "step": 1560
    },
    {
      "epoch": 2.1477428180574556,
      "grad_norm": 1.2719182887175577,
      "learning_rate": 2.2649084259090058e-06,
      "loss": 0.2593,
      "step": 1570
    },
    {
      "epoch": 2.161422708618331,
      "grad_norm": 1.164630355430247,
      "learning_rate": 2.1986109254712745e-06,
      "loss": 0.2708,
      "step": 1580
    },
    {
      "epoch": 2.1751025991792066,
      "grad_norm": 1.1779155039946436,
      "learning_rate": 2.133023672722923e-06,
      "loss": 0.2697,
      "step": 1590
    },
    {
      "epoch": 2.188782489740082,
      "grad_norm": 1.1294417346333414,
      "learning_rate": 2.0681632962697955e-06,
      "loss": 0.2528,
      "step": 1600
    },
    {
      "epoch": 2.188782489740082,
      "eval_loss": 0.6378765106201172,
      "eval_runtime": 171.4166,
      "eval_samples_per_second": 30.324,
      "eval_steps_per_second": 3.792,
      "step": 1600
    },
    {
      "epoch": 2.2024623803009575,
      "grad_norm": 1.2183820949308963,
      "learning_rate": 2.0040462404297873e-06,
      "loss": 0.2719,
      "step": 1610
    },
    {
      "epoch": 2.216142270861833,
      "grad_norm": 1.1698549285231044,
      "learning_rate": 1.9406887610636716e-06,
      "loss": 0.2659,
      "step": 1620
    },
    {
      "epoch": 2.2298221614227085,
      "grad_norm": 1.2697486173216241,
      "learning_rate": 1.8781069214536711e-06,
      "loss": 0.2677,
      "step": 1630
    },
    {
      "epoch": 2.243502051983584,
      "grad_norm": 1.214820822899955,
      "learning_rate": 1.8163165882308776e-06,
      "loss": 0.2662,
      "step": 1640
    },
    {
      "epoch": 2.2571819425444595,
      "grad_norm": 1.3319711950469666,
      "learning_rate": 1.7553334273525107e-06,
      "loss": 0.2648,
      "step": 1650
    },
    {
      "epoch": 2.270861833105335,
      "grad_norm": 1.245211504895954,
      "learning_rate": 1.6951729001300626e-06,
      "loss": 0.2659,
      "step": 1660
    },
    {
      "epoch": 2.2845417236662104,
      "grad_norm": 1.2357536814491976,
      "learning_rate": 1.6358502593093417e-06,
      "loss": 0.2638,
      "step": 1670
    },
    {
      "epoch": 2.2982216142270864,
      "grad_norm": 1.2488899759446517,
      "learning_rate": 1.5773805452033664e-06,
      "loss": 0.2647,
      "step": 1680
    },
    {
      "epoch": 2.311901504787962,
      "grad_norm": 1.1504334028748302,
      "learning_rate": 1.5197785818791395e-06,
      "loss": 0.2611,
      "step": 1690
    },
    {
      "epoch": 2.3255813953488373,
      "grad_norm": 1.2488245466965713,
      "learning_rate": 1.463058973399241e-06,
      "loss": 0.2597,
      "step": 1700
    },
    {
      "epoch": 2.339261285909713,
      "grad_norm": 1.2329978043228385,
      "learning_rate": 1.4072361001191997e-06,
      "loss": 0.2664,
      "step": 1710
    },
    {
      "epoch": 2.3529411764705883,
      "grad_norm": 1.200114038379274,
      "learning_rate": 1.3523241150415822e-06,
      "loss": 0.2579,
      "step": 1720
    },
    {
      "epoch": 2.366621067031464,
      "grad_norm": 1.1866913191947843,
      "learning_rate": 1.298336940227724e-06,
      "loss": 0.2627,
      "step": 1730
    },
    {
      "epoch": 2.3803009575923393,
      "grad_norm": 1.215070259088293,
      "learning_rate": 1.2452882632680125e-06,
      "loss": 0.2629,
      "step": 1740
    },
    {
      "epoch": 2.3939808481532148,
      "grad_norm": 1.2848393192245922,
      "learning_rate": 1.1931915338116157e-06,
      "loss": 0.2579,
      "step": 1750
    },
    {
      "epoch": 2.4076607387140903,
      "grad_norm": 1.178082536045285,
      "learning_rate": 1.1420599601565375e-06,
      "loss": 0.2655,
      "step": 1760
    },
    {
      "epoch": 2.4213406292749657,
      "grad_norm": 1.1599349607603797,
      "learning_rate": 1.0919065059008626e-06,
      "loss": 0.2614,
      "step": 1770
    },
    {
      "epoch": 2.4350205198358412,
      "grad_norm": 1.218569747775109,
      "learning_rate": 1.042743886656043e-06,
      "loss": 0.2633,
      "step": 1780
    },
    {
      "epoch": 2.4487004103967167,
      "grad_norm": 1.1984843383370676,
      "learning_rate": 9.94584566823062e-07,
      "loss": 0.2626,
      "step": 1790
    },
    {
      "epoch": 2.462380300957592,
      "grad_norm": 1.1483398440958268,
      "learning_rate": 9.474407564322685e-07,
      "loss": 0.2648,
      "step": 1800
    },
    {
      "epoch": 2.462380300957592,
      "eval_loss": 0.6390905976295471,
      "eval_runtime": 167.5515,
      "eval_samples_per_second": 31.023,
      "eval_steps_per_second": 3.879,
      "step": 1800
    },
    {
      "epoch": 2.4760601915184677,
      "grad_norm": 1.2020289928203793,
      "learning_rate": 9.01324408047734e-07,
      "loss": 0.2649,
      "step": 1810
    },
    {
      "epoch": 2.4897400820793436,
      "grad_norm": 1.1514025291135426,
      "learning_rate": 8.562472137368638e-07,
      "loss": 0.2598,
      "step": 1820
    },
    {
      "epoch": 2.503419972640219,
      "grad_norm": 1.1443562664564093,
      "learning_rate": 8.122206021060552e-07,
      "loss": 0.2627,
      "step": 1830
    },
    {
      "epoch": 2.5170998632010946,
      "grad_norm": 1.2145886027335204,
      "learning_rate": 7.692557354031633e-07,
      "loss": 0.2554,
      "step": 1840
    },
    {
      "epoch": 2.53077975376197,
      "grad_norm": 1.2003000862318016,
      "learning_rate": 7.273635066874796e-07,
      "loss": 0.2595,
      "step": 1850
    },
    {
      "epoch": 2.5444596443228455,
      "grad_norm": 1.2102887918479095,
      "learning_rate": 6.86554537067986e-07,
      "loss": 0.2562,
      "step": 1860
    },
    {
      "epoch": 2.558139534883721,
      "grad_norm": 1.1976060832616344,
      "learning_rate": 6.46839173010525e-07,
      "loss": 0.2589,
      "step": 1870
    },
    {
      "epoch": 2.5718194254445965,
      "grad_norm": 1.1509734934067806,
      "learning_rate": 6.08227483714629e-07,
      "loss": 0.2565,
      "step": 1880
    },
    {
      "epoch": 2.585499316005472,
      "grad_norm": 1.237509833168683,
      "learning_rate": 5.707292585606277e-07,
      "loss": 0.2646,
      "step": 1890
    },
    {
      "epoch": 2.5991792065663475,
      "grad_norm": 1.2849404614912345,
      "learning_rate": 5.343540046277113e-07,
      "loss": 0.2571,
      "step": 1900
    },
    {
      "epoch": 2.612859097127223,
      "grad_norm": 1.2020133789054372,
      "learning_rate": 4.991109442835578e-07,
      "loss": 0.2605,
      "step": 1910
    },
    {
      "epoch": 2.6265389876880985,
      "grad_norm": 1.2567141981825356,
      "learning_rate": 4.6500901284615094e-07,
      "loss": 0.2621,
      "step": 1920
    },
    {
      "epoch": 2.640218878248974,
      "grad_norm": 1.1549261891969707,
      "learning_rate": 4.320568563183708e-07,
      "loss": 0.2646,
      "step": 1930
    },
    {
      "epoch": 2.6538987688098494,
      "grad_norm": 1.227541734725619,
      "learning_rate": 4.0026282919593815e-07,
      "loss": 0.2653,
      "step": 1940
    },
    {
      "epoch": 2.667578659370725,
      "grad_norm": 1.1729506883600467,
      "learning_rate": 3.6963499234926314e-07,
      "loss": 0.2675,
      "step": 1950
    },
    {
      "epoch": 2.6812585499316004,
      "grad_norm": 1.2392479528804268,
      "learning_rate": 3.401811109797404e-07,
      "loss": 0.2805,
      "step": 1960
    },
    {
      "epoch": 2.694938440492476,
      "grad_norm": 1.1834886023051063,
      "learning_rate": 3.1190865265100954e-07,
      "loss": 0.2594,
      "step": 1970
    },
    {
      "epoch": 2.7086183310533514,
      "grad_norm": 1.1342198262790109,
      "learning_rate": 2.8482478539566595e-07,
      "loss": 0.2662,
      "step": 1980
    },
    {
      "epoch": 2.722298221614227,
      "grad_norm": 1.2035486656539252,
      "learning_rate": 2.58936375897923e-07,
      "loss": 0.2688,
      "step": 1990
    },
    {
      "epoch": 2.7359781121751023,
      "grad_norm": 1.181326670816563,
      "learning_rate": 2.342499877526755e-07,
      "loss": 0.2586,
      "step": 2000
    },
    {
      "epoch": 2.7359781121751023,
      "eval_loss": 0.6391583681106567,
      "eval_runtime": 167.8571,
      "eval_samples_per_second": 30.967,
      "eval_steps_per_second": 3.872,
      "step": 2000
    },
    {
      "epoch": 2.7496580027359783,
      "grad_norm": 1.1676009303739256,
      "learning_rate": 2.1077187980140467e-07,
      "loss": 0.2616,
      "step": 2010
    },
    {
      "epoch": 2.7633378932968538,
      "grad_norm": 1.219603623890368,
      "learning_rate": 1.8850800454534358e-07,
      "loss": 0.2591,
      "step": 2020
    },
    {
      "epoch": 2.7770177838577292,
      "grad_norm": 1.2643792382182215,
      "learning_rate": 1.6746400663631923e-07,
      "loss": 0.2608,
      "step": 2030
    },
    {
      "epoch": 2.7906976744186047,
      "grad_norm": 1.2780674547136892,
      "learning_rate": 1.4764522144563865e-07,
      "loss": 0.2625,
      "step": 2040
    },
    {
      "epoch": 2.80437756497948,
      "grad_norm": 1.2502610941549783,
      "learning_rate": 1.2905667371139298e-07,
      "loss": 0.259,
      "step": 2050
    },
    {
      "epoch": 2.8180574555403557,
      "grad_norm": 1.246880298259785,
      "learning_rate": 1.117030762645116e-07,
      "loss": 0.2613,
      "step": 2060
    },
    {
      "epoch": 2.831737346101231,
      "grad_norm": 1.2060582120323238,
      "learning_rate": 9.558882883390075e-08,
      "loss": 0.2607,
      "step": 2070
    },
    {
      "epoch": 2.8454172366621067,
      "grad_norm": 1.1847791928318971,
      "learning_rate": 8.071801693096237e-08,
      "loss": 0.2575,
      "step": 2080
    },
    {
      "epoch": 2.859097127222982,
      "grad_norm": 1.2366555323688222,
      "learning_rate": 6.70944108137761e-08,
      "loss": 0.2601,
      "step": 2090
    },
    {
      "epoch": 2.8727770177838576,
      "grad_norm": 1.1353358354521152,
      "learning_rate": 5.472146453121163e-08,
      "loss": 0.2565,
      "step": 2100
    },
    {
      "epoch": 2.886456908344733,
      "grad_norm": 1.2500960828119085,
      "learning_rate": 4.360231504720813e-08,
      "loss": 0.2726,
      "step": 2110
    },
    {
      "epoch": 2.9001367989056086,
      "grad_norm": 1.234389078825029,
      "learning_rate": 3.3739781445449316e-08,
      "loss": 0.2544,
      "step": 2120
    },
    {
      "epoch": 2.9138166894664845,
      "grad_norm": 1.1537121999523,
      "learning_rate": 2.513636421462573e-08,
      "loss": 0.2587,
      "step": 2130
    },
    {
      "epoch": 2.92749658002736,
      "grad_norm": 1.1582097586418938,
      "learning_rate": 1.779424461447965e-08,
      "loss": 0.2622,
      "step": 2140
    },
    {
      "epoch": 2.9411764705882355,
      "grad_norm": 1.1715551395170911,
      "learning_rate": 1.1715284122779114e-08,
      "loss": 0.261,
      "step": 2150
    },
    {
      "epoch": 2.954856361149111,
      "grad_norm": 1.14493606362213,
      "learning_rate": 6.901023963369336e-09,
      "loss": 0.2581,
      "step": 2160
    },
    {
      "epoch": 2.9685362517099865,
      "grad_norm": 1.2230232820768532,
      "learning_rate": 3.3526847154219387e-09,
      "loss": 0.2577,
      "step": 2170
    },
    {
      "epoch": 2.982216142270862,
      "grad_norm": 1.2703211507866685,
      "learning_rate": 1.0711660039747219e-09,
      "loss": 0.2691,
      "step": 2180
    },
    {
      "epoch": 2.9958960328317374,
      "grad_norm": 1.1271004957793302,
      "learning_rate": 5.704627184577316e-11,
      "loss": 0.2604,
      "step": 2190
    },
    {
      "epoch": 3.0,
      "step": 2193,
      "total_flos": 391576352194560.0,
      "train_loss": 0.44177393167463547,
      "train_runtime": 22000.6442,
      "train_samples_per_second": 6.378,
      "train_steps_per_second": 0.1
    }
  ],
  "logging_steps": 10,
  "max_steps": 2193,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 391576352194560.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}