{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.6456390565002743,
  "eval_steps": 1000,
  "global_step": 3000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.013713658804168952,
      "grad_norm": 0.9250810742378235,
      "learning_rate": 2.4808205470313544e-05,
      "loss": 2.5905,
      "step": 25
    },
    {
      "epoch": 0.027427317608337904,
      "grad_norm": 0.7566132545471191,
      "learning_rate": 2.459973315543696e-05,
      "loss": 2.1754,
      "step": 50
    },
    {
      "epoch": 0.04114097641250686,
      "grad_norm": 0.8571016788482666,
      "learning_rate": 2.4391260840560377e-05,
      "loss": 2.063,
      "step": 75
    },
    {
      "epoch": 0.05485463521667581,
      "grad_norm": 0.734812319278717,
      "learning_rate": 2.418278852568379e-05,
      "loss": 2.0424,
      "step": 100
    },
    {
      "epoch": 0.06856829402084476,
      "grad_norm": 0.706753671169281,
      "learning_rate": 2.3974316210807207e-05,
      "loss": 2.022,
      "step": 125
    },
    {
      "epoch": 0.08228195282501372,
      "grad_norm": 0.7179118394851685,
      "learning_rate": 2.3765843895930624e-05,
      "loss": 2.0341,
      "step": 150
    },
    {
      "epoch": 0.09599561162918267,
      "grad_norm": 0.7341741323471069,
      "learning_rate": 2.3557371581054037e-05,
      "loss": 2.0339,
      "step": 175
    },
    {
      "epoch": 0.10970927043335162,
      "grad_norm": 0.6983020901679993,
      "learning_rate": 2.334889926617745e-05,
      "loss": 1.9717,
      "step": 200
    },
    {
      "epoch": 0.12342292923752057,
      "grad_norm": 0.7121065855026245,
      "learning_rate": 2.3140426951300867e-05,
      "loss": 1.996,
      "step": 225
    },
    {
      "epoch": 0.13713658804168952,
      "grad_norm": 0.7687849402427673,
      "learning_rate": 2.2931954636424284e-05,
      "loss": 1.9742,
      "step": 250
    },
    {
      "epoch": 0.1508502468458585,
      "grad_norm": 0.7747183442115784,
      "learning_rate": 2.2723482321547697e-05,
      "loss": 1.9774,
      "step": 275
    },
    {
      "epoch": 0.16456390565002743,
      "grad_norm": 0.8771799802780151,
      "learning_rate": 2.2515010006671114e-05,
      "loss": 1.9537,
      "step": 300
    },
    {
      "epoch": 0.17827756445419637,
      "grad_norm": 0.7963676452636719,
      "learning_rate": 2.230653769179453e-05,
      "loss": 1.9408,
      "step": 325
    },
    {
      "epoch": 0.19199122325836535,
      "grad_norm": 0.7438392639160156,
      "learning_rate": 2.2098065376917944e-05,
      "loss": 2.0012,
      "step": 350
    },
    {
      "epoch": 0.2057048820625343,
      "grad_norm": 0.7908737063407898,
      "learning_rate": 2.188959306204136e-05,
      "loss": 1.9485,
      "step": 375
    },
    {
      "epoch": 0.21941854086670323,
      "grad_norm": 0.7139158248901367,
      "learning_rate": 2.1681120747164777e-05,
      "loss": 1.9829,
      "step": 400
    },
    {
      "epoch": 0.23313219967087218,
      "grad_norm": 0.9640923738479614,
      "learning_rate": 2.147264843228819e-05,
      "loss": 2.0043,
      "step": 425
    },
    {
      "epoch": 0.24684585847504115,
      "grad_norm": 0.8830370903015137,
      "learning_rate": 2.1264176117411607e-05,
      "loss": 1.9798,
      "step": 450
    },
    {
      "epoch": 0.2605595172792101,
      "grad_norm": 0.8050561547279358,
      "learning_rate": 2.1055703802535024e-05,
      "loss": 1.9679,
      "step": 475
    },
    {
      "epoch": 0.27427317608337903,
      "grad_norm": 0.8845964074134827,
      "learning_rate": 2.084723148765844e-05,
      "loss": 1.9261,
      "step": 500
    },
    {
      "epoch": 0.287986834887548,
      "grad_norm": 0.7687380909919739,
      "learning_rate": 2.0638759172781854e-05,
      "loss": 1.938,
      "step": 525
    },
    {
      "epoch": 0.301700493691717,
      "grad_norm": 0.8741185665130615,
      "learning_rate": 2.043028685790527e-05,
      "loss": 1.941,
      "step": 550
    },
    {
      "epoch": 0.3154141524958859,
      "grad_norm": 0.8414379358291626,
      "learning_rate": 2.0221814543028687e-05,
      "loss": 1.9442,
      "step": 575
    },
    {
      "epoch": 0.32912781130005486,
      "grad_norm": 0.8874805569648743,
      "learning_rate": 2.00133422281521e-05,
      "loss": 1.9899,
      "step": 600
    },
    {
      "epoch": 0.34284147010422383,
      "grad_norm": 0.8438735604286194,
      "learning_rate": 1.9804869913275517e-05,
      "loss": 1.9332,
      "step": 625
    },
    {
      "epoch": 0.35655512890839275,
      "grad_norm": 0.8141827583312988,
      "learning_rate": 1.9596397598398934e-05,
      "loss": 1.9334,
      "step": 650
    },
    {
      "epoch": 0.3702687877125617,
      "grad_norm": 0.8998775482177734,
      "learning_rate": 1.9387925283522347e-05,
      "loss": 1.9208,
      "step": 675
    },
    {
      "epoch": 0.3839824465167307,
      "grad_norm": 0.8827155232429504,
      "learning_rate": 1.9179452968645764e-05,
      "loss": 1.9287,
      "step": 700
    },
    {
      "epoch": 0.3976961053208996,
      "grad_norm": 0.7951996922492981,
      "learning_rate": 1.897098065376918e-05,
      "loss": 1.9514,
      "step": 725
    },
    {
      "epoch": 0.4114097641250686,
      "grad_norm": 0.784051775932312,
      "learning_rate": 1.8762508338892594e-05,
      "loss": 1.9226,
      "step": 750
    },
    {
      "epoch": 0.4251234229292375,
      "grad_norm": 0.9527340531349182,
      "learning_rate": 1.855403602401601e-05,
      "loss": 1.9459,
      "step": 775
    },
    {
      "epoch": 0.43883708173340646,
      "grad_norm": 0.8100705742835999,
      "learning_rate": 1.8345563709139427e-05,
      "loss": 1.9218,
      "step": 800
    },
    {
      "epoch": 0.45255074053757544,
      "grad_norm": 0.814917266368866,
      "learning_rate": 1.8137091394262844e-05,
      "loss": 1.9682,
      "step": 825
    },
    {
      "epoch": 0.46626439934174435,
      "grad_norm": 0.8693529367446899,
      "learning_rate": 1.7928619079386257e-05,
      "loss": 1.9068,
      "step": 850
    },
    {
      "epoch": 0.4799780581459133,
      "grad_norm": 0.8571690320968628,
      "learning_rate": 1.7720146764509674e-05,
      "loss": 1.9225,
      "step": 875
    },
    {
      "epoch": 0.4936917169500823,
      "grad_norm": 0.9239374995231628,
      "learning_rate": 1.751167444963309e-05,
      "loss": 1.9628,
      "step": 900
    },
    {
      "epoch": 0.5074053757542513,
      "grad_norm": 0.8887170553207397,
      "learning_rate": 1.7303202134756504e-05,
      "loss": 1.8972,
      "step": 925
    },
    {
      "epoch": 0.5211190345584202,
      "grad_norm": 0.8989230394363403,
      "learning_rate": 1.709472981987992e-05,
      "loss": 1.9517,
      "step": 950
    },
    {
      "epoch": 0.5348326933625891,
      "grad_norm": 0.8530762791633606,
      "learning_rate": 1.6886257505003337e-05,
      "loss": 1.9163,
      "step": 975
    },
    {
      "epoch": 0.5485463521667581,
      "grad_norm": 0.9289182424545288,
      "learning_rate": 1.667778519012675e-05,
      "loss": 1.9399,
      "step": 1000
    },
    {
      "epoch": 0.5485463521667581,
      "eval_loss": 1.8345812559127808,
      "eval_runtime": 12.1751,
      "eval_samples_per_second": 11.91,
      "eval_steps_per_second": 1.561,
      "step": 1000
    },
    {
      "epoch": 0.562260010970927,
      "grad_norm": 0.9478083848953247,
      "learning_rate": 1.6469312875250167e-05,
      "loss": 1.9219,
      "step": 1025
    },
    {
      "epoch": 0.575973669775096,
      "grad_norm": 0.9554803371429443,
      "learning_rate": 1.6260840560373584e-05,
      "loss": 1.9297,
      "step": 1050
    },
    {
      "epoch": 0.589687328579265,
      "grad_norm": 0.9292556643486023,
      "learning_rate": 1.6052368245496997e-05,
      "loss": 1.9245,
      "step": 1075
    },
    {
      "epoch": 0.603400987383434,
      "grad_norm": 0.9070860743522644,
      "learning_rate": 1.5843895930620414e-05,
      "loss": 1.9247,
      "step": 1100
    },
    {
      "epoch": 0.6171146461876028,
      "grad_norm": 0.883678674697876,
      "learning_rate": 1.563542361574383e-05,
      "loss": 1.9041,
      "step": 1125
    },
    {
      "epoch": 0.6308283049917718,
      "grad_norm": 0.9620354175567627,
      "learning_rate": 1.5426951300867247e-05,
      "loss": 1.9394,
      "step": 1150
    },
    {
      "epoch": 0.6445419637959408,
      "grad_norm": 0.9458388090133667,
      "learning_rate": 1.521847898599066e-05,
      "loss": 1.9513,
      "step": 1175
    },
    {
      "epoch": 0.6582556226001097,
      "grad_norm": 0.9318162798881531,
      "learning_rate": 1.5010006671114077e-05,
      "loss": 1.8923,
      "step": 1200
    },
    {
      "epoch": 0.6719692814042787,
      "grad_norm": 0.8988519906997681,
      "learning_rate": 1.4801534356237492e-05,
      "loss": 1.9337,
      "step": 1225
    },
    {
      "epoch": 0.6856829402084477,
      "grad_norm": 0.9615154266357422,
      "learning_rate": 1.4593062041360909e-05,
      "loss": 1.9118,
      "step": 1250
    },
    {
      "epoch": 0.6993965990126165,
      "grad_norm": 0.9634252190589905,
      "learning_rate": 1.4384589726484324e-05,
      "loss": 1.8784,
      "step": 1275
    },
    {
      "epoch": 0.7131102578167855,
      "grad_norm": 0.9005519151687622,
      "learning_rate": 1.4176117411607739e-05,
      "loss": 1.9124,
      "step": 1300
    },
    {
      "epoch": 0.7268239166209545,
      "grad_norm": 0.9393877387046814,
      "learning_rate": 1.3967645096731155e-05,
      "loss": 1.925,
      "step": 1325
    },
    {
      "epoch": 0.7405375754251234,
      "grad_norm": 0.9061549305915833,
      "learning_rate": 1.375917278185457e-05,
      "loss": 1.9245,
      "step": 1350
    },
    {
      "epoch": 0.7542512342292924,
      "grad_norm": 0.9310101270675659,
      "learning_rate": 1.3550700466977987e-05,
      "loss": 1.9177,
      "step": 1375
    },
    {
      "epoch": 0.7679648930334614,
      "grad_norm": 0.892022430896759,
      "learning_rate": 1.3342228152101402e-05,
      "loss": 1.8819,
      "step": 1400
    },
    {
      "epoch": 0.7816785518376302,
      "grad_norm": 1.0094044208526611,
      "learning_rate": 1.3133755837224817e-05,
      "loss": 1.9032,
      "step": 1425
    },
    {
      "epoch": 0.7953922106417992,
      "grad_norm": 0.9686225652694702,
      "learning_rate": 1.2925283522348234e-05,
      "loss": 1.9229,
      "step": 1450
    },
    {
      "epoch": 0.8091058694459682,
      "grad_norm": 0.8588367104530334,
      "learning_rate": 1.2716811207471649e-05,
      "loss": 1.8997,
      "step": 1475
    },
    {
      "epoch": 0.8228195282501372,
      "grad_norm": 0.9800122380256653,
      "learning_rate": 1.2508338892595064e-05,
      "loss": 1.9087,
      "step": 1500
    },
    {
      "epoch": 0.8365331870543061,
      "grad_norm": 0.9647035002708435,
      "learning_rate": 1.229986657771848e-05,
      "loss": 1.9239,
      "step": 1525
    },
    {
      "epoch": 0.850246845858475,
      "grad_norm": 0.9459319114685059,
      "learning_rate": 1.2091394262841895e-05,
      "loss": 1.88,
      "step": 1550
    },
    {
      "epoch": 0.863960504662644,
      "grad_norm": 0.989025354385376,
      "learning_rate": 1.1882921947965312e-05,
      "loss": 1.9134,
      "step": 1575
    },
    {
      "epoch": 0.8776741634668129,
      "grad_norm": 0.9573891162872314,
      "learning_rate": 1.1674449633088725e-05,
      "loss": 1.917,
      "step": 1600
    },
    {
      "epoch": 0.8913878222709819,
      "grad_norm": 0.9619189500808716,
      "learning_rate": 1.1465977318212142e-05,
      "loss": 1.9202,
      "step": 1625
    },
    {
      "epoch": 0.9051014810751509,
      "grad_norm": 0.959064781665802,
      "learning_rate": 1.1257505003335557e-05,
      "loss": 1.9053,
      "step": 1650
    },
    {
      "epoch": 0.9188151398793198,
      "grad_norm": 1.0735059976577759,
      "learning_rate": 1.1049032688458972e-05,
      "loss": 1.914,
      "step": 1675
    },
    {
      "epoch": 0.9325287986834887,
      "grad_norm": 0.8986095786094666,
      "learning_rate": 1.0840560373582389e-05,
      "loss": 1.8883,
      "step": 1700
    },
    {
      "epoch": 0.9462424574876577,
      "grad_norm": 0.9695867300033569,
      "learning_rate": 1.0632088058705804e-05,
      "loss": 1.892,
      "step": 1725
    },
    {
      "epoch": 0.9599561162918266,
      "grad_norm": 0.893974244594574,
      "learning_rate": 1.042361574382922e-05,
      "loss": 1.9094,
      "step": 1750
    },
    {
      "epoch": 0.9736697750959956,
      "grad_norm": 1.0204190015792847,
      "learning_rate": 1.0215143428952635e-05,
      "loss": 1.9121,
      "step": 1775
    },
    {
      "epoch": 0.9873834339001646,
      "grad_norm": 0.9737017750740051,
      "learning_rate": 1.000667111407605e-05,
      "loss": 1.9003,
      "step": 1800
    },
    {
      "epoch": 1.0010970927043334,
      "grad_norm": 0.9492760300636292,
      "learning_rate": 9.798198799199467e-06,
      "loss": 1.9097,
      "step": 1825
    },
    {
      "epoch": 1.0148107515085025,
      "grad_norm": 0.9199559688568115,
      "learning_rate": 9.589726484322882e-06,
      "loss": 1.7813,
      "step": 1850
    },
    {
      "epoch": 1.0285244103126714,
      "grad_norm": 0.8272762298583984,
      "learning_rate": 9.381254169446297e-06,
      "loss": 1.8073,
      "step": 1875
    },
    {
      "epoch": 1.0422380691168405,
      "grad_norm": 0.9935997724533081,
      "learning_rate": 9.172781854569714e-06,
      "loss": 1.7889,
      "step": 1900
    },
    {
      "epoch": 1.0559517279210093,
      "grad_norm": 1.0486711263656616,
      "learning_rate": 8.964309539693129e-06,
      "loss": 1.8072,
      "step": 1925
    },
    {
      "epoch": 1.0696653867251782,
      "grad_norm": 1.0662914514541626,
      "learning_rate": 8.755837224816545e-06,
      "loss": 1.8171,
      "step": 1950
    },
    {
      "epoch": 1.0833790455293473,
      "grad_norm": 0.9916715621948242,
      "learning_rate": 8.54736490993996e-06,
      "loss": 1.8002,
      "step": 1975
    },
    {
      "epoch": 1.0970927043335161,
      "grad_norm": 1.0449947118759155,
      "learning_rate": 8.338892595063375e-06,
      "loss": 1.8416,
      "step": 2000
    },
    {
      "epoch": 1.0970927043335161,
      "eval_loss": 1.7659664154052734,
      "eval_runtime": 12.173,
      "eval_samples_per_second": 11.912,
      "eval_steps_per_second": 1.561,
      "step": 2000
    },
    {
      "epoch": 1.1108063631376852,
      "grad_norm": 1.0766034126281738,
      "learning_rate": 8.130420280186792e-06,
      "loss": 1.8049,
      "step": 2025
    },
    {
      "epoch": 1.124520021941854,
      "grad_norm": 1.1313064098358154,
      "learning_rate": 7.921947965310207e-06,
      "loss": 1.7966,
      "step": 2050
    },
    {
      "epoch": 1.1382336807460232,
      "grad_norm": 1.219266653060913,
      "learning_rate": 7.713475650433624e-06,
      "loss": 1.839,
      "step": 2075
    },
    {
      "epoch": 1.151947339550192,
      "grad_norm": 1.171193242073059,
      "learning_rate": 7.5050033355570386e-06,
      "loss": 1.8187,
      "step": 2100
    },
    {
      "epoch": 1.1656609983543609,
      "grad_norm": 1.0916893482208252,
      "learning_rate": 7.296531020680454e-06,
      "loss": 1.8144,
      "step": 2125
    },
    {
      "epoch": 1.17937465715853,
      "grad_norm": 1.0833641290664673,
      "learning_rate": 7.088058705803869e-06,
      "loss": 1.8367,
      "step": 2150
    },
    {
      "epoch": 1.1930883159626988,
      "grad_norm": 1.1324750185012817,
      "learning_rate": 6.879586390927285e-06,
      "loss": 1.8323,
      "step": 2175
    },
    {
      "epoch": 1.2068019747668677,
      "grad_norm": 1.1888611316680908,
      "learning_rate": 6.671114076050701e-06,
      "loss": 1.8014,
      "step": 2200
    },
    {
      "epoch": 1.2205156335710368,
      "grad_norm": 1.2182481288909912,
      "learning_rate": 6.462641761174117e-06,
      "loss": 1.8353,
      "step": 2225
    },
    {
      "epoch": 1.2342292923752056,
      "grad_norm": 1.2250556945800781,
      "learning_rate": 6.254169446297532e-06,
      "loss": 1.816,
      "step": 2250
    },
    {
      "epoch": 1.2479429511793747,
      "grad_norm": 1.2347311973571777,
      "learning_rate": 6.045697131420948e-06,
      "loss": 1.8175,
      "step": 2275
    },
    {
      "epoch": 1.2616566099835436,
      "grad_norm": 1.2559269666671753,
      "learning_rate": 5.837224816544363e-06,
      "loss": 1.8196,
      "step": 2300
    },
    {
      "epoch": 1.2753702687877126,
      "grad_norm": 1.2170192003250122,
      "learning_rate": 5.6287525016677785e-06,
      "loss": 1.8137,
      "step": 2325
    },
    {
      "epoch": 1.2890839275918815,
      "grad_norm": 1.2384990453720093,
      "learning_rate": 5.420280186791194e-06,
      "loss": 1.7757,
      "step": 2350
    },
    {
      "epoch": 1.3027975863960504,
      "grad_norm": 1.2390954494476318,
      "learning_rate": 5.21180787191461e-06,
      "loss": 1.7952,
      "step": 2375
    },
    {
      "epoch": 1.3165112452002194,
      "grad_norm": 1.3812655210494995,
      "learning_rate": 5.003335557038025e-06,
      "loss": 1.8311,
      "step": 2400
    },
    {
      "epoch": 1.3302249040043883,
      "grad_norm": 1.2410950660705566,
      "learning_rate": 4.794863242161441e-06,
      "loss": 1.7996,
      "step": 2425
    },
    {
      "epoch": 1.3439385628085574,
      "grad_norm": 1.1785918474197388,
      "learning_rate": 4.586390927284857e-06,
      "loss": 1.793,
      "step": 2450
    },
    {
      "epoch": 1.3576522216127263,
      "grad_norm": 1.4189759492874146,
      "learning_rate": 4.377918612408273e-06,
      "loss": 1.8558,
      "step": 2475
    },
    {
      "epoch": 1.3713658804168953,
      "grad_norm": 1.209304928779602,
      "learning_rate": 4.169446297531688e-06,
      "loss": 1.7649,
      "step": 2500
    },
    {
      "epoch": 1.3850795392210642,
      "grad_norm": 1.2057141065597534,
      "learning_rate": 3.9609739826551035e-06,
      "loss": 1.8148,
      "step": 2525
    },
    {
      "epoch": 1.398793198025233,
      "grad_norm": 1.0700486898422241,
      "learning_rate": 3.7525016677785193e-06,
      "loss": 1.8063,
      "step": 2550
    },
    {
      "epoch": 1.4125068568294021,
      "grad_norm": 1.1492820978164673,
      "learning_rate": 3.5440293529019347e-06,
      "loss": 1.7777,
      "step": 2575
    },
    {
      "epoch": 1.426220515633571,
      "grad_norm": 1.2483413219451904,
      "learning_rate": 3.3355570380253505e-06,
      "loss": 1.8595,
      "step": 2600
    },
    {
      "epoch": 1.4399341744377399,
      "grad_norm": 1.384775996208191,
      "learning_rate": 3.127084723148766e-06,
      "loss": 1.7923,
      "step": 2625
    },
    {
      "epoch": 1.453647833241909,
      "grad_norm": 1.2097506523132324,
      "learning_rate": 2.9186124082721813e-06,
      "loss": 1.8107,
      "step": 2650
    },
    {
      "epoch": 1.467361492046078,
      "grad_norm": 1.2465295791625977,
      "learning_rate": 2.710140093395597e-06,
      "loss": 1.8037,
      "step": 2675
    },
    {
      "epoch": 1.4810751508502469,
      "grad_norm": 1.3312894105911255,
      "learning_rate": 2.5016677785190126e-06,
      "loss": 1.8149,
      "step": 2700
    },
    {
      "epoch": 1.4947888096544157,
      "grad_norm": 1.1086313724517822,
      "learning_rate": 2.2931954636424284e-06,
      "loss": 1.8261,
      "step": 2725
    },
    {
      "epoch": 1.5085024684585848,
      "grad_norm": 1.3292433023452759,
      "learning_rate": 2.084723148765844e-06,
      "loss": 1.8145,
      "step": 2750
    },
    {
      "epoch": 1.5222161272627537,
      "grad_norm": 1.2049185037612915,
      "learning_rate": 1.8762508338892596e-06,
      "loss": 1.8255,
      "step": 2775
    },
    {
      "epoch": 1.5359297860669225,
      "grad_norm": 1.266183614730835,
      "learning_rate": 1.6677785190126753e-06,
      "loss": 1.7954,
      "step": 2800
    },
    {
      "epoch": 1.5496434448710916,
      "grad_norm": 1.337235927581787,
      "learning_rate": 1.4593062041360907e-06,
      "loss": 1.7749,
      "step": 2825
    },
    {
      "epoch": 1.5633571036752607,
      "grad_norm": 1.3112897872924805,
      "learning_rate": 1.2508338892595063e-06,
      "loss": 1.8236,
      "step": 2850
    },
    {
      "epoch": 1.5770707624794296,
      "grad_norm": 1.24995756149292,
      "learning_rate": 1.042361574382922e-06,
      "loss": 1.8364,
      "step": 2875
    },
    {
      "epoch": 1.5907844212835984,
      "grad_norm": 1.2855443954467773,
      "learning_rate": 8.338892595063376e-07,
      "loss": 1.8267,
      "step": 2900
    },
    {
      "epoch": 1.6044980800877675,
      "grad_norm": 1.1317636966705322,
      "learning_rate": 6.254169446297531e-07,
      "loss": 1.8185,
      "step": 2925
    },
    {
      "epoch": 1.6182117388919364,
      "grad_norm": 1.3289167881011963,
      "learning_rate": 4.169446297531688e-07,
      "loss": 1.8577,
      "step": 2950
    },
    {
      "epoch": 1.6319253976961052,
      "grad_norm": 1.2797874212265015,
      "learning_rate": 2.084723148765844e-07,
      "loss": 1.811,
      "step": 2975
    },
    {
      "epoch": 1.6456390565002743,
      "grad_norm": 1.3882725238800049,
      "learning_rate": 0.0,
      "loss": 1.8245,
      "step": 3000
    },
    {
      "epoch": 1.6456390565002743,
      "eval_loss": 1.7348041534423828,
      "eval_runtime": 12.1692,
      "eval_samples_per_second": 11.915,
      "eval_steps_per_second": 1.561,
      "step": 3000
    }
  ],
  "logging_steps": 25,
  "max_steps": 3000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0737917404957901e+18,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}