{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.22977589191342043, "eval_steps": 500, "global_step": 7500, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.003063678558845606, "grad_norm": 2.5270798206329346, "learning_rate": 1.0000000000000002e-06, "loss": 3.1564, "step": 100 }, { "epoch": 0.006127357117691212, "grad_norm": 2.604092597961426, "learning_rate": 2.0000000000000003e-06, "loss": 3.1166, "step": 200 }, { "epoch": 0.009191035676536818, "grad_norm": 2.5005924701690674, "learning_rate": 3e-06, "loss": 3.0905, "step": 300 }, { "epoch": 0.012254714235382424, "grad_norm": 2.7525177001953125, "learning_rate": 4.000000000000001e-06, "loss": 3.0652, "step": 400 }, { "epoch": 0.015318392794228029, "grad_norm": 2.5251283645629883, "learning_rate": 5e-06, "loss": 3.0351, "step": 500 }, { "epoch": 0.018382071353073636, "grad_norm": 2.6771860122680664, "learning_rate": 6e-06, "loss": 3.0028, "step": 600 }, { "epoch": 0.02144574991191924, "grad_norm": 2.4799458980560303, "learning_rate": 7e-06, "loss": 3.0007, "step": 700 }, { "epoch": 0.024509428470764847, "grad_norm": 2.8402369022369385, "learning_rate": 8.000000000000001e-06, "loss": 2.9753, "step": 800 }, { "epoch": 0.027573107029610452, "grad_norm": 2.446842908859253, "learning_rate": 9e-06, "loss": 2.9737, "step": 900 }, { "epoch": 0.030636785588456058, "grad_norm": 2.3989391326904297, "learning_rate": 1e-05, "loss": 2.9618, "step": 1000 }, { "epoch": 0.03370046414730166, "grad_norm": 2.469160795211792, "learning_rate": 9.99695413509548e-06, "loss": 2.928, "step": 1100 }, { "epoch": 0.03676414270614727, "grad_norm": 2.4107494354248047, "learning_rate": 9.987820251299121e-06, "loss": 2.9214, "step": 1200 }, { "epoch": 0.039827821264992874, "grad_norm": 2.540278911590576, "learning_rate": 9.972609476841368e-06, "loss": 2.9198, "step": 1300 }, { "epoch": 0.04289149982383848, "grad_norm": 2.4398670196533203, "learning_rate": 9.951340343707852e-06, "loss": 2.9182, "step": 1400 }, { "epoch": 0.04595517838268409, "grad_norm": 2.49687123298645, "learning_rate": 9.924038765061042e-06, "loss": 2.8994, "step": 1500 }, { "epoch": 0.049018856941529694, "grad_norm": 2.4655182361602783, "learning_rate": 9.890738003669029e-06, "loss": 2.8805, "step": 1600 }, { "epoch": 0.0520825355003753, "grad_norm": 2.4170656204223633, "learning_rate": 9.851478631379982e-06, "loss": 2.8829, "step": 1700 }, { "epoch": 0.055146214059220905, "grad_norm": 2.362334966659546, "learning_rate": 9.806308479691595e-06, "loss": 2.8891, "step": 1800 }, { "epoch": 0.058209892618066514, "grad_norm": 2.426173210144043, "learning_rate": 9.755282581475769e-06, "loss": 2.875, "step": 1900 }, { "epoch": 0.061273571176912116, "grad_norm": 2.6380629539489746, "learning_rate": 9.698463103929542e-06, "loss": 2.879, "step": 2000 }, { "epoch": 0.06433724973575772, "grad_norm": 2.5123095512390137, "learning_rate": 9.635919272833938e-06, "loss": 2.8622, "step": 2100 }, { "epoch": 0.06740092829460333, "grad_norm": 2.3789472579956055, "learning_rate": 9.567727288213005e-06, "loss": 2.8669, "step": 2200 }, { "epoch": 0.07046460685344894, "grad_norm": 2.387221336364746, "learning_rate": 9.493970231495836e-06, "loss": 2.8641, "step": 2300 }, { "epoch": 0.07352828541229454, "grad_norm": 2.4947221279144287, "learning_rate": 9.414737964294636e-06, "loss": 2.8525, "step": 2400 }, { "epoch": 0.07659196397114015, "grad_norm": 2.390381097793579, "learning_rate": 9.330127018922195e-06, "loss": 2.8497, "step": 2500 
}, { "epoch": 0.07965564252998575, "grad_norm": 2.451345682144165, "learning_rate": 9.24024048078213e-06, "loss": 2.8482, "step": 2600 }, { "epoch": 0.08271932108883136, "grad_norm": 2.317512035369873, "learning_rate": 9.145187862775208e-06, "loss": 2.8486, "step": 2700 }, { "epoch": 0.08578299964767697, "grad_norm": 2.620295524597168, "learning_rate": 9.045084971874738e-06, "loss": 2.8474, "step": 2800 }, { "epoch": 0.08884667820652258, "grad_norm": 2.3616020679473877, "learning_rate": 8.94005376803361e-06, "loss": 2.8283, "step": 2900 }, { "epoch": 0.09191035676536818, "grad_norm": 2.517153263092041, "learning_rate": 8.83022221559489e-06, "loss": 2.8451, "step": 3000 }, { "epoch": 0.09497403532421378, "grad_norm": 2.6935179233551025, "learning_rate": 8.715724127386971e-06, "loss": 2.8472, "step": 3100 }, { "epoch": 0.09803771388305939, "grad_norm": 2.4080545902252197, "learning_rate": 8.596699001693257e-06, "loss": 2.8428, "step": 3200 }, { "epoch": 0.101101392441905, "grad_norm": 2.369750499725342, "learning_rate": 8.473291852294986e-06, "loss": 2.829, "step": 3300 }, { "epoch": 0.1041650710007506, "grad_norm": 2.3385934829711914, "learning_rate": 8.345653031794292e-06, "loss": 2.8349, "step": 3400 }, { "epoch": 0.1072287495595962, "grad_norm": 2.2659566402435303, "learning_rate": 8.213938048432697e-06, "loss": 2.8306, "step": 3500 }, { "epoch": 0.11029242811844181, "grad_norm": 2.508164882659912, "learning_rate": 8.078307376628292e-06, "loss": 2.8225, "step": 3600 }, { "epoch": 0.11335610667728742, "grad_norm": 2.302351713180542, "learning_rate": 7.938926261462366e-06, "loss": 2.808, "step": 3700 }, { "epoch": 0.11641978523613303, "grad_norm": 2.3972647190093994, "learning_rate": 7.795964517353734e-06, "loss": 2.8214, "step": 3800 }, { "epoch": 0.11948346379497864, "grad_norm": 2.4006614685058594, "learning_rate": 7.649596321166024e-06, "loss": 2.8198, "step": 3900 }, { "epoch": 0.12254714235382423, "grad_norm": 2.271172285079956, "learning_rate": 7.500000000000001e-06, "loss": 2.8186, "step": 4000 }, { "epoch": 0.12561082091266984, "grad_norm": 2.3733065128326416, "learning_rate": 7.347357813929455e-06, "loss": 2.8192, "step": 4100 }, { "epoch": 0.12867449947151544, "grad_norm": 2.3404409885406494, "learning_rate": 7.191855733945388e-06, "loss": 2.8235, "step": 4200 }, { "epoch": 0.13173817803036106, "grad_norm": 2.417692184448242, "learning_rate": 7.033683215379002e-06, "loss": 2.8109, "step": 4300 }, { "epoch": 0.13480185658920665, "grad_norm": 2.3670148849487305, "learning_rate": 6.873032967079562e-06, "loss": 2.8204, "step": 4400 }, { "epoch": 0.13786553514805228, "grad_norm": 2.336367130279541, "learning_rate": 6.710100716628345e-06, "loss": 2.8056, "step": 4500 }, { "epoch": 0.14092921370689787, "grad_norm": 2.3793513774871826, "learning_rate": 6.545084971874738e-06, "loss": 2.7987, "step": 4600 }, { "epoch": 0.14399289226574347, "grad_norm": 2.3895699977874756, "learning_rate": 6.378186779084996e-06, "loss": 2.8127, "step": 4700 }, { "epoch": 0.1470565708245891, "grad_norm": 2.288458824157715, "learning_rate": 6.209609477998339e-06, "loss": 2.8111, "step": 4800 }, { "epoch": 0.15012024938343468, "grad_norm": 2.299058437347412, "learning_rate": 6.039558454088796e-06, "loss": 2.7945, "step": 4900 }, { "epoch": 0.1531839279422803, "grad_norm": 2.3097774982452393, "learning_rate": 5.8682408883346535e-06, "loss": 2.7981, "step": 5000 }, { "epoch": 0.1562476065011259, "grad_norm": 2.433981418609619, "learning_rate": 5.695865504800328e-06, "loss": 2.7938, "step": 5100 }, { "epoch": 
0.1593112850599715, "grad_norm": 2.2754006385803223, "learning_rate": 5.522642316338268e-06, "loss": 2.7886, "step": 5200 }, { "epoch": 0.16237496361881712, "grad_norm": 2.3001139163970947, "learning_rate": 5.348782368720627e-06, "loss": 2.8008, "step": 5300 }, { "epoch": 0.16543864217766271, "grad_norm": 2.365751266479492, "learning_rate": 5.174497483512506e-06, "loss": 2.7858, "step": 5400 }, { "epoch": 0.16850232073650834, "grad_norm": 2.448115348815918, "learning_rate": 5e-06, "loss": 2.795, "step": 5500 }, { "epoch": 0.17156599929535393, "grad_norm": 2.414354085922241, "learning_rate": 4.825502516487497e-06, "loss": 2.7971, "step": 5600 }, { "epoch": 0.17462967785419953, "grad_norm": 2.447974443435669, "learning_rate": 4.651217631279374e-06, "loss": 2.7803, "step": 5700 }, { "epoch": 0.17769335641304515, "grad_norm": 2.38202166557312, "learning_rate": 4.477357683661734e-06, "loss": 2.784, "step": 5800 }, { "epoch": 0.18075703497189075, "grad_norm": 2.391268730163574, "learning_rate": 4.304134495199675e-06, "loss": 2.7903, "step": 5900 }, { "epoch": 0.18382071353073637, "grad_norm": 2.351288318634033, "learning_rate": 4.131759111665349e-06, "loss": 2.7788, "step": 6000 }, { "epoch": 0.18688439208958196, "grad_norm": 2.399247169494629, "learning_rate": 3.960441545911205e-06, "loss": 2.7868, "step": 6100 }, { "epoch": 0.18994807064842756, "grad_norm": 2.426527976989746, "learning_rate": 3.790390522001662e-06, "loss": 2.8012, "step": 6200 }, { "epoch": 0.19301174920727318, "grad_norm": 2.3167338371276855, "learning_rate": 3.6218132209150047e-06, "loss": 2.774, "step": 6300 }, { "epoch": 0.19607542776611878, "grad_norm": 2.2909271717071533, "learning_rate": 3.4549150281252635e-06, "loss": 2.7715, "step": 6400 }, { "epoch": 0.19913910632496437, "grad_norm": 2.3950772285461426, "learning_rate": 3.289899283371657e-06, "loss": 2.7832, "step": 6500 }, { "epoch": 0.20220278488381, "grad_norm": 2.5031042098999023, "learning_rate": 3.12696703292044e-06, "loss": 2.7801, "step": 6600 }, { "epoch": 0.2052664634426556, "grad_norm": 2.3458404541015625, "learning_rate": 2.966316784621e-06, "loss": 2.7788, "step": 6700 }, { "epoch": 0.2083301420015012, "grad_norm": 2.3030614852905273, "learning_rate": 2.8081442660546126e-06, "loss": 2.772, "step": 6800 }, { "epoch": 0.2113938205603468, "grad_norm": 2.2177374362945557, "learning_rate": 2.6526421860705474e-06, "loss": 2.7706, "step": 6900 }, { "epoch": 0.2144574991191924, "grad_norm": 2.2889530658721924, "learning_rate": 2.5000000000000015e-06, "loss": 2.7721, "step": 7000 }, { "epoch": 0.21752117767803802, "grad_norm": 2.288100481033325, "learning_rate": 2.3504036788339763e-06, "loss": 2.7774, "step": 7100 }, { "epoch": 0.22058485623688362, "grad_norm": 2.3013720512390137, "learning_rate": 2.204035482646267e-06, "loss": 2.7739, "step": 7200 }, { "epoch": 0.22364853479572924, "grad_norm": 2.4036357402801514, "learning_rate": 2.061073738537635e-06, "loss": 2.7759, "step": 7300 }, { "epoch": 0.22671221335457484, "grad_norm": 2.35628080368042, "learning_rate": 1.9216926233717087e-06, "loss": 2.7724, "step": 7400 }, { "epoch": 0.22977589191342043, "grad_norm": 2.314425230026245, "learning_rate": 1.7860619515673034e-06, "loss": 2.7742, "step": 7500 } ], "logging_steps": 100, "max_steps": 10000, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } 
}, "total_flos": 7.1748116545536e+17, "train_batch_size": 16, "trial_name": null, "trial_params": null }