{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.12254714235382423, "eval_steps": 500, "global_step": 4000, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.003063678558845606, "grad_norm": 2.5270798206329346, "learning_rate": 1.0000000000000002e-06, "loss": 3.1564, "step": 100 }, { "epoch": 0.006127357117691212, "grad_norm": 2.604092597961426, "learning_rate": 2.0000000000000003e-06, "loss": 3.1166, "step": 200 }, { "epoch": 0.009191035676536818, "grad_norm": 2.5005924701690674, "learning_rate": 3e-06, "loss": 3.0905, "step": 300 }, { "epoch": 0.012254714235382424, "grad_norm": 2.7525177001953125, "learning_rate": 4.000000000000001e-06, "loss": 3.0652, "step": 400 }, { "epoch": 0.015318392794228029, "grad_norm": 2.5251283645629883, "learning_rate": 5e-06, "loss": 3.0351, "step": 500 }, { "epoch": 0.018382071353073636, "grad_norm": 2.6771860122680664, "learning_rate": 6e-06, "loss": 3.0028, "step": 600 }, { "epoch": 0.02144574991191924, "grad_norm": 2.4799458980560303, "learning_rate": 7e-06, "loss": 3.0007, "step": 700 }, { "epoch": 0.024509428470764847, "grad_norm": 2.8402369022369385, "learning_rate": 8.000000000000001e-06, "loss": 2.9753, "step": 800 }, { "epoch": 0.027573107029610452, "grad_norm": 2.446842908859253, "learning_rate": 9e-06, "loss": 2.9737, "step": 900 }, { "epoch": 0.030636785588456058, "grad_norm": 2.3989391326904297, "learning_rate": 1e-05, "loss": 2.9618, "step": 1000 }, { "epoch": 0.03370046414730166, "grad_norm": 2.469160795211792, "learning_rate": 9.99695413509548e-06, "loss": 2.928, "step": 1100 }, { "epoch": 0.03676414270614727, "grad_norm": 2.4107494354248047, "learning_rate": 9.987820251299121e-06, "loss": 2.9214, "step": 1200 }, { "epoch": 0.039827821264992874, "grad_norm": 2.540278911590576, "learning_rate": 9.972609476841368e-06, "loss": 2.9198, "step": 1300 }, { "epoch": 0.04289149982383848, "grad_norm": 2.4398670196533203, "learning_rate": 9.951340343707852e-06, "loss": 2.9182, "step": 1400 }, { "epoch": 0.04595517838268409, "grad_norm": 2.49687123298645, "learning_rate": 9.924038765061042e-06, "loss": 2.8994, "step": 1500 }, { "epoch": 0.049018856941529694, "grad_norm": 2.4655182361602783, "learning_rate": 9.890738003669029e-06, "loss": 2.8805, "step": 1600 }, { "epoch": 0.0520825355003753, "grad_norm": 2.4170656204223633, "learning_rate": 9.851478631379982e-06, "loss": 2.8829, "step": 1700 }, { "epoch": 0.055146214059220905, "grad_norm": 2.362334966659546, "learning_rate": 9.806308479691595e-06, "loss": 2.8891, "step": 1800 }, { "epoch": 0.058209892618066514, "grad_norm": 2.426173210144043, "learning_rate": 9.755282581475769e-06, "loss": 2.875, "step": 1900 }, { "epoch": 0.061273571176912116, "grad_norm": 2.6380629539489746, "learning_rate": 9.698463103929542e-06, "loss": 2.879, "step": 2000 }, { "epoch": 0.06433724973575772, "grad_norm": 2.5123095512390137, "learning_rate": 9.635919272833938e-06, "loss": 2.8622, "step": 2100 }, { "epoch": 0.06740092829460333, "grad_norm": 2.3789472579956055, "learning_rate": 9.567727288213005e-06, "loss": 2.8669, "step": 2200 }, { "epoch": 0.07046460685344894, "grad_norm": 2.387221336364746, "learning_rate": 9.493970231495836e-06, "loss": 2.8641, "step": 2300 }, { "epoch": 0.07352828541229454, "grad_norm": 2.4947221279144287, "learning_rate": 9.414737964294636e-06, "loss": 2.8525, "step": 2400 }, { "epoch": 0.07659196397114015, "grad_norm": 2.390381097793579, "learning_rate": 9.330127018922195e-06, "loss": 2.8497, "step": 2500 
}, { "epoch": 0.07965564252998575, "grad_norm": 2.451345682144165, "learning_rate": 9.24024048078213e-06, "loss": 2.8482, "step": 2600 }, { "epoch": 0.08271932108883136, "grad_norm": 2.317512035369873, "learning_rate": 9.145187862775208e-06, "loss": 2.8486, "step": 2700 }, { "epoch": 0.08578299964767697, "grad_norm": 2.620295524597168, "learning_rate": 9.045084971874738e-06, "loss": 2.8474, "step": 2800 }, { "epoch": 0.08884667820652258, "grad_norm": 2.3616020679473877, "learning_rate": 8.94005376803361e-06, "loss": 2.8283, "step": 2900 }, { "epoch": 0.09191035676536818, "grad_norm": 2.517153263092041, "learning_rate": 8.83022221559489e-06, "loss": 2.8451, "step": 3000 }, { "epoch": 0.09497403532421378, "grad_norm": 2.6935179233551025, "learning_rate": 8.715724127386971e-06, "loss": 2.8472, "step": 3100 }, { "epoch": 0.09803771388305939, "grad_norm": 2.4080545902252197, "learning_rate": 8.596699001693257e-06, "loss": 2.8428, "step": 3200 }, { "epoch": 0.101101392441905, "grad_norm": 2.369750499725342, "learning_rate": 8.473291852294986e-06, "loss": 2.829, "step": 3300 }, { "epoch": 0.1041650710007506, "grad_norm": 2.3385934829711914, "learning_rate": 8.345653031794292e-06, "loss": 2.8349, "step": 3400 }, { "epoch": 0.1072287495595962, "grad_norm": 2.2659566402435303, "learning_rate": 8.213938048432697e-06, "loss": 2.8306, "step": 3500 }, { "epoch": 0.11029242811844181, "grad_norm": 2.508164882659912, "learning_rate": 8.078307376628292e-06, "loss": 2.8225, "step": 3600 }, { "epoch": 0.11335610667728742, "grad_norm": 2.302351713180542, "learning_rate": 7.938926261462366e-06, "loss": 2.808, "step": 3700 }, { "epoch": 0.11641978523613303, "grad_norm": 2.3972647190093994, "learning_rate": 7.795964517353734e-06, "loss": 2.8214, "step": 3800 }, { "epoch": 0.11948346379497864, "grad_norm": 2.4006614685058594, "learning_rate": 7.649596321166024e-06, "loss": 2.8198, "step": 3900 }, { "epoch": 0.12254714235382423, "grad_norm": 2.271172285079956, "learning_rate": 7.500000000000001e-06, "loss": 2.8186, "step": 4000 } ], "logging_steps": 100, "max_steps": 10000, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 3.82656621576192e+17, "train_batch_size": 16, "trial_name": null, "trial_params": null }