{ "best_metric": null, "best_model_checkpoint": null, "epoch": 1.9968337730870713, "eval_steps": 500, "global_step": 473, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.04221635883905013, "grad_norm": 0.50788813829422, "learning_rate": 2.5e-05, "loss": 4.0916, "step": 10 }, { "epoch": 0.08443271767810026, "grad_norm": 0.31596097350120544, "learning_rate": 2.9996874776728528e-05, "loss": 3.8989, "step": 20 }, { "epoch": 0.1266490765171504, "grad_norm": 0.28351184725761414, "learning_rate": 2.998418103705505e-05, "loss": 3.6225, "step": 30 }, { "epoch": 0.16886543535620052, "grad_norm": 0.2747008502483368, "learning_rate": 2.996173263030885e-05, "loss": 3.3931, "step": 40 }, { "epoch": 0.21108179419525067, "grad_norm": 0.27435237169265747, "learning_rate": 2.9929545796017835e-05, "loss": 3.1614, "step": 50 }, { "epoch": 0.2532981530343008, "grad_norm": 0.31030479073524475, "learning_rate": 2.9887643818640412e-05, "loss": 2.882, "step": 60 }, { "epoch": 0.2955145118733509, "grad_norm": 0.3085922598838806, "learning_rate": 2.983605701072119e-05, "loss": 2.6283, "step": 70 }, { "epoch": 0.33773087071240104, "grad_norm": 0.230041041970253, "learning_rate": 2.9774822690962358e-05, "loss": 2.3723, "step": 80 }, { "epoch": 0.37994722955145116, "grad_norm": 0.19185876846313477, "learning_rate": 2.9703985157226802e-05, "loss": 2.1931, "step": 90 }, { "epoch": 0.42216358839050133, "grad_norm": 0.17534567415714264, "learning_rate": 2.9623595654492328e-05, "loss": 2.1193, "step": 100 }, { "epoch": 0.46437994722955145, "grad_norm": 0.15833114087581635, "learning_rate": 2.953371233778022e-05, "loss": 2.0044, "step": 110 }, { "epoch": 0.5065963060686016, "grad_norm": 0.152941033244133, "learning_rate": 2.943440023008502e-05, "loss": 2.0208, "step": 120 }, { "epoch": 0.5488126649076517, "grad_norm": 0.14308719336986542, "learning_rate": 2.932573117533585e-05, "loss": 2.0013, "step": 130 }, { "epoch": 0.5910290237467019, "grad_norm": 0.12415236979722977, "learning_rate": 2.9207783786423436e-05, "loss": 1.9719, "step": 140 }, { "epoch": 0.633245382585752, "grad_norm": 0.14564257860183716, "learning_rate": 2.9080643388330266e-05, "loss": 1.9659, "step": 150 }, { "epoch": 0.6754617414248021, "grad_norm": 0.14092124998569489, "learning_rate": 2.8944401956405192e-05, "loss": 1.9645, "step": 160 }, { "epoch": 0.7176781002638523, "grad_norm": 0.14931683242321014, "learning_rate": 2.8799158049827027e-05, "loss": 1.902, "step": 170 }, { "epoch": 0.7598944591029023, "grad_norm": 0.13305355608463287, "learning_rate": 2.8645016740305286e-05, "loss": 2.0203, "step": 180 }, { "epoch": 0.8021108179419525, "grad_norm": 0.13253405690193176, "learning_rate": 2.8482089536069683e-05, "loss": 1.92, "step": 190 }, { "epoch": 0.8443271767810027, "grad_norm": 0.15814724564552307, "learning_rate": 2.8310494301203323e-05, "loss": 1.9354, "step": 200 }, { "epoch": 0.8865435356200527, "grad_norm": 0.12858152389526367, "learning_rate": 2.8130355170378002e-05, "loss": 1.8576, "step": 210 }, { "epoch": 0.9287598944591029, "grad_norm": 0.15734779834747314, "learning_rate": 2.7941802459053222e-05, "loss": 1.8994, "step": 220 }, { "epoch": 0.9709762532981531, "grad_norm": 0.13302397727966309, "learning_rate": 2.7744972569203985e-05, "loss": 1.9127, "step": 230 }, { "epoch": 1.0131926121372032, "grad_norm": 0.14125587046146393, "learning_rate": 2.754000789064544e-05, "loss": 1.856, "step": 240 }, { "epoch": 1.0554089709762533, "grad_norm": 0.1589905321598053, 
"learning_rate": 2.7327056698025907e-05, "loss": 1.8638, "step": 250 }, { "epoch": 1.0976253298153034, "grad_norm": 0.15254752337932587, "learning_rate": 2.710627304356264e-05, "loss": 1.8925, "step": 260 }, { "epoch": 1.1398416886543536, "grad_norm": 0.1510193943977356, "learning_rate": 2.6877816645598093e-05, "loss": 1.8423, "step": 270 }, { "epoch": 1.1820580474934037, "grad_norm": 0.15128286182880402, "learning_rate": 2.664185277305712e-05, "loss": 1.8433, "step": 280 }, { "epoch": 1.2242744063324538, "grad_norm": 0.16148072481155396, "learning_rate": 2.639855212588892e-05, "loss": 1.8482, "step": 290 }, { "epoch": 1.266490765171504, "grad_norm": 0.1490674763917923, "learning_rate": 2.6148090711579976e-05, "loss": 1.8667, "step": 300 }, { "epoch": 1.3087071240105541, "grad_norm": 0.141664519906044, "learning_rate": 2.5890649717827517e-05, "loss": 1.8034, "step": 310 }, { "epoch": 1.3509234828496042, "grad_norm": 0.13706474006175995, "learning_rate": 2.5626415381465506e-05, "loss": 1.8521, "step": 320 }, { "epoch": 1.3931398416886545, "grad_norm": 0.1380421221256256, "learning_rate": 2.535557885373801e-05, "loss": 1.8737, "step": 330 }, { "epoch": 1.4353562005277045, "grad_norm": 0.16114376485347748, "learning_rate": 2.5078336062017396e-05, "loss": 1.8536, "step": 340 }, { "epoch": 1.4775725593667546, "grad_norm": 0.14424873888492584, "learning_rate": 2.4794887568067413e-05, "loss": 1.8225, "step": 350 }, { "epoch": 1.5197889182058049, "grad_norm": 0.15957856178283691, "learning_rate": 2.4505438422953686e-05, "loss": 1.8637, "step": 360 }, { "epoch": 1.562005277044855, "grad_norm": 0.16451793909072876, "learning_rate": 2.421019801870658e-05, "loss": 1.8643, "step": 370 }, { "epoch": 1.604221635883905, "grad_norm": 0.1663391888141632, "learning_rate": 2.390937993684371e-05, "loss": 1.8301, "step": 380 }, { "epoch": 1.6464379947229553, "grad_norm": 0.15829075872898102, "learning_rate": 2.3603201793861776e-05, "loss": 1.8388, "step": 390 }, { "epoch": 1.6886543535620053, "grad_norm": 0.14315567910671234, "learning_rate": 2.329188508380936e-05, "loss": 1.8571, "step": 400 }, { "epoch": 1.7308707124010554, "grad_norm": 0.16106358170509338, "learning_rate": 2.2975655018054685e-05, "loss": 1.8783, "step": 410 }, { "epoch": 1.7730870712401057, "grad_norm": 0.14807620644569397, "learning_rate": 2.2654740362364196e-05, "loss": 1.8006, "step": 420 }, { "epoch": 1.8153034300791555, "grad_norm": 0.1798703372478485, "learning_rate": 2.232937327140983e-05, "loss": 1.7707, "step": 430 }, { "epoch": 1.8575197889182058, "grad_norm": 0.15553659200668335, "learning_rate": 2.1999789120824702e-05, "loss": 1.8307, "step": 440 }, { "epoch": 1.899736147757256, "grad_norm": 0.14261312782764435, "learning_rate": 2.166622633692871e-05, "loss": 1.7768, "step": 450 }, { "epoch": 1.941952506596306, "grad_norm": 0.18098565936088562, "learning_rate": 2.13289262242472e-05, "loss": 1.7687, "step": 460 }, { "epoch": 1.9841688654353562, "grad_norm": 0.15806493163108826, "learning_rate": 2.0988132790947478e-05, "loss": 1.8417, "step": 470 } ], "logging_steps": 10, "max_steps": 1180, "num_input_tokens_seen": 0, "num_train_epochs": 5, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 2.1390125387913216e+16, "train_batch_size": 2, "trial_name": null, "trial_params": null }