{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.7504690431519699, "eval_steps": 13, "global_step": 50, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0150093808630394, "grad_norm": 51.73713302612305, "learning_rate": 1e-05, "loss": 7.7969, "step": 1 }, { "epoch": 0.0150093808630394, "eval_loss": 7.751512050628662, "eval_runtime": 43.6829, "eval_samples_per_second": 10.279, "eval_steps_per_second": 1.305, "step": 1 }, { "epoch": 0.0300187617260788, "grad_norm": 48.8546028137207, "learning_rate": 2e-05, "loss": 7.8854, "step": 2 }, { "epoch": 0.0450281425891182, "grad_norm": 43.629215240478516, "learning_rate": 3e-05, "loss": 7.4761, "step": 3 }, { "epoch": 0.0600375234521576, "grad_norm": 40.31449508666992, "learning_rate": 4e-05, "loss": 6.7166, "step": 4 }, { "epoch": 0.075046904315197, "grad_norm": 31.518842697143555, "learning_rate": 5e-05, "loss": 6.0265, "step": 5 }, { "epoch": 0.0900562851782364, "grad_norm": 27.299909591674805, "learning_rate": 6e-05, "loss": 5.1312, "step": 6 }, { "epoch": 0.1050656660412758, "grad_norm": 24.453201293945312, "learning_rate": 7e-05, "loss": 4.2289, "step": 7 }, { "epoch": 0.1200750469043152, "grad_norm": 21.977622985839844, "learning_rate": 8e-05, "loss": 3.409, "step": 8 }, { "epoch": 0.1350844277673546, "grad_norm": 21.064271926879883, "learning_rate": 9e-05, "loss": 2.7192, "step": 9 }, { "epoch": 0.150093808630394, "grad_norm": 18.23163414001465, "learning_rate": 0.0001, "loss": 1.9203, "step": 10 }, { "epoch": 0.1651031894934334, "grad_norm": 18.346233367919922, "learning_rate": 9.98458666866564e-05, "loss": 1.3844, "step": 11 }, { "epoch": 0.1801125703564728, "grad_norm": 17.36043357849121, "learning_rate": 9.938441702975689e-05, "loss": 1.0357, "step": 12 }, { "epoch": 0.1951219512195122, "grad_norm": 6.1927690505981445, "learning_rate": 9.861849601988383e-05, "loss": 0.8596, "step": 13 }, { "epoch": 0.1951219512195122, "eval_loss": 0.7876977324485779, "eval_runtime": 4.0203, "eval_samples_per_second": 111.684, "eval_steps_per_second": 14.178, "step": 13 }, { "epoch": 0.2101313320825516, "grad_norm": 6.852001667022705, "learning_rate": 9.755282581475769e-05, "loss": 0.8059, "step": 14 }, { "epoch": 0.225140712945591, "grad_norm": 15.55766487121582, "learning_rate": 9.619397662556435e-05, "loss": 0.7977, "step": 15 }, { "epoch": 0.2401500938086304, "grad_norm": 6.66907262802124, "learning_rate": 9.45503262094184e-05, "loss": 0.7264, "step": 16 }, { "epoch": 0.2551594746716698, "grad_norm": 6.188997745513916, "learning_rate": 9.263200821770461e-05, "loss": 0.7352, "step": 17 }, { "epoch": 0.2701688555347092, "grad_norm": 2.3706018924713135, "learning_rate": 9.045084971874738e-05, "loss": 0.719, "step": 18 }, { "epoch": 0.2851782363977486, "grad_norm": 5.199508190155029, "learning_rate": 8.802029828000156e-05, "loss": 0.755, "step": 19 }, { "epoch": 0.300187617260788, "grad_norm": 2.7987897396087646, "learning_rate": 8.535533905932738e-05, "loss": 0.7206, "step": 20 }, { "epoch": 0.3151969981238274, "grad_norm": 3.605015516281128, "learning_rate": 8.247240241650918e-05, "loss": 0.7075, "step": 21 }, { "epoch": 0.3302063789868668, "grad_norm": 1.987611174583435, "learning_rate": 7.938926261462366e-05, "loss": 0.7034, "step": 22 }, { "epoch": 0.3452157598499062, "grad_norm": 2.15010666847229, "learning_rate": 7.612492823579745e-05, "loss": 0.698, "step": 23 }, { "epoch": 0.3602251407129456, "grad_norm": 2.248389959335327, "learning_rate": 
7.269952498697734e-05, "loss": 0.717, "step": 24 }, { "epoch": 0.37523452157598497, "grad_norm": 1.4804418087005615, "learning_rate": 6.91341716182545e-05, "loss": 0.707, "step": 25 }, { "epoch": 0.3902439024390244, "grad_norm": 3.2395734786987305, "learning_rate": 6.545084971874738e-05, "loss": 0.708, "step": 26 }, { "epoch": 0.3902439024390244, "eval_loss": 0.6942992806434631, "eval_runtime": 4.0219, "eval_samples_per_second": 111.638, "eval_steps_per_second": 14.172, "step": 26 }, { "epoch": 0.4052532833020638, "grad_norm": 2.5209052562713623, "learning_rate": 6.167226819279528e-05, "loss": 0.7009, "step": 27 }, { "epoch": 0.4202626641651032, "grad_norm": 2.8235585689544678, "learning_rate": 5.782172325201155e-05, "loss": 0.7067, "step": 28 }, { "epoch": 0.4352720450281426, "grad_norm": 1.5787421464920044, "learning_rate": 5.392295478639225e-05, "loss": 0.707, "step": 29 }, { "epoch": 0.450281425891182, "grad_norm": 3.7540314197540283, "learning_rate": 5e-05, "loss": 0.715, "step": 30 }, { "epoch": 0.4652908067542214, "grad_norm": 3.978583574295044, "learning_rate": 4.607704521360776e-05, "loss": 0.7071, "step": 31 }, { "epoch": 0.4803001876172608, "grad_norm": 1.993086338043213, "learning_rate": 4.2178276747988446e-05, "loss": 0.6994, "step": 32 }, { "epoch": 0.49530956848030017, "grad_norm": 3.182072639465332, "learning_rate": 3.832773180720475e-05, "loss": 0.7108, "step": 33 }, { "epoch": 0.5103189493433395, "grad_norm": 1.1114157438278198, "learning_rate": 3.4549150281252636e-05, "loss": 0.6889, "step": 34 }, { "epoch": 0.525328330206379, "grad_norm": 1.384256362915039, "learning_rate": 3.086582838174551e-05, "loss": 0.7044, "step": 35 }, { "epoch": 0.5403377110694184, "grad_norm": 3.324063777923584, "learning_rate": 2.7300475013022663e-05, "loss": 0.7098, "step": 36 }, { "epoch": 0.5553470919324578, "grad_norm": 2.3583667278289795, "learning_rate": 2.3875071764202563e-05, "loss": 0.695, "step": 37 }, { "epoch": 0.5703564727954972, "grad_norm": 1.7996037006378174, "learning_rate": 2.061073738537635e-05, "loss": 0.688, "step": 38 }, { "epoch": 0.5853658536585366, "grad_norm": 0.9545679092407227, "learning_rate": 1.7527597583490822e-05, "loss": 0.6888, "step": 39 }, { "epoch": 0.5853658536585366, "eval_loss": 0.6947711110115051, "eval_runtime": 5.7325, "eval_samples_per_second": 78.325, "eval_steps_per_second": 9.943, "step": 39 }, { "epoch": 0.600375234521576, "grad_norm": 1.6693542003631592, "learning_rate": 1.4644660940672627e-05, "loss": 0.6815, "step": 40 }, { "epoch": 0.6153846153846154, "grad_norm": 2.4521377086639404, "learning_rate": 1.1979701719998453e-05, "loss": 0.7027, "step": 41 }, { "epoch": 0.6303939962476548, "grad_norm": 1.4812160730361938, "learning_rate": 9.549150281252633e-06, "loss": 0.7015, "step": 42 }, { "epoch": 0.6454033771106942, "grad_norm": 2.221867799758911, "learning_rate": 7.367991782295391e-06, "loss": 0.6957, "step": 43 }, { "epoch": 0.6604127579737336, "grad_norm": 3.274115800857544, "learning_rate": 5.449673790581611e-06, "loss": 0.7147, "step": 44 }, { "epoch": 0.6754221388367729, "grad_norm": 1.3704664707183838, "learning_rate": 3.8060233744356633e-06, "loss": 0.6968, "step": 45 }, { "epoch": 0.6904315196998124, "grad_norm": 1.5144388675689697, "learning_rate": 2.4471741852423237e-06, "loss": 0.7031, "step": 46 }, { "epoch": 0.7054409005628518, "grad_norm": 2.387979030609131, "learning_rate": 1.3815039801161721e-06, "loss": 0.699, "step": 47 }, { "epoch": 0.7204502814258912, "grad_norm": 1.8138819932937622, "learning_rate": 6.15582970243117e-07, 
"loss": 0.7033, "step": 48 }, { "epoch": 0.7354596622889306, "grad_norm": 1.4316879510879517, "learning_rate": 1.5413331334360182e-07, "loss": 0.6938, "step": 49 }, { "epoch": 0.7504690431519699, "grad_norm": 2.234066963195801, "learning_rate": 0.0, "loss": 0.7067, "step": 50 } ], "logging_steps": 1, "max_steps": 50, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 13, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 5.7676344066048e+16, "train_batch_size": 2, "trial_name": null, "trial_params": null }