{ "best_metric": null, "best_model_checkpoint": null, "epoch": 1.8651162790697673, "eval_steps": 9, "global_step": 100, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.018604651162790697, "eval_loss": 7.122097015380859, "eval_runtime": 1.8245, "eval_samples_per_second": 49.877, "eval_steps_per_second": 6.577, "step": 1 }, { "epoch": 0.05581395348837209, "grad_norm": 5.93759298324585, "learning_rate": 3e-05, "loss": 7.0049, "step": 3 }, { "epoch": 0.11162790697674418, "grad_norm": 3.5059802532196045, "learning_rate": 6e-05, "loss": 6.785, "step": 6 }, { "epoch": 0.16744186046511628, "grad_norm": 2.8853189945220947, "learning_rate": 9e-05, "loss": 6.2763, "step": 9 }, { "epoch": 0.16744186046511628, "eval_loss": 5.901803016662598, "eval_runtime": 1.4176, "eval_samples_per_second": 64.193, "eval_steps_per_second": 8.465, "step": 9 }, { "epoch": 0.22325581395348837, "grad_norm": 2.918773889541626, "learning_rate": 9.987820251299122e-05, "loss": 5.7888, "step": 12 }, { "epoch": 0.27906976744186046, "grad_norm": 2.4992666244506836, "learning_rate": 9.924038765061042e-05, "loss": 5.5229, "step": 15 }, { "epoch": 0.33488372093023255, "grad_norm": 2.410930633544922, "learning_rate": 9.806308479691595e-05, "loss": 5.2322, "step": 18 }, { "epoch": 0.33488372093023255, "eval_loss": 5.058493614196777, "eval_runtime": 1.4209, "eval_samples_per_second": 64.043, "eval_steps_per_second": 8.445, "step": 18 }, { "epoch": 0.39069767441860465, "grad_norm": 2.452528953552246, "learning_rate": 9.635919272833938e-05, "loss": 4.993, "step": 21 }, { "epoch": 0.44651162790697674, "grad_norm": 2.0651979446411133, "learning_rate": 9.414737964294636e-05, "loss": 4.8643, "step": 24 }, { "epoch": 0.5023255813953489, "grad_norm": 2.8087124824523926, "learning_rate": 9.145187862775209e-05, "loss": 4.6, "step": 27 }, { "epoch": 0.5023255813953489, "eval_loss": 4.6170830726623535, "eval_runtime": 1.4244, "eval_samples_per_second": 63.885, "eval_steps_per_second": 8.424, "step": 27 }, { "epoch": 0.5581395348837209, "grad_norm": 2.3394060134887695, "learning_rate": 8.83022221559489e-05, "loss": 4.4692, "step": 30 }, { "epoch": 0.6139534883720931, "grad_norm": 2.6000590324401855, "learning_rate": 8.473291852294987e-05, "loss": 4.3967, "step": 33 }, { "epoch": 0.6697674418604651, "grad_norm": 2.269150972366333, "learning_rate": 8.07830737662829e-05, "loss": 4.3017, "step": 36 }, { "epoch": 0.6697674418604651, "eval_loss": 4.316805362701416, "eval_runtime": 1.4232, "eval_samples_per_second": 63.942, "eval_steps_per_second": 8.432, "step": 36 }, { "epoch": 0.7255813953488373, "grad_norm": 2.6373918056488037, "learning_rate": 7.649596321166024e-05, "loss": 4.3059, "step": 39 }, { "epoch": 0.7813953488372093, "grad_norm": 2.009603977203369, "learning_rate": 7.191855733945387e-05, "loss": 4.2137, "step": 42 }, { "epoch": 0.8372093023255814, "grad_norm": 2.5424704551696777, "learning_rate": 6.710100716628344e-05, "loss": 4.0419, "step": 45 }, { "epoch": 0.8372093023255814, "eval_loss": 4.111220359802246, "eval_runtime": 1.427, "eval_samples_per_second": 63.768, "eval_steps_per_second": 8.409, "step": 45 }, { "epoch": 0.8930232558139535, "grad_norm": 2.416537284851074, "learning_rate": 6.209609477998338e-05, "loss": 3.8657, "step": 48 }, { "epoch": 0.9488372093023256, "grad_norm": 2.1381163597106934, "learning_rate": 5.695865504800327e-05, "loss": 3.9941, "step": 51 }, { "epoch": 1.0093023255813953, "grad_norm": 3.770850419998169, "learning_rate": 
5.174497483512506e-05, "loss": 4.5154, "step": 54 }, { "epoch": 1.0093023255813953, "eval_loss": 3.986980438232422, "eval_runtime": 1.4232, "eval_samples_per_second": 63.942, "eval_steps_per_second": 8.432, "step": 54 }, { "epoch": 1.0651162790697675, "grad_norm": 2.297471523284912, "learning_rate": 4.6512176312793736e-05, "loss": 3.7592, "step": 57 }, { "epoch": 1.1209302325581396, "grad_norm": 2.4820058345794678, "learning_rate": 4.131759111665349e-05, "loss": 3.8182, "step": 60 }, { "epoch": 1.1767441860465115, "grad_norm": 46.66299057006836, "learning_rate": 3.6218132209150045e-05, "loss": 3.8323, "step": 63 }, { "epoch": 1.1767441860465115, "eval_loss": 3.9052999019622803, "eval_runtime": 1.4226, "eval_samples_per_second": 63.966, "eval_steps_per_second": 8.435, "step": 63 }, { "epoch": 1.2325581395348837, "grad_norm": 2.4314541816711426, "learning_rate": 3.12696703292044e-05, "loss": 3.8175, "step": 66 }, { "epoch": 1.2883720930232558, "grad_norm": 2.642011880874634, "learning_rate": 2.6526421860705473e-05, "loss": 3.7243, "step": 69 }, { "epoch": 1.344186046511628, "grad_norm": 2.5912115573883057, "learning_rate": 2.2040354826462668e-05, "loss": 3.5854, "step": 72 }, { "epoch": 1.344186046511628, "eval_loss": 3.826810836791992, "eval_runtime": 1.4278, "eval_samples_per_second": 63.734, "eval_steps_per_second": 8.405, "step": 72 }, { "epoch": 1.4, "grad_norm": 2.162818193435669, "learning_rate": 1.7860619515673033e-05, "loss": 3.6049, "step": 75 }, { "epoch": 1.455813953488372, "grad_norm": 2.701357126235962, "learning_rate": 1.4033009983067452e-05, "loss": 3.7687, "step": 78 }, { "epoch": 1.5116279069767442, "grad_norm": 2.41788387298584, "learning_rate": 1.0599462319663905e-05, "loss": 3.8804, "step": 81 }, { "epoch": 1.5116279069767442, "eval_loss": 3.793916702270508, "eval_runtime": 1.4278, "eval_samples_per_second": 63.736, "eval_steps_per_second": 8.405, "step": 81 }, { "epoch": 1.5674418604651161, "grad_norm": 2.8578102588653564, "learning_rate": 7.597595192178702e-06, "loss": 3.5929, "step": 84 }, { "epoch": 1.6232558139534885, "grad_norm": 2.4142818450927734, "learning_rate": 5.060297685041659e-06, "loss": 3.7192, "step": 87 }, { "epoch": 1.6790697674418604, "grad_norm": 2.4720561504364014, "learning_rate": 3.0153689607045845e-06, "loss": 3.6054, "step": 90 }, { "epoch": 1.6790697674418604, "eval_loss": 3.775722026824951, "eval_runtime": 1.4214, "eval_samples_per_second": 64.019, "eval_steps_per_second": 8.442, "step": 90 }, { "epoch": 1.7348837209302326, "grad_norm": 2.3214516639709473, "learning_rate": 1.4852136862001764e-06, "loss": 3.6552, "step": 93 }, { "epoch": 1.7906976744186047, "grad_norm": 2.328660726547241, "learning_rate": 4.865965629214819e-07, "loss": 3.671, "step": 96 }, { "epoch": 1.8465116279069766, "grad_norm": 2.30373477935791, "learning_rate": 3.04586490452119e-08, "loss": 3.5648, "step": 99 }, { "epoch": 1.8465116279069766, "eval_loss": 3.7725934982299805, "eval_runtime": 1.4221, "eval_samples_per_second": 63.991, "eval_steps_per_second": 8.438, "step": 99 } ], "logging_steps": 3, "max_steps": 100, "num_input_tokens_seen": 0, "num_train_epochs": 2, "save_steps": 9, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 6658776367104000.0, "train_batch_size": 8, "trial_name": null, "trial_params": null }