{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.9984, "eval_steps": 500, "global_step": 156, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.032, "grad_norm": 0.3039781153202057, "learning_rate": 2.9924022525939684e-05, "loss": 0.678, "num_input_tokens_seen": 163840, "step": 5 }, { "epoch": 0.064, "grad_norm": 0.2683364450931549, "learning_rate": 2.9696859780634016e-05, "loss": 0.6551, "num_input_tokens_seen": 327680, "step": 10 }, { "epoch": 0.096, "grad_norm": 0.246008038520813, "learning_rate": 2.9320812997628184e-05, "loss": 0.6372, "num_input_tokens_seen": 491520, "step": 15 }, { "epoch": 0.128, "grad_norm": 0.23320266604423523, "learning_rate": 2.8799691654882365e-05, "loss": 0.6201, "num_input_tokens_seen": 655360, "step": 20 }, { "epoch": 0.16, "grad_norm": 0.24441155791282654, "learning_rate": 2.8138774883503317e-05, "loss": 0.5965, "num_input_tokens_seen": 819200, "step": 25 }, { "epoch": 0.192, "grad_norm": 0.2341088205575943, "learning_rate": 2.7344757988404845e-05, "loss": 0.5959, "num_input_tokens_seen": 983040, "step": 30 }, { "epoch": 0.224, "grad_norm": 0.23481066524982452, "learning_rate": 2.6425684622660387e-05, "loss": 0.6006, "num_input_tokens_seen": 1146880, "step": 35 }, { "epoch": 0.256, "grad_norm": 0.2456846982240677, "learning_rate": 2.5390865302643993e-05, "loss": 0.594, "num_input_tokens_seen": 1310720, "step": 40 }, { "epoch": 0.288, "grad_norm": 0.25728079676628113, "learning_rate": 2.425078308942815e-05, "loss": 0.5825, "num_input_tokens_seen": 1474560, "step": 45 }, { "epoch": 0.32, "grad_norm": 0.2505452334880829, "learning_rate": 2.3016987391917016e-05, "loss": 0.5871, "num_input_tokens_seen": 1638400, "step": 50 }, { "epoch": 0.352, "grad_norm": 0.27519550919532776, "learning_rate": 2.1701976967524388e-05, "loss": 0.5771, "num_input_tokens_seen": 1802240, "step": 55 }, { "epoch": 0.384, "grad_norm": 0.2705497741699219, "learning_rate": 2.0319073305638035e-05, "loss": 0.5544, "num_input_tokens_seen": 1966080, "step": 60 }, { "epoch": 0.416, "grad_norm": 0.2861919701099396, "learning_rate": 1.888228567653781e-05, "loss": 0.5768, "num_input_tokens_seen": 2129920, "step": 65 }, { "epoch": 0.448, "grad_norm": 0.29395216703414917, "learning_rate": 1.7406169212866405e-05, "loss": 0.5534, "num_input_tokens_seen": 2293760, "step": 70 }, { "epoch": 0.48, "grad_norm": 0.285727322101593, "learning_rate": 1.5905677461334292e-05, "loss": 0.5597, "num_input_tokens_seen": 2457600, "step": 75 }, { "epoch": 0.512, "grad_norm": 0.30645114183425903, "learning_rate": 1.4396010898358778e-05, "loss": 0.571, "num_input_tokens_seen": 2621440, "step": 80 }, { "epoch": 0.544, "grad_norm": 0.2912521958351135, "learning_rate": 1.2892462944223613e-05, "loss": 0.5572, "num_input_tokens_seen": 2785280, "step": 85 }, { "epoch": 0.576, "grad_norm": 0.3027022182941437, "learning_rate": 1.1410265035686639e-05, "loss": 0.5686, "num_input_tokens_seen": 2949120, "step": 90 }, { "epoch": 0.608, "grad_norm": 0.32110294699668884, "learning_rate": 9.964432326500933e-06, "loss": 0.5525, "num_input_tokens_seen": 3112960, "step": 95 }, { "epoch": 0.64, "grad_norm": 0.31834056973457336, "learning_rate": 8.569611578954186e-06, "loss": 0.5594, "num_input_tokens_seen": 3276800, "step": 100 }, { "epoch": 0.672, "grad_norm": 0.3223641812801361, "learning_rate": 7.239932787335147e-06, "loss": 0.5709, "num_input_tokens_seen": 3440640, "step": 105 }, { "epoch": 0.704, "grad_norm": 0.32236120104789734, 
"learning_rate": 5.988866036430314e-06, "loss": 0.5555, "num_input_tokens_seen": 3604480, "step": 110 }, { "epoch": 0.736, "grad_norm": 0.32125550508499146, "learning_rate": 4.829085045121636e-06, "loss": 0.5597, "num_input_tokens_seen": 3768320, "step": 115 }, { "epoch": 0.768, "grad_norm": 0.325810968875885, "learning_rate": 3.772338777433482e-06, "loss": 0.5432, "num_input_tokens_seen": 3932160, "step": 120 }, { "epoch": 0.8, "grad_norm": 0.32014375925064087, "learning_rate": 2.829332421651404e-06, "loss": 0.5363, "num_input_tokens_seen": 4096000, "step": 125 }, { "epoch": 0.832, "grad_norm": 0.32001549005508423, "learning_rate": 2.0096189432334194e-06, "loss": 0.5582, "num_input_tokens_seen": 4259840, "step": 130 }, { "epoch": 0.864, "grad_norm": 0.33245849609375, "learning_rate": 1.321502310118649e-06, "loss": 0.5539, "num_input_tokens_seen": 4423680, "step": 135 }, { "epoch": 0.896, "grad_norm": 0.3493448495864868, "learning_rate": 7.719533707928178e-07, "loss": 0.5529, "num_input_tokens_seen": 4587520, "step": 140 }, { "epoch": 0.928, "grad_norm": 0.3279431462287903, "learning_rate": 3.665392372935922e-07, "loss": 0.5584, "num_input_tokens_seen": 4751360, "step": 145 }, { "epoch": 0.96, "grad_norm": 0.3298741281032562, "learning_rate": 1.0936688852919042e-07, "loss": 0.5503, "num_input_tokens_seen": 4915200, "step": 150 }, { "epoch": 0.992, "grad_norm": 0.33253204822540283, "learning_rate": 3.0415652272480776e-09, "loss": 0.5549, "num_input_tokens_seen": 5079040, "step": 155 }, { "epoch": 0.9984, "num_input_tokens_seen": 5111808, "step": 156, "total_flos": 2.1873318928633037e+17, "train_loss": 0.5760276004289969, "train_runtime": 1768.2955, "train_samples_per_second": 5.655, "train_steps_per_second": 0.088 } ], "logging_steps": 5, "max_steps": 156, "num_input_tokens_seen": 5111808, "num_train_epochs": 1, "save_steps": 100, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 2.1873318928633037e+17, "train_batch_size": 8, "trial_name": null, "trial_params": null }