{ "best_metric": null, "best_model_checkpoint": null, "epoch": 1.04, "eval_steps": 4, "global_step": 13, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.08, "grad_norm": 17.890169143676758, "learning_rate": 2e-05, "loss": 6.6299, "step": 1 }, { "epoch": 0.08, "eval_loss": 6.932005405426025, "eval_runtime": 5.2841, "eval_samples_per_second": 2.271, "eval_steps_per_second": 1.135, "step": 1 }, { "epoch": 0.16, "grad_norm": 17.0613956451416, "learning_rate": 4e-05, "loss": 6.8062, "step": 2 }, { "epoch": 0.24, "grad_norm": 19.356693267822266, "learning_rate": 6e-05, "loss": 7.0697, "step": 3 }, { "epoch": 0.32, "grad_norm": 12.688406944274902, "learning_rate": 8e-05, "loss": 5.9686, "step": 4 }, { "epoch": 0.32, "eval_loss": 4.446321964263916, "eval_runtime": 5.3077, "eval_samples_per_second": 2.261, "eval_steps_per_second": 1.13, "step": 4 }, { "epoch": 0.4, "grad_norm": 11.240882873535156, "learning_rate": 0.0001, "loss": 4.5161, "step": 5 }, { "epoch": 0.48, "grad_norm": 14.287125587463379, "learning_rate": 0.00012, "loss": 2.5736, "step": 6 }, { "epoch": 0.56, "grad_norm": 4.680168628692627, "learning_rate": 0.00014, "loss": 0.9119, "step": 7 }, { "epoch": 0.64, "grad_norm": 3.75272536277771, "learning_rate": 0.00016, "loss": 0.5956, "step": 8 }, { "epoch": 0.64, "eval_loss": 0.5577284693717957, "eval_runtime": 5.323, "eval_samples_per_second": 2.254, "eval_steps_per_second": 1.127, "step": 8 }, { "epoch": 0.72, "grad_norm": 7.569386959075928, "learning_rate": 0.00018, "loss": 0.6499, "step": 9 }, { "epoch": 0.8, "grad_norm": 14.337709426879883, "learning_rate": 0.0002, "loss": 1.6675, "step": 10 }, { "epoch": 0.88, "grad_norm": 3.6579396724700928, "learning_rate": 0.00019981755542233177, "loss": 0.5612, "step": 11 }, { "epoch": 0.96, "grad_norm": 2.5605533123016357, "learning_rate": 0.0001992708874098054, "loss": 0.4848, "step": 12 }, { "epoch": 0.96, "eval_loss": 0.8369883894920349, "eval_runtime": 5.327, "eval_samples_per_second": 2.253, "eval_steps_per_second": 1.126, "step": 12 }, { "epoch": 1.04, "grad_norm": 8.139214515686035, "learning_rate": 0.00019836199069471437, "loss": 0.9289, "step": 13 } ], "logging_steps": 1, "max_steps": 62, "num_input_tokens_seen": 0, "num_train_epochs": 6, "save_steps": 13, "total_flos": 8463871748603904.0, "train_batch_size": 2, "trial_name": null, "trial_params": null }