{ "best_metric": 0.8665329813957214, "best_model_checkpoint": "./lora-alpaca/checkpoint-200", "epoch": 0.5119590432765379, "eval_steps": 200, "global_step": 200, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.025597952163826893, "grad_norm": 0.1311839073896408, "learning_rate": 1e-05, "loss": 1.2034, "step": 10 }, { "epoch": 0.051195904327653786, "grad_norm": 0.22223667800426483, "learning_rate": 2e-05, "loss": 1.4009, "step": 20 }, { "epoch": 0.07679385649148068, "grad_norm": 0.47330865263938904, "learning_rate": 3e-05, "loss": 1.6715, "step": 30 }, { "epoch": 0.10239180865530757, "grad_norm": 0.9922175407409668, "learning_rate": 4e-05, "loss": 1.9561, "step": 40 }, { "epoch": 0.12798976081913446, "grad_norm": 2.9832212924957275, "learning_rate": 5e-05, "loss": 1.8355, "step": 50 }, { "epoch": 0.15358771298296137, "grad_norm": 0.2527308464050293, "learning_rate": 6e-05, "loss": 0.9926, "step": 60 }, { "epoch": 0.17918566514678827, "grad_norm": 0.37248659133911133, "learning_rate": 7e-05, "loss": 1.0282, "step": 70 }, { "epoch": 0.20478361731061515, "grad_norm": 0.29374104738235474, "learning_rate": 8e-05, "loss": 0.983, "step": 80 }, { "epoch": 0.23038156947444205, "grad_norm": 0.2772407829761505, "learning_rate": 9e-05, "loss": 0.8514, "step": 90 }, { "epoch": 0.2559795216382689, "grad_norm": 0.44678089022636414, "learning_rate": 0.0001, "loss": 0.6994, "step": 100 }, { "epoch": 0.28157747380209586, "grad_norm": 0.1145705059170723, "learning_rate": 9.906542056074767e-05, "loss": 0.8862, "step": 110 }, { "epoch": 0.30717542596592273, "grad_norm": 0.1596599817276001, "learning_rate": 9.813084112149533e-05, "loss": 0.9123, "step": 120 }, { "epoch": 0.3327733781297496, "grad_norm": 0.15576007962226868, "learning_rate": 9.7196261682243e-05, "loss": 0.8971, "step": 130 }, { "epoch": 0.35837133029357654, "grad_norm": 0.17032374441623688, "learning_rate": 9.626168224299066e-05, "loss": 0.7929, "step": 140 }, { "epoch": 0.3839692824574034, "grad_norm": 0.505789577960968, "learning_rate": 9.532710280373832e-05, "loss": 0.6719, "step": 150 }, { "epoch": 0.4095672346212303, "grad_norm": 0.11598572134971619, "learning_rate": 9.439252336448599e-05, "loss": 0.8772, "step": 160 }, { "epoch": 0.4351651867850572, "grad_norm": 0.17679552733898163, "learning_rate": 9.345794392523365e-05, "loss": 0.8909, "step": 170 }, { "epoch": 0.4607631389488841, "grad_norm": 0.177357017993927, "learning_rate": 9.252336448598131e-05, "loss": 0.8708, "step": 180 }, { "epoch": 0.486361091112711, "grad_norm": 0.14735093712806702, "learning_rate": 9.158878504672898e-05, "loss": 0.7538, "step": 190 }, { "epoch": 0.5119590432765379, "grad_norm": 0.36298009753227234, "learning_rate": 9.065420560747664e-05, "loss": 0.6309, "step": 200 }, { "epoch": 0.5119590432765379, "eval_loss": 0.8665329813957214, "eval_runtime": 147.8844, "eval_samples_per_second": 13.524, "eval_steps_per_second": 1.691, "step": 200 } ], "logging_steps": 10, "max_steps": 1170, "num_input_tokens_seen": 0, "num_train_epochs": 3, "save_steps": 200, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 2.2872832292683776e+17, "train_batch_size": 4, "trial_name": null, "trial_params": null }