{ "best_metric": 0.18187156319618225, "best_model_checkpoint": "mgh6/TCS_Pair/run-bad69_00000/checkpoint-2000", "epoch": 0.9412397472098964, "eval_steps": 500, "global_step": 3500, "is_hyper_param_search": true, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.13, "learning_rate": 0.00079158678443389, "loss": 0.1584, "step": 500 }, { "epoch": 0.13, "eval_loss": 0.2451040893793106, "eval_runtime": 21.3792, "eval_samples_per_second": 224.096, "eval_steps_per_second": 28.018, "step": 500 }, { "epoch": 0.27, "learning_rate": 0.000786228087003387, "loss": 0.1285, "step": 1000 }, { "epoch": 0.27, "eval_loss": 0.19461262226104736, "eval_runtime": 21.0052, "eval_samples_per_second": 228.086, "eval_steps_per_second": 28.517, "step": 1000 }, { "epoch": 0.4, "learning_rate": 0.0007808693895728842, "loss": 0.1232, "step": 1500 }, { "epoch": 0.4, "eval_loss": 0.268852174282074, "eval_runtime": 21.1069, "eval_samples_per_second": 226.988, "eval_steps_per_second": 28.379, "step": 1500 }, { "epoch": 0.54, "learning_rate": 0.0007755106921423813, "loss": 0.1182, "step": 2000 }, { "epoch": 0.54, "eval_loss": 0.18187156319618225, "eval_runtime": 21.0753, "eval_samples_per_second": 227.328, "eval_steps_per_second": 28.422, "step": 2000 }, { "epoch": 0.67, "learning_rate": 0.0007701519947118784, "loss": 0.1642, "step": 2500 }, { "epoch": 0.67, "eval_loss": 0.25392478704452515, "eval_runtime": 21.48, "eval_samples_per_second": 223.045, "eval_steps_per_second": 27.886, "step": 2500 }, { "epoch": 0.81, "learning_rate": 0.0007647932972813753, "loss": 0.1779, "step": 3000 }, { "epoch": 0.81, "eval_loss": 0.2446264624595642, "eval_runtime": 21.3207, "eval_samples_per_second": 224.711, "eval_steps_per_second": 28.095, "step": 3000 }, { "epoch": 0.94, "learning_rate": 0.0007594345998508726, "loss": 0.1776, "step": 3500 }, { "epoch": 0.94, "eval_loss": 0.2562008798122406, "eval_runtime": 21.0095, "eval_samples_per_second": 228.039, "eval_steps_per_second": 28.511, "step": 3500 } ], "logging_steps": 500, "max_steps": 74360, "num_train_epochs": 20, "save_steps": 500, "total_flos": 3555307819392000.0, "trial_name": null, "trial_params": { "classifier_dropout": 0.18727005942368125, "gradient_accumulation_steps": 2, "learning_rate": 0.0007969454818643929, "weight_decay": 0.029106359131330688 } }