{ "best_metric": 0.11002114415168762, "best_model_checkpoint": "tuple-1k-t5/checkpoint-300", "epoch": 3.0, "eval_steps": 500, "global_step": 300, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.05, "grad_norm": 29.495084762573242, "learning_rate": 5e-06, "loss": 10.2171, "step": 5 }, { "epoch": 0.1, "grad_norm": 37.105918884277344, "learning_rate": 1.1666666666666668e-05, "loss": 8.2109, "step": 10 }, { "epoch": 0.15, "grad_norm": 45.43325424194336, "learning_rate": 2e-05, "loss": 7.2074, "step": 15 }, { "epoch": 0.2, "grad_norm": 4.679723739624023, "learning_rate": 2.8333333333333335e-05, "loss": 2.2548, "step": 20 }, { "epoch": 0.25, "grad_norm": 3.3433218002319336, "learning_rate": 3.6666666666666666e-05, "loss": 1.315, "step": 25 }, { "epoch": 0.3, "grad_norm": 3.405259370803833, "learning_rate": 4.5e-05, "loss": 1.0132, "step": 30 }, { "epoch": 0.35, "grad_norm": 2.41959810256958, "learning_rate": 4.962962962962963e-05, "loss": 0.7069, "step": 35 }, { "epoch": 0.4, "grad_norm": 2.0366761684417725, "learning_rate": 4.8703703703703704e-05, "loss": 0.4676, "step": 40 }, { "epoch": 0.45, "grad_norm": 1.5966047048568726, "learning_rate": 4.7777777777777784e-05, "loss": 0.325, "step": 45 }, { "epoch": 0.5, "grad_norm": 0.965764045715332, "learning_rate": 4.685185185185185e-05, "loss": 0.2708, "step": 50 }, { "epoch": 0.55, "grad_norm": 1.0757189989089966, "learning_rate": 4.592592592592593e-05, "loss": 0.241, "step": 55 }, { "epoch": 0.6, "grad_norm": 0.9667235612869263, "learning_rate": 4.5e-05, "loss": 0.2218, "step": 60 }, { "epoch": 0.65, "grad_norm": 0.9872345924377441, "learning_rate": 4.4074074074074076e-05, "loss": 0.205, "step": 65 }, { "epoch": 0.7, "grad_norm": 0.7139382362365723, "learning_rate": 4.314814814814815e-05, "loss": 0.1784, "step": 70 }, { "epoch": 0.75, "grad_norm": 0.9039368033409119, "learning_rate": 4.222222222222222e-05, "loss": 0.1794, "step": 75 }, { "epoch": 0.8, "grad_norm": 0.6482570767402649, "learning_rate": 4.12962962962963e-05, "loss": 0.1837, "step": 80 }, { "epoch": 0.85, "grad_norm": 0.7324709296226501, "learning_rate": 4.0370370370370374e-05, "loss": 0.1669, "step": 85 }, { "epoch": 0.9, "grad_norm": 0.5565744638442993, "learning_rate": 3.944444444444445e-05, "loss": 0.164, "step": 90 }, { "epoch": 0.95, "grad_norm": 0.5513789653778076, "learning_rate": 3.851851851851852e-05, "loss": 0.15, "step": 95 }, { "epoch": 1.0, "grad_norm": 0.5113586187362671, "learning_rate": 3.759259259259259e-05, "loss": 0.149, "step": 100 }, { "epoch": 1.0, "eval_gen_len": 19.0, "eval_loss": 0.13265687227249146, "eval_rouge1": 14.8515, "eval_rouge2": 12.024, "eval_rougeL": 14.8324, "eval_rougeLsum": 14.8405, "eval_runtime": 11.6075, "eval_samples_per_second": 17.23, "eval_steps_per_second": 1.12, "step": 100 }, { "epoch": 1.05, "grad_norm": 0.6177830696105957, "learning_rate": 3.6666666666666666e-05, "loss": 0.1681, "step": 105 }, { "epoch": 1.1, "grad_norm": 0.46891486644744873, "learning_rate": 3.574074074074074e-05, "loss": 0.1295, "step": 110 }, { "epoch": 1.15, "grad_norm": 0.572552502155304, "learning_rate": 3.481481481481482e-05, "loss": 0.1357, "step": 115 }, { "epoch": 1.2, "grad_norm": 0.4963364601135254, "learning_rate": 3.388888888888889e-05, "loss": 0.149, "step": 120 }, { "epoch": 1.25, "grad_norm": 0.538290798664093, "learning_rate": 3.2962962962962964e-05, "loss": 0.1358, "step": 125 }, { "epoch": 1.3, "grad_norm": 0.528228223323822, "learning_rate": 3.203703703703704e-05, "loss": 
0.1571, "step": 130 }, { "epoch": 1.35, "grad_norm": 0.7794693112373352, "learning_rate": 3.111111111111111e-05, "loss": 0.1361, "step": 135 }, { "epoch": 1.4, "grad_norm": 0.5048784017562866, "learning_rate": 3.018518518518519e-05, "loss": 0.1376, "step": 140 }, { "epoch": 1.45, "grad_norm": 0.568100094795227, "learning_rate": 2.925925925925926e-05, "loss": 0.1575, "step": 145 }, { "epoch": 1.5, "grad_norm": 0.58453369140625, "learning_rate": 2.8333333333333335e-05, "loss": 0.1402, "step": 150 }, { "epoch": 1.55, "grad_norm": 0.4158877730369568, "learning_rate": 2.7407407407407408e-05, "loss": 0.1238, "step": 155 }, { "epoch": 1.6, "grad_norm": 0.5241145491600037, "learning_rate": 2.6481481481481485e-05, "loss": 0.1318, "step": 160 }, { "epoch": 1.65, "grad_norm": 0.6674245595932007, "learning_rate": 2.5555555555555554e-05, "loss": 0.1347, "step": 165 }, { "epoch": 1.7, "grad_norm": 0.43407198786735535, "learning_rate": 2.462962962962963e-05, "loss": 0.1406, "step": 170 }, { "epoch": 1.75, "grad_norm": 0.5406764149665833, "learning_rate": 2.3703703703703707e-05, "loss": 0.1401, "step": 175 }, { "epoch": 1.8, "grad_norm": 0.5779006481170654, "learning_rate": 2.277777777777778e-05, "loss": 0.1619, "step": 180 }, { "epoch": 1.85, "grad_norm": 0.507857620716095, "learning_rate": 2.1851851851851852e-05, "loss": 0.1208, "step": 185 }, { "epoch": 1.9, "grad_norm": 0.41192862391471863, "learning_rate": 2.0925925925925925e-05, "loss": 0.1303, "step": 190 }, { "epoch": 1.95, "grad_norm": 0.495255708694458, "learning_rate": 2e-05, "loss": 0.1224, "step": 195 }, { "epoch": 2.0, "grad_norm": 0.3549606204032898, "learning_rate": 1.9074074074074075e-05, "loss": 0.1275, "step": 200 }, { "epoch": 2.0, "eval_gen_len": 19.0, "eval_loss": 0.11091431230306625, "eval_rouge1": 14.8075, "eval_rouge2": 11.9873, "eval_rougeL": 14.7889, "eval_rougeLsum": 14.8017, "eval_runtime": 10.2766, "eval_samples_per_second": 19.462, "eval_steps_per_second": 1.265, "step": 200 }, { "epoch": 2.05, "grad_norm": 0.41942712664604187, "learning_rate": 1.814814814814815e-05, "loss": 0.1361, "step": 205 }, { "epoch": 2.1, "grad_norm": 0.46660855412483215, "learning_rate": 1.7222222222222224e-05, "loss": 0.1144, "step": 210 }, { "epoch": 2.15, "grad_norm": 0.5672628879547119, "learning_rate": 1.62962962962963e-05, "loss": 0.1363, "step": 215 }, { "epoch": 2.2, "grad_norm": 0.4134034216403961, "learning_rate": 1.537037037037037e-05, "loss": 0.1309, "step": 220 }, { "epoch": 2.25, "grad_norm": 0.5884687304496765, "learning_rate": 1.4444444444444444e-05, "loss": 0.1329, "step": 225 }, { "epoch": 2.3, "grad_norm": 0.44717901945114136, "learning_rate": 1.3518518518518519e-05, "loss": 0.1219, "step": 230 }, { "epoch": 2.35, "grad_norm": 0.6104301810264587, "learning_rate": 1.2592592592592592e-05, "loss": 0.1268, "step": 235 }, { "epoch": 2.4, "grad_norm": 0.5338377952575684, "learning_rate": 1.1666666666666668e-05, "loss": 0.1154, "step": 240 }, { "epoch": 2.45, "grad_norm": 0.41602200269699097, "learning_rate": 1.074074074074074e-05, "loss": 0.1283, "step": 245 }, { "epoch": 2.5, "grad_norm": 0.4216574728488922, "learning_rate": 9.814814814814815e-06, "loss": 0.1285, "step": 250 }, { "epoch": 2.55, "grad_norm": 0.4989544749259949, "learning_rate": 8.88888888888889e-06, "loss": 0.1246, "step": 255 }, { "epoch": 2.6, "grad_norm": 0.6025294065475464, "learning_rate": 7.962962962962963e-06, "loss": 0.1269, "step": 260 }, { "epoch": 2.65, "grad_norm": 0.38507816195487976, "learning_rate": 7.0370370370370375e-06, "loss": 0.1138, "step": 265 }, { 
"epoch": 2.7, "grad_norm": 0.3658128082752228, "learning_rate": 6.111111111111111e-06, "loss": 0.1237, "step": 270 }, { "epoch": 2.75, "grad_norm": 0.4835338592529297, "learning_rate": 5.185185185185185e-06, "loss": 0.1527, "step": 275 }, { "epoch": 2.8, "grad_norm": 0.5483537316322327, "learning_rate": 4.2592592592592596e-06, "loss": 0.1222, "step": 280 }, { "epoch": 2.85, "grad_norm": 0.586328387260437, "learning_rate": 3.3333333333333333e-06, "loss": 0.1281, "step": 285 }, { "epoch": 2.9, "grad_norm": 0.6712453961372375, "learning_rate": 2.4074074074074075e-06, "loss": 0.1384, "step": 290 }, { "epoch": 2.95, "grad_norm": 0.37056586146354675, "learning_rate": 1.4814814814814817e-06, "loss": 0.1181, "step": 295 }, { "epoch": 3.0, "grad_norm": 0.374318927526474, "learning_rate": 5.555555555555556e-07, "loss": 0.1206, "step": 300 }, { "epoch": 3.0, "eval_gen_len": 19.0, "eval_loss": 0.11002114415168762, "eval_rouge1": 14.787, "eval_rouge2": 11.9709, "eval_rougeL": 14.7712, "eval_rougeLsum": 14.7825, "eval_runtime": 10.2272, "eval_samples_per_second": 19.556, "eval_steps_per_second": 1.271, "step": 300 } ], "logging_steps": 5, "max_steps": 300, "num_input_tokens_seen": 0, "num_train_epochs": 3, "save_steps": 500, "total_flos": 641137509089280.0, "train_batch_size": 8, "trial_name": null, "trial_params": null }