{
  "best_metric": 1.5397371053695679,
  "best_model_checkpoint": "./results/t5-bow-decoder-base\\checkpoint-13500",
  "epoch": 6.0,
  "global_step": 13500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.11,
      "learning_rate": 5e-05,
      "loss": 4.0305,
      "step": 250
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.0001,
      "loss": 2.3805,
      "step": 500
    },
    {
      "epoch": 0.33,
      "learning_rate": 9.943820224719102e-05,
      "loss": 2.1631,
      "step": 750
    },
    {
      "epoch": 0.44,
      "learning_rate": 9.887640449438202e-05,
      "loss": 2.0932,
      "step": 1000
    },
    {
      "epoch": 0.56,
      "learning_rate": 9.831460674157303e-05,
      "loss": 1.9341,
      "step": 1250
    },
    {
      "epoch": 0.67,
      "learning_rate": 9.775280898876405e-05,
      "loss": 1.9336,
      "step": 1500
    },
    {
      "epoch": 0.78,
      "learning_rate": 9.719101123595506e-05,
      "loss": 1.8884,
      "step": 1750
    },
    {
      "epoch": 0.89,
      "learning_rate": 9.662921348314608e-05,
      "loss": 1.899,
      "step": 2000
    },
    {
      "epoch": 1.0,
      "learning_rate": 9.606741573033708e-05,
      "loss": 1.867,
      "step": 2250
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.7203162908554077,
      "eval_runtime": 17.6762,
      "eval_samples_per_second": 56.573,
      "eval_steps_per_second": 3.564,
      "step": 2250
    },
    {
      "epoch": 1.11,
      "learning_rate": 9.550561797752809e-05,
      "loss": 1.8113,
      "step": 2500
    },
    {
      "epoch": 1.22,
      "learning_rate": 9.49438202247191e-05,
      "loss": 1.8082,
      "step": 2750
    },
    {
      "epoch": 1.33,
      "learning_rate": 9.438202247191012e-05,
      "loss": 1.8161,
      "step": 3000
    },
    {
      "epoch": 1.44,
      "learning_rate": 9.382022471910112e-05,
      "loss": 1.7548,
      "step": 3250
    },
    {
      "epoch": 1.56,
      "learning_rate": 9.325842696629214e-05,
      "loss": 1.7057,
      "step": 3500
    },
    {
      "epoch": 1.67,
      "learning_rate": 9.269662921348315e-05,
      "loss": 1.7636,
      "step": 3750
    },
    {
      "epoch": 1.78,
      "learning_rate": 9.213483146067416e-05,
      "loss": 1.7652,
      "step": 4000
    },
    {
      "epoch": 1.89,
      "learning_rate": 9.157303370786518e-05,
      "loss": 1.693,
      "step": 4250
    },
    {
      "epoch": 2.0,
      "learning_rate": 9.101123595505618e-05,
      "loss": 1.7073,
      "step": 4500
    },
    {
      "epoch": 2.0,
      "eval_loss": 1.624514102935791,
      "eval_runtime": 17.0363,
      "eval_samples_per_second": 58.698,
      "eval_steps_per_second": 3.698,
      "step": 4500
    },
    {
      "epoch": 2.11,
      "learning_rate": 9.04494382022472e-05,
      "loss": 1.668,
      "step": 4750
    },
    {
      "epoch": 2.22,
      "learning_rate": 8.988764044943821e-05,
      "loss": 1.6579,
      "step": 5000
    },
    {
      "epoch": 2.33,
      "learning_rate": 8.932584269662921e-05,
      "loss": 1.6282,
      "step": 5250
    },
    {
      "epoch": 2.44,
      "learning_rate": 8.876404494382022e-05,
      "loss": 1.6937,
      "step": 5500
    },
    {
      "epoch": 2.56,
      "learning_rate": 8.820224719101124e-05,
      "loss": 1.6562,
      "step": 5750
    },
    {
      "epoch": 2.67,
      "learning_rate": 8.764044943820225e-05,
      "loss": 1.61,
      "step": 6000
    },
    {
      "epoch": 2.78,
      "learning_rate": 8.707865168539327e-05,
      "loss": 1.6144,
      "step": 6250
    },
    {
      "epoch": 2.89,
      "learning_rate": 8.651685393258427e-05,
      "loss": 1.5554,
      "step": 6500
    },
    {
      "epoch": 3.0,
      "learning_rate": 8.595505617977528e-05,
      "loss": 1.6599,
      "step": 6750
    },
    {
      "epoch": 3.0,
      "eval_loss": 1.593631386756897,
      "eval_runtime": 19.1501,
      "eval_samples_per_second": 52.219,
      "eval_steps_per_second": 3.29,
      "step": 6750
    },
    {
      "epoch": 3.11,
      "learning_rate": 8.53932584269663e-05,
      "loss": 1.5611,
      "step": 7000
    },
    {
      "epoch": 3.22,
      "learning_rate": 8.483146067415731e-05,
      "loss": 1.5177,
      "step": 7250
    },
    {
      "epoch": 3.33,
      "learning_rate": 8.426966292134831e-05,
      "loss": 1.5332,
      "step": 7500
    },
    {
      "epoch": 3.44,
      "learning_rate": 8.370786516853934e-05,
      "loss": 1.6529,
      "step": 7750
    },
    {
      "epoch": 3.56,
      "learning_rate": 8.314606741573034e-05,
      "loss": 1.5575,
      "step": 8000
    },
    {
      "epoch": 3.67,
      "learning_rate": 8.258426966292135e-05,
      "loss": 1.5424,
      "step": 8250
    },
    {
      "epoch": 3.78,
      "learning_rate": 8.202247191011237e-05,
      "loss": 1.5496,
      "step": 8500
    },
    {
      "epoch": 3.89,
      "learning_rate": 8.146067415730337e-05,
      "loss": 1.5465,
      "step": 8750
    },
    {
      "epoch": 4.0,
      "learning_rate": 8.089887640449438e-05,
      "loss": 1.5903,
      "step": 9000
    },
    {
      "epoch": 4.0,
      "eval_loss": 1.5624333620071411,
      "eval_runtime": 19.0571,
      "eval_samples_per_second": 52.474,
      "eval_steps_per_second": 3.306,
      "step": 9000
    },
    {
      "epoch": 4.11,
      "learning_rate": 8.03370786516854e-05,
      "loss": 1.4976,
      "step": 9250
    },
    {
      "epoch": 4.22,
      "learning_rate": 7.97752808988764e-05,
      "loss": 1.4719,
      "step": 9500
    },
    {
      "epoch": 4.33,
      "learning_rate": 7.921348314606743e-05,
      "loss": 1.4781,
      "step": 9750
    },
    {
      "epoch": 4.44,
      "learning_rate": 7.865168539325843e-05,
      "loss": 1.4652,
      "step": 10000
    },
    {
      "epoch": 4.56,
      "learning_rate": 7.808988764044944e-05,
      "loss": 1.4997,
      "step": 10250
    },
    {
      "epoch": 4.67,
      "learning_rate": 7.752808988764046e-05,
      "loss": 1.5537,
      "step": 10500
    },
    {
      "epoch": 4.78,
      "learning_rate": 7.696629213483147e-05,
      "loss": 1.5558,
      "step": 10750
    },
    {
      "epoch": 4.89,
      "learning_rate": 7.640449438202247e-05,
      "loss": 1.4763,
      "step": 11000
    },
    {
      "epoch": 5.0,
      "learning_rate": 7.584269662921349e-05,
      "loss": 1.5011,
      "step": 11250
    },
    {
      "epoch": 5.0,
      "eval_loss": 1.5619711875915527,
      "eval_runtime": 19.201,
      "eval_samples_per_second": 52.081,
      "eval_steps_per_second": 3.281,
      "step": 11250
    },
    {
      "epoch": 5.11,
      "learning_rate": 7.52808988764045e-05,
      "loss": 1.4501,
      "step": 11500
    },
    {
      "epoch": 5.22,
      "learning_rate": 7.471910112359551e-05,
      "loss": 1.434,
      "step": 11750
    },
    {
      "epoch": 5.33,
      "learning_rate": 7.415730337078653e-05,
      "loss": 1.3839,
      "step": 12000
    },
    {
      "epoch": 5.44,
      "learning_rate": 7.359550561797753e-05,
      "loss": 1.4426,
      "step": 12250
    },
    {
      "epoch": 5.56,
      "learning_rate": 7.303370786516854e-05,
      "loss": 1.4481,
      "step": 12500
    },
    {
      "epoch": 5.67,
      "learning_rate": 7.247191011235956e-05,
      "loss": 1.452,
      "step": 12750
    },
    {
      "epoch": 5.78,
      "learning_rate": 7.191011235955056e-05,
      "loss": 1.485,
      "step": 13000
    },
    {
      "epoch": 5.89,
      "learning_rate": 7.134831460674157e-05,
      "loss": 1.5027,
      "step": 13250
    },
    {
      "epoch": 6.0,
      "learning_rate": 7.078651685393259e-05,
      "loss": 1.4556,
      "step": 13500
    },
    {
      "epoch": 6.0,
      "eval_loss": 1.5397371053695679,
      "eval_runtime": 22.8085,
      "eval_samples_per_second": 43.843,
      "eval_steps_per_second": 2.762,
      "step": 13500
    }
  ],
  "max_steps": 45000,
  "num_train_epochs": 20,
  "total_flos": 3.288372609024e+16,
  "trial_name": null,
  "trial_params": null
}