{
  "best_metric": 0.8127853881278538,
  "best_model_checkpoint": "deit-tiny-patch16-224-finetuned-piid/checkpoint-266",
  "epoch": 19.51219512195122,
  "eval_steps": 500,
  "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.49,
      "learning_rate": 1.25e-05,
      "loss": 1.5326,
      "step": 10
    },
    {
      "epoch": 0.98,
      "learning_rate": 2.5e-05,
      "loss": 1.2298,
      "step": 20
    },
    {
      "epoch": 0.98,
      "eval_accuracy": 0.4703196347031963,
      "eval_loss": 1.1137923002243042,
      "eval_runtime": 0.7498,
      "eval_samples_per_second": 292.079,
      "eval_steps_per_second": 37.343,
      "step": 20
    },
    {
      "epoch": 1.46,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.9126,
      "step": 30
    },
    {
      "epoch": 1.95,
      "learning_rate": 5e-05,
      "loss": 0.7642,
      "step": 40
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.5981735159817352,
      "eval_loss": 0.9123256206512451,
      "eval_runtime": 0.7291,
      "eval_samples_per_second": 300.354,
      "eval_steps_per_second": 38.401,
      "step": 41
    },
    {
      "epoch": 2.44,
      "learning_rate": 4.8611111111111115e-05,
      "loss": 0.6623,
      "step": 50
    },
    {
      "epoch": 2.93,
      "learning_rate": 4.722222222222222e-05,
      "loss": 0.6167,
      "step": 60
    },
    {
      "epoch": 2.98,
      "eval_accuracy": 0.684931506849315,
      "eval_loss": 0.6735855937004089,
      "eval_runtime": 1.0942,
      "eval_samples_per_second": 200.146,
      "eval_steps_per_second": 25.589,
      "step": 61
    },
    {
      "epoch": 3.41,
      "learning_rate": 4.5833333333333334e-05,
      "loss": 0.5708,
      "step": 70
    },
    {
      "epoch": 3.9,
      "learning_rate": 4.4444444444444447e-05,
      "loss": 0.5628,
      "step": 80
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.7031963470319634,
      "eval_loss": 0.6615465879440308,
      "eval_runtime": 0.7444,
      "eval_samples_per_second": 294.203,
      "eval_steps_per_second": 37.615,
      "step": 82
    },
    {
      "epoch": 4.39,
      "learning_rate": 4.305555555555556e-05,
      "loss": 0.6059,
      "step": 90
    },
    {
      "epoch": 4.88,
      "learning_rate": 4.166666666666667e-05,
      "loss": 0.5616,
      "step": 100
    },
    {
      "epoch": 4.98,
      "eval_accuracy": 0.7351598173515982,
      "eval_loss": 0.5984886288642883,
      "eval_runtime": 0.9744,
      "eval_samples_per_second": 224.752,
      "eval_steps_per_second": 28.735,
      "step": 102
    },
    {
      "epoch": 5.37,
      "learning_rate": 4.027777777777778e-05,
      "loss": 0.5088,
      "step": 110
    },
    {
      "epoch": 5.85,
      "learning_rate": 3.888888888888889e-05,
      "loss": 0.4742,
      "step": 120
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.7853881278538812,
      "eval_loss": 0.4980608820915222,
      "eval_runtime": 0.7506,
      "eval_samples_per_second": 291.763,
      "eval_steps_per_second": 37.303,
      "step": 123
    },
    {
      "epoch": 6.34,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.4212,
      "step": 130
    },
    {
      "epoch": 6.83,
      "learning_rate": 3.611111111111111e-05,
      "loss": 0.3434,
      "step": 140
    },
    {
      "epoch": 6.98,
      "eval_accuracy": 0.7488584474885844,
      "eval_loss": 0.5729327201843262,
      "eval_runtime": 0.7717,
      "eval_samples_per_second": 283.805,
      "eval_steps_per_second": 36.286,
      "step": 143
    },
    {
      "epoch": 7.32,
      "learning_rate": 3.472222222222222e-05,
      "loss": 0.3907,
      "step": 150
    },
    {
      "epoch": 7.8,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.3691,
      "step": 160
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.7397260273972602,
      "eval_loss": 0.5779256820678711,
      "eval_runtime": 0.7364,
      "eval_samples_per_second": 297.399,
      "eval_steps_per_second": 38.024,
      "step": 164
    },
    {
      "epoch": 8.29,
      "learning_rate": 3.194444444444444e-05,
      "loss": 0.3043,
      "step": 170
    },
    {
      "epoch": 8.78,
      "learning_rate": 3.055555555555556e-05,
      "loss": 0.3375,
      "step": 180
    },
    {
      "epoch": 8.98,
      "eval_accuracy": 0.7579908675799086,
      "eval_loss": 0.541674792766571,
      "eval_runtime": 0.7286,
      "eval_samples_per_second": 300.584,
      "eval_steps_per_second": 38.431,
      "step": 184
    },
    {
      "epoch": 9.27,
      "learning_rate": 2.916666666666667e-05,
      "loss": 0.2907,
      "step": 190
    },
    {
      "epoch": 9.76,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.3192,
      "step": 200
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.7534246575342466,
      "eval_loss": 0.5554245710372925,
      "eval_runtime": 0.7702,
      "eval_samples_per_second": 284.349,
      "eval_steps_per_second": 36.355,
      "step": 205
    },
    {
      "epoch": 10.24,
      "learning_rate": 2.6388888888888892e-05,
      "loss": 0.3006,
      "step": 210
    },
    {
      "epoch": 10.73,
      "learning_rate": 2.5e-05,
      "loss": 0.2795,
      "step": 220
    },
    {
      "epoch": 10.98,
      "eval_accuracy": 0.776255707762557,
      "eval_loss": 0.5656077265739441,
      "eval_runtime": 0.7297,
      "eval_samples_per_second": 300.112,
      "eval_steps_per_second": 38.37,
      "step": 225
    },
    {
      "epoch": 11.22,
      "learning_rate": 2.361111111111111e-05,
      "loss": 0.2999,
      "step": 230
    },
    {
      "epoch": 11.71,
      "learning_rate": 2.2222222222222223e-05,
      "loss": 0.242,
      "step": 240
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.7990867579908676,
      "eval_loss": 0.5318821668624878,
      "eval_runtime": 0.9729,
      "eval_samples_per_second": 225.104,
      "eval_steps_per_second": 28.78,
      "step": 246
    },
    {
      "epoch": 12.2,
      "learning_rate": 2.0833333333333336e-05,
      "loss": 0.2281,
      "step": 250
    },
    {
      "epoch": 12.68,
      "learning_rate": 1.9444444444444445e-05,
      "loss": 0.2557,
      "step": 260
    },
    {
      "epoch": 12.98,
      "eval_accuracy": 0.8127853881278538,
      "eval_loss": 0.5153664350509644,
      "eval_runtime": 0.7247,
      "eval_samples_per_second": 302.183,
      "eval_steps_per_second": 38.635,
      "step": 266
    },
    {
      "epoch": 13.17,
      "learning_rate": 1.8055555555555555e-05,
      "loss": 0.2134,
      "step": 270
    },
    {
      "epoch": 13.66,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.2465,
      "step": 280
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.7990867579908676,
      "eval_loss": 0.5763050317764282,
      "eval_runtime": 0.7251,
      "eval_samples_per_second": 302.014,
      "eval_steps_per_second": 38.614,
      "step": 287
    },
    {
      "epoch": 14.15,
      "learning_rate": 1.527777777777778e-05,
      "loss": 0.3016,
      "step": 290
    },
    {
      "epoch": 14.63,
      "learning_rate": 1.388888888888889e-05,
      "loss": 0.221,
      "step": 300
    },
    {
      "epoch": 14.98,
      "eval_accuracy": 0.8036529680365296,
      "eval_loss": 0.5682752132415771,
      "eval_runtime": 0.7177,
      "eval_samples_per_second": 305.151,
      "eval_steps_per_second": 39.015,
      "step": 307
    },
    {
      "epoch": 15.12,
      "learning_rate": 1.25e-05,
      "loss": 0.2028,
      "step": 310
    },
    {
      "epoch": 15.61,
      "learning_rate": 1.1111111111111112e-05,
      "loss": 0.2058,
      "step": 320
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.8127853881278538,
      "eval_loss": 0.5852413177490234,
      "eval_runtime": 0.7065,
      "eval_samples_per_second": 309.982,
      "eval_steps_per_second": 39.632,
      "step": 328
    },
    {
      "epoch": 16.1,
      "learning_rate": 9.722222222222223e-06,
      "loss": 0.1959,
      "step": 330
    },
    {
      "epoch": 16.59,
      "learning_rate": 8.333333333333334e-06,
      "loss": 0.1809,
      "step": 340
    },
    {
      "epoch": 16.98,
      "eval_accuracy": 0.8082191780821918,
      "eval_loss": 0.6281502842903137,
      "eval_runtime": 0.7065,
      "eval_samples_per_second": 309.973,
      "eval_steps_per_second": 39.631,
      "step": 348
    },
    {
      "epoch": 17.07,
      "learning_rate": 6.944444444444445e-06,
      "loss": 0.19,
      "step": 350
    },
    {
      "epoch": 17.56,
      "learning_rate": 5.555555555555556e-06,
      "loss": 0.1638,
      "step": 360
    },
    {
      "epoch": 18.0,
      "eval_accuracy": 0.7945205479452054,
      "eval_loss": 0.6289016604423523,
      "eval_runtime": 0.7099,
      "eval_samples_per_second": 308.501,
      "eval_steps_per_second": 39.443,
      "step": 369
    },
    {
      "epoch": 18.05,
      "learning_rate": 4.166666666666667e-06,
      "loss": 0.2779,
      "step": 370
    },
    {
      "epoch": 18.54,
      "learning_rate": 2.777777777777778e-06,
      "loss": 0.155,
      "step": 380
    },
    {
      "epoch": 18.98,
      "eval_accuracy": 0.8036529680365296,
      "eval_loss": 0.613444447517395,
      "eval_runtime": 0.9887,
      "eval_samples_per_second": 221.506,
      "eval_steps_per_second": 28.32,
      "step": 389
    },
    {
      "epoch": 19.02,
      "learning_rate": 1.388888888888889e-06,
      "loss": 0.2105,
      "step": 390
    },
    {
      "epoch": 19.51,
      "learning_rate": 0.0,
      "loss": 0.2094,
      "step": 400
    },
    {
      "epoch": 19.51,
      "eval_accuracy": 0.8082191780821918,
      "eval_loss": 0.6113923192024231,
      "eval_runtime": 0.6901,
      "eval_samples_per_second": 317.344,
      "eval_steps_per_second": 40.574,
      "step": 400
    },
    {
      "epoch": 19.51,
      "step": 400,
      "total_flos": 6.358977448272691e+16,
      "train_loss": 0.40896380335092547,
      "train_runtime": 131.9389,
      "train_samples_per_second": 98.985,
      "train_steps_per_second": 3.032
    }
  ],
  "logging_steps": 10,
  "max_steps": 400,
  "num_train_epochs": 20,
  "save_steps": 500,
  "total_flos": 6.358977448272691e+16,
  "trial_name": null,
  "trial_params": null
}