{
  "best_metric": 0.6822429906542056,
  "best_model_checkpoint": "swinv2-tiny-patch4-window8-256-RH\\checkpoint-296",
  "epoch": 40.0,
  "eval_steps": 500,
  "global_step": 320,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_accuracy": 0.411214953271028,
      "eval_loss": 4.626469612121582,
      "eval_runtime": 1.941,
      "eval_samples_per_second": 55.127,
      "eval_steps_per_second": 3.606,
      "step": 8
    },
    {
      "epoch": 1.25,
      "learning_rate": 1.25e-05,
      "loss": 4.5369,
      "step": 10
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.411214953271028,
      "eval_loss": 4.52950382232666,
      "eval_runtime": 1.8274,
      "eval_samples_per_second": 58.552,
      "eval_steps_per_second": 3.831,
      "step": 16
    },
    {
      "epoch": 2.5,
      "learning_rate": 2.5e-05,
      "loss": 4.6305,
      "step": 20
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.411214953271028,
      "eval_loss": 4.143911361694336,
      "eval_runtime": 1.8444,
      "eval_samples_per_second": 58.012,
      "eval_steps_per_second": 3.795,
      "step": 24
    },
    {
      "epoch": 3.75,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 4.0918,
      "step": 30
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.411214953271028,
      "eval_loss": 3.369269609451294,
      "eval_runtime": 1.8599,
      "eval_samples_per_second": 57.529,
      "eval_steps_per_second": 3.764,
      "step": 32
    },
    {
      "epoch": 5.0,
      "learning_rate": 3.888888888888889e-05,
      "loss": 3.1767,
      "step": 40
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.411214953271028,
      "eval_loss": 2.4324560165405273,
      "eval_runtime": 1.894,
      "eval_samples_per_second": 56.496,
      "eval_steps_per_second": 3.696,
      "step": 40
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.411214953271028,
      "eval_loss": 1.5421515703201294,
      "eval_runtime": 1.8605,
      "eval_samples_per_second": 57.513,
      "eval_steps_per_second": 3.763,
      "step": 48
    },
    {
      "epoch": 6.25,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 2.0113,
      "step": 50
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.411214953271028,
      "eval_loss": 0.8834472894668579,
      "eval_runtime": 1.8524,
      "eval_samples_per_second": 57.762,
      "eval_steps_per_second": 3.779,
      "step": 56
    },
    {
      "epoch": 7.5,
      "learning_rate": 3.6111111111111116e-05,
      "loss": 1.0593,
      "step": 60
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.5887850467289719,
      "eval_loss": 0.6790158748626709,
      "eval_runtime": 1.8709,
      "eval_samples_per_second": 57.19,
      "eval_steps_per_second": 3.741,
      "step": 64
    },
    {
      "epoch": 8.75,
      "learning_rate": 3.472222222222223e-05,
      "loss": 0.696,
      "step": 70
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.5887850467289719,
      "eval_loss": 0.7044364213943481,
      "eval_runtime": 1.8454,
      "eval_samples_per_second": 57.981,
      "eval_steps_per_second": 3.793,
      "step": 72
    },
    {
      "epoch": 10.0,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.6893,
      "step": 80
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.5887850467289719,
      "eval_loss": 0.6777833104133606,
      "eval_runtime": 1.8339,
      "eval_samples_per_second": 58.344,
      "eval_steps_per_second": 3.817,
      "step": 80
    },
    {
      "epoch": 11.0,
      "eval_accuracy": 0.5887850467289719,
      "eval_loss": 0.686574399471283,
      "eval_runtime": 1.8689,
      "eval_samples_per_second": 57.252,
      "eval_steps_per_second": 3.745,
      "step": 88
    },
    {
      "epoch": 11.25,
      "learning_rate": 3.194444444444445e-05,
      "loss": 0.6961,
      "step": 90
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.5887850467289719,
      "eval_loss": 0.6933708786964417,
      "eval_runtime": 1.8659,
      "eval_samples_per_second": 57.344,
      "eval_steps_per_second": 3.751,
      "step": 96
    },
    {
      "epoch": 12.5,
      "learning_rate": 3.0555555555555554e-05,
      "loss": 0.7329,
      "step": 100
    },
    {
      "epoch": 13.0,
      "eval_accuracy": 0.5887850467289719,
      "eval_loss": 0.6915177702903748,
      "eval_runtime": 1.8689,
      "eval_samples_per_second": 57.252,
      "eval_steps_per_second": 3.745,
      "step": 104
    },
    {
      "epoch": 13.75,
      "learning_rate": 2.9166666666666666e-05,
      "loss": 0.6948,
      "step": 110
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.5887850467289719,
      "eval_loss": 0.6761561036109924,
      "eval_runtime": 1.873,
      "eval_samples_per_second": 57.129,
      "eval_steps_per_second": 3.737,
      "step": 112
    },
    {
      "epoch": 15.0,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.6771,
      "step": 120
    },
    {
      "epoch": 15.0,
      "eval_accuracy": 0.5887850467289719,
      "eval_loss": 0.6794873476028442,
      "eval_runtime": 1.8549,
      "eval_samples_per_second": 57.684,
      "eval_steps_per_second": 3.774,
      "step": 120
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.5887850467289719,
      "eval_loss": 0.6800874471664429,
      "eval_runtime": 1.871,
      "eval_samples_per_second": 57.19,
      "eval_steps_per_second": 3.741,
      "step": 128
    },
    {
      "epoch": 16.25,
      "learning_rate": 2.6388888888888892e-05,
      "loss": 0.6763,
      "step": 130
    },
    {
      "epoch": 17.0,
      "eval_accuracy": 0.5887850467289719,
      "eval_loss": 0.6819825172424316,
      "eval_runtime": 1.8614,
      "eval_samples_per_second": 57.483,
      "eval_steps_per_second": 3.761,
      "step": 136
    },
    {
      "epoch": 17.5,
      "learning_rate": 2.5e-05,
      "loss": 0.6822,
      "step": 140
    },
    {
      "epoch": 18.0,
      "eval_accuracy": 0.5887850467289719,
      "eval_loss": 0.6799528002738953,
      "eval_runtime": 1.8664,
      "eval_samples_per_second": 57.328,
      "eval_steps_per_second": 3.75,
      "step": 144
    },
    {
      "epoch": 18.75,
      "learning_rate": 2.3611111111111114e-05,
      "loss": 0.6723,
      "step": 150
    },
    {
      "epoch": 19.0,
      "eval_accuracy": 0.5887850467289719,
      "eval_loss": 0.6740810871124268,
      "eval_runtime": 1.8689,
      "eval_samples_per_second": 57.252,
      "eval_steps_per_second": 3.745,
      "step": 152
    },
    {
      "epoch": 20.0,
      "learning_rate": 2.2222222222222227e-05,
      "loss": 0.6757,
      "step": 160
    },
    {
      "epoch": 20.0,
      "eval_accuracy": 0.5887850467289719,
      "eval_loss": 0.681536078453064,
      "eval_runtime": 1.8439,
      "eval_samples_per_second": 58.028,
      "eval_steps_per_second": 3.796,
      "step": 160
    },
    {
      "epoch": 21.0,
      "eval_accuracy": 0.5887850467289719,
      "eval_loss": 0.6728676557540894,
      "eval_runtime": 1.8774,
      "eval_samples_per_second": 56.993,
      "eval_steps_per_second": 3.729,
      "step": 168
    },
    {
      "epoch": 21.25,
      "learning_rate": 2.0833333333333336e-05,
      "loss": 0.6711,
      "step": 170
    },
    {
      "epoch": 22.0,
      "eval_accuracy": 0.5887850467289719,
      "eval_loss": 0.6811678409576416,
      "eval_runtime": 1.8474,
      "eval_samples_per_second": 57.918,
      "eval_steps_per_second": 3.789,
      "step": 176
    },
    {
      "epoch": 22.5,
      "learning_rate": 1.9444444444444445e-05,
      "loss": 0.6784,
      "step": 180
    },
    {
      "epoch": 23.0,
      "eval_accuracy": 0.5794392523364486,
      "eval_loss": 0.6780757308006287,
      "eval_runtime": 1.9144,
      "eval_samples_per_second": 55.891,
      "eval_steps_per_second": 3.656,
      "step": 184
    },
    {
      "epoch": 23.75,
      "learning_rate": 1.8055555555555558e-05,
      "loss": 0.6665,
      "step": 190
    },
    {
      "epoch": 24.0,
      "eval_accuracy": 0.5794392523364486,
      "eval_loss": 0.6697570085525513,
      "eval_runtime": 1.8669,
      "eval_samples_per_second": 57.313,
      "eval_steps_per_second": 3.749,
      "step": 192
    },
    {
      "epoch": 25.0,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.6723,
      "step": 200
    },
    {
      "epoch": 25.0,
      "eval_accuracy": 0.5981308411214953,
      "eval_loss": 0.6646633744239807,
      "eval_runtime": 1.8769,
      "eval_samples_per_second": 57.008,
      "eval_steps_per_second": 3.729,
      "step": 200
    },
    {
      "epoch": 26.0,
      "eval_accuracy": 0.5794392523364486,
      "eval_loss": 0.6761506199836731,
      "eval_runtime": 1.8489,
      "eval_samples_per_second": 57.871,
      "eval_steps_per_second": 3.786,
      "step": 208
    },
    {
      "epoch": 26.25,
      "learning_rate": 1.5277777777777777e-05,
      "loss": 0.6675,
      "step": 210
    },
    {
      "epoch": 27.0,
      "eval_accuracy": 0.5700934579439252,
      "eval_loss": 0.659662127494812,
      "eval_runtime": 1.8699,
      "eval_samples_per_second": 57.221,
      "eval_steps_per_second": 3.743,
      "step": 216
    },
    {
      "epoch": 27.5,
      "learning_rate": 1.388888888888889e-05,
      "loss": 0.6628,
      "step": 220
    },
    {
      "epoch": 28.0,
      "eval_accuracy": 0.6355140186915887,
      "eval_loss": 0.6563044190406799,
      "eval_runtime": 1.901,
      "eval_samples_per_second": 56.288,
      "eval_steps_per_second": 3.682,
      "step": 224
    },
    {
      "epoch": 28.75,
      "learning_rate": 1.25e-05,
      "loss": 0.6478,
      "step": 230
    },
    {
      "epoch": 29.0,
      "eval_accuracy": 0.5794392523364486,
      "eval_loss": 0.6790612936019897,
      "eval_runtime": 1.8634,
      "eval_samples_per_second": 57.421,
      "eval_steps_per_second": 3.757,
      "step": 232
    },
    {
      "epoch": 30.0,
      "learning_rate": 1.1111111111111113e-05,
      "loss": 0.6642,
      "step": 240
    },
    {
      "epoch": 30.0,
      "eval_accuracy": 0.5887850467289719,
      "eval_loss": 0.6573609709739685,
      "eval_runtime": 1.884,
      "eval_samples_per_second": 56.796,
      "eval_steps_per_second": 3.716,
      "step": 240
    },
    {
      "epoch": 31.0,
      "eval_accuracy": 0.5607476635514018,
      "eval_loss": 0.6556074023246765,
      "eval_runtime": 1.872,
      "eval_samples_per_second": 57.159,
      "eval_steps_per_second": 3.739,
      "step": 248
    },
    {
      "epoch": 31.25,
      "learning_rate": 9.722222222222223e-06,
      "loss": 0.654,
      "step": 250
    },
    {
      "epoch": 32.0,
      "eval_accuracy": 0.5887850467289719,
      "eval_loss": 0.6523457169532776,
      "eval_runtime": 1.871,
      "eval_samples_per_second": 57.19,
      "eval_steps_per_second": 3.741,
      "step": 256
    },
    {
      "epoch": 32.5,
      "learning_rate": 8.333333333333334e-06,
      "loss": 0.6602,
      "step": 260
    },
    {
      "epoch": 33.0,
      "eval_accuracy": 0.6261682242990654,
      "eval_loss": 0.6463862657546997,
      "eval_runtime": 1.8604,
      "eval_samples_per_second": 57.513,
      "eval_steps_per_second": 3.763,
      "step": 264
    },
    {
      "epoch": 33.75,
      "learning_rate": 6.944444444444445e-06,
      "loss": 0.6535,
      "step": 270
    },
    {
      "epoch": 34.0,
      "eval_accuracy": 0.616822429906542,
      "eval_loss": 0.6450387835502625,
      "eval_runtime": 2.0415,
      "eval_samples_per_second": 52.413,
      "eval_steps_per_second": 3.429,
      "step": 272
    },
    {
      "epoch": 35.0,
      "learning_rate": 5.555555555555557e-06,
      "loss": 0.6506,
      "step": 280
    },
    {
      "epoch": 35.0,
      "eval_accuracy": 0.5794392523364486,
      "eval_loss": 0.6549742221832275,
      "eval_runtime": 1.8524,
      "eval_samples_per_second": 57.762,
      "eval_steps_per_second": 3.779,
      "step": 280
    },
    {
      "epoch": 36.0,
      "eval_accuracy": 0.6074766355140186,
      "eval_loss": 0.6437696218490601,
      "eval_runtime": 1.837,
      "eval_samples_per_second": 58.249,
      "eval_steps_per_second": 3.811,
      "step": 288
    },
    {
      "epoch": 36.25,
      "learning_rate": 4.166666666666667e-06,
      "loss": 0.6533,
      "step": 290
    },
    {
      "epoch": 37.0,
      "eval_accuracy": 0.6822429906542056,
      "eval_loss": 0.6395505666732788,
      "eval_runtime": 1.8519,
      "eval_samples_per_second": 57.777,
      "eval_steps_per_second": 3.78,
      "step": 296
    },
    {
      "epoch": 37.5,
      "learning_rate": 2.7777777777777783e-06,
      "loss": 0.6443,
      "step": 300
    },
    {
      "epoch": 38.0,
      "eval_accuracy": 0.6635514018691588,
      "eval_loss": 0.6383424997329712,
      "eval_runtime": 1.8994,
      "eval_samples_per_second": 56.332,
      "eval_steps_per_second": 3.685,
      "step": 304
    },
    {
      "epoch": 38.75,
      "learning_rate": 1.3888888888888892e-06,
      "loss": 0.6263,
      "step": 310
    },
    {
      "epoch": 39.0,
      "eval_accuracy": 0.6448598130841121,
      "eval_loss": 0.6377760171890259,
      "eval_runtime": 1.8969,
      "eval_samples_per_second": 56.406,
      "eval_steps_per_second": 3.69,
      "step": 312
    },
    {
      "epoch": 40.0,
      "learning_rate": 0.0,
      "loss": 0.6283,
      "step": 320
    },
    {
      "epoch": 40.0,
      "eval_accuracy": 0.6448598130841121,
      "eval_loss": 0.6378776431083679,
      "eval_runtime": 1.8389,
      "eval_samples_per_second": 58.186,
      "eval_steps_per_second": 3.807,
      "step": 320
    },
    {
      "epoch": 40.0,
      "step": 320,
      "total_flos": 6.480838238876467e+17,
      "train_loss": 1.1531404286623002,
      "train_runtime": 569.0029,
      "train_samples_per_second": 35.009,
      "train_steps_per_second": 0.562
    }
  ],
  "logging_steps": 10,
  "max_steps": 320,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 40,
  "save_steps": 500,
  "total_flos": 6.480838238876467e+17,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}