{
  "best_metric": 0.782608695652174,
  "best_model_checkpoint": "swin-tiny-patch4-window7-224-ve-U13-b-80b\\checkpoint-117",
  "epoch": 73.84615384615384,
  "eval_steps": 500,
  "global_step": 480,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.92,
      "eval_accuracy": 0.13043478260869565,
      "eval_loss": 1.385452389717102,
      "eval_runtime": 0.715,
      "eval_samples_per_second": 64.335,
      "eval_steps_per_second": 2.797,
      "step": 6
    },
    {
      "epoch": 1.54,
      "learning_rate": 2.2916666666666667e-05,
      "loss": 1.3852,
      "step": 10
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.2826086956521739,
      "eval_loss": 1.3762171268463135,
      "eval_runtime": 0.8255,
      "eval_samples_per_second": 55.723,
      "eval_steps_per_second": 2.423,
      "step": 13
    },
    {
      "epoch": 2.92,
      "eval_accuracy": 0.2826086956521739,
      "eval_loss": 1.3521002531051636,
      "eval_runtime": 0.7696,
      "eval_samples_per_second": 59.773,
      "eval_steps_per_second": 2.599,
      "step": 19
    },
    {
      "epoch": 3.08,
      "learning_rate": 4.5833333333333334e-05,
      "loss": 1.3565,
      "step": 20
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.34782608695652173,
      "eval_loss": 1.250961422920227,
      "eval_runtime": 0.7773,
      "eval_samples_per_second": 59.179,
      "eval_steps_per_second": 2.573,
      "step": 26
    },
    {
      "epoch": 4.62,
      "learning_rate": 5.4276315789473686e-05,
      "loss": 1.2024,
      "step": 30
    },
    {
      "epoch": 4.92,
      "eval_accuracy": 0.34782608695652173,
      "eval_loss": 1.1528408527374268,
      "eval_runtime": 0.7354,
      "eval_samples_per_second": 62.55,
      "eval_steps_per_second": 2.72,
      "step": 32
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.5,
      "eval_loss": 1.0294172763824463,
      "eval_runtime": 0.6853,
      "eval_samples_per_second": 67.121,
      "eval_steps_per_second": 2.918,
      "step": 39
    },
    {
      "epoch": 6.15,
      "learning_rate": 5.3070175438596496e-05,
      "loss": 1.0453,
      "step": 40
    },
    {
      "epoch": 6.92,
      "eval_accuracy": 0.5217391304347826,
      "eval_loss": 0.9608190655708313,
      "eval_runtime": 0.7141,
      "eval_samples_per_second": 64.421,
      "eval_steps_per_second": 2.801,
      "step": 45
    },
    {
      "epoch": 7.69,
      "learning_rate": 5.18640350877193e-05,
      "loss": 0.8827,
      "step": 50
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.6086956521739131,
      "eval_loss": 0.8800680041313171,
      "eval_runtime": 0.7421,
      "eval_samples_per_second": 61.99,
      "eval_steps_per_second": 2.695,
      "step": 52
    },
    {
      "epoch": 8.92,
      "eval_accuracy": 0.5652173913043478,
      "eval_loss": 0.988355278968811,
      "eval_runtime": 0.7152,
      "eval_samples_per_second": 64.319,
      "eval_steps_per_second": 2.796,
      "step": 58
    },
    {
      "epoch": 9.23,
      "learning_rate": 5.0657894736842104e-05,
      "loss": 0.7887,
      "step": 60
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 0.7927354574203491,
      "eval_runtime": 0.7043,
      "eval_samples_per_second": 65.311,
      "eval_steps_per_second": 2.84,
      "step": 65
    },
    {
      "epoch": 10.77,
      "learning_rate": 4.9451754385964915e-05,
      "loss": 0.6795,
      "step": 70
    },
    {
      "epoch": 10.92,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 0.7237101793289185,
      "eval_runtime": 0.6992,
      "eval_samples_per_second": 65.792,
      "eval_steps_per_second": 2.861,
      "step": 71
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.6739130434782609,
      "eval_loss": 0.7249867916107178,
      "eval_runtime": 0.6943,
      "eval_samples_per_second": 66.251,
      "eval_steps_per_second": 2.88,
      "step": 78
    },
    {
      "epoch": 12.31,
      "learning_rate": 4.824561403508772e-05,
      "loss": 0.5777,
      "step": 80
    },
    {
      "epoch": 12.92,
      "eval_accuracy": 0.6956521739130435,
      "eval_loss": 0.7140356302261353,
      "eval_runtime": 0.6964,
      "eval_samples_per_second": 66.052,
      "eval_steps_per_second": 2.872,
      "step": 84
    },
    {
      "epoch": 13.85,
      "learning_rate": 4.703947368421053e-05,
      "loss": 0.496,
      "step": 90
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.6956521739130435,
      "eval_loss": 0.8013545274734497,
      "eval_runtime": 0.7185,
      "eval_samples_per_second": 64.019,
      "eval_steps_per_second": 2.783,
      "step": 91
    },
    {
      "epoch": 14.92,
      "eval_accuracy": 0.6739130434782609,
      "eval_loss": 0.8701084852218628,
      "eval_runtime": 0.7086,
      "eval_samples_per_second": 64.917,
      "eval_steps_per_second": 2.822,
      "step": 97
    },
    {
      "epoch": 15.38,
      "learning_rate": 4.5833333333333334e-05,
      "loss": 0.4224,
      "step": 100
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 0.93837571144104,
      "eval_runtime": 0.6717,
      "eval_samples_per_second": 68.483,
      "eval_steps_per_second": 2.978,
      "step": 104
    },
    {
      "epoch": 16.92,
      "learning_rate": 4.462719298245614e-05,
      "loss": 0.3744,
      "step": 110
    },
    {
      "epoch": 16.92,
      "eval_accuracy": 0.717391304347826,
      "eval_loss": 0.7593847513198853,
      "eval_runtime": 0.8804,
      "eval_samples_per_second": 52.248,
      "eval_steps_per_second": 2.272,
      "step": 110
    },
    {
      "epoch": 18.0,
      "eval_accuracy": 0.782608695652174,
      "eval_loss": 0.6122156977653503,
      "eval_runtime": 0.7027,
      "eval_samples_per_second": 65.461,
      "eval_steps_per_second": 2.846,
      "step": 117
    },
    {
      "epoch": 18.46,
      "learning_rate": 4.342105263157895e-05,
      "loss": 0.3775,
      "step": 120
    },
    {
      "epoch": 18.92,
      "eval_accuracy": 0.717391304347826,
      "eval_loss": 0.814321756362915,
      "eval_runtime": 0.7065,
      "eval_samples_per_second": 65.112,
      "eval_steps_per_second": 2.831,
      "step": 123
    },
    {
      "epoch": 20.0,
      "learning_rate": 4.221491228070176e-05,
      "loss": 0.3275,
      "step": 130
    },
    {
      "epoch": 20.0,
      "eval_accuracy": 0.6521739130434783,
      "eval_loss": 0.9981420040130615,
      "eval_runtime": 0.6931,
      "eval_samples_per_second": 66.366,
      "eval_steps_per_second": 2.885,
      "step": 130
    },
    {
      "epoch": 20.92,
      "eval_accuracy": 0.717391304347826,
      "eval_loss": 0.8602655529975891,
      "eval_runtime": 0.7452,
      "eval_samples_per_second": 61.724,
      "eval_steps_per_second": 2.684,
      "step": 136
    },
    {
      "epoch": 21.54,
      "learning_rate": 4.100877192982456e-05,
      "loss": 0.3202,
      "step": 140
    },
    {
      "epoch": 22.0,
      "eval_accuracy": 0.6956521739130435,
      "eval_loss": 0.8412013649940491,
      "eval_runtime": 0.6933,
      "eval_samples_per_second": 66.346,
      "eval_steps_per_second": 2.885,
      "step": 143
    },
    {
      "epoch": 22.92,
      "eval_accuracy": 0.717391304347826,
      "eval_loss": 0.8653804659843445,
      "eval_runtime": 0.7152,
      "eval_samples_per_second": 64.32,
      "eval_steps_per_second": 2.797,
      "step": 149
    },
    {
      "epoch": 23.08,
      "learning_rate": 3.9802631578947374e-05,
      "loss": 0.2849,
      "step": 150
    },
    {
      "epoch": 24.0,
      "eval_accuracy": 0.6956521739130435,
      "eval_loss": 0.9650171399116516,
      "eval_runtime": 0.703,
      "eval_samples_per_second": 65.437,
      "eval_steps_per_second": 2.845,
      "step": 156
    },
    {
      "epoch": 24.62,
      "learning_rate": 3.859649122807018e-05,
      "loss": 0.2518,
      "step": 160
    },
    {
      "epoch": 24.92,
      "eval_accuracy": 0.7608695652173914,
      "eval_loss": 0.8102234601974487,
      "eval_runtime": 0.6748,
      "eval_samples_per_second": 68.166,
      "eval_steps_per_second": 2.964,
      "step": 162
    },
    {
      "epoch": 26.0,
      "eval_accuracy": 0.782608695652174,
      "eval_loss": 0.7203320264816284,
      "eval_runtime": 0.6843,
      "eval_samples_per_second": 67.221,
      "eval_steps_per_second": 2.923,
      "step": 169
    },
    {
      "epoch": 26.15,
      "learning_rate": 3.739035087719298e-05,
      "loss": 0.2467,
      "step": 170
    },
    {
      "epoch": 26.92,
      "eval_accuracy": 0.7391304347826086,
      "eval_loss": 0.9435244798660278,
      "eval_runtime": 0.6884,
      "eval_samples_per_second": 66.821,
      "eval_steps_per_second": 2.905,
      "step": 175
    },
    {
      "epoch": 27.69,
      "learning_rate": 3.618421052631579e-05,
      "loss": 0.2218,
      "step": 180
    },
    {
      "epoch": 28.0,
      "eval_accuracy": 0.7391304347826086,
      "eval_loss": 0.8905265927314758,
      "eval_runtime": 0.7164,
      "eval_samples_per_second": 64.207,
      "eval_steps_per_second": 2.792,
      "step": 182
    },
    {
      "epoch": 28.92,
      "eval_accuracy": 0.6956521739130435,
      "eval_loss": 1.0828336477279663,
      "eval_runtime": 0.6739,
      "eval_samples_per_second": 68.256,
      "eval_steps_per_second": 2.968,
      "step": 188
    },
    {
      "epoch": 29.23,
      "learning_rate": 3.49780701754386e-05,
      "loss": 0.2075,
      "step": 190
    },
    {
      "epoch": 30.0,
      "eval_accuracy": 0.717391304347826,
      "eval_loss": 0.8935770988464355,
      "eval_runtime": 0.7007,
      "eval_samples_per_second": 65.647,
      "eval_steps_per_second": 2.854,
      "step": 195
    },
    {
      "epoch": 30.77,
      "learning_rate": 3.377192982456141e-05,
      "loss": 0.1893,
      "step": 200
    },
    {
      "epoch": 30.92,
      "eval_accuracy": 0.782608695652174,
      "eval_loss": 0.8835997581481934,
      "eval_runtime": 0.7042,
      "eval_samples_per_second": 65.326,
      "eval_steps_per_second": 2.84,
      "step": 201
    },
    {
      "epoch": 32.0,
      "eval_accuracy": 0.717391304347826,
      "eval_loss": 0.9691875576972961,
      "eval_runtime": 0.834,
      "eval_samples_per_second": 55.158,
      "eval_steps_per_second": 2.398,
      "step": 208
    },
    {
      "epoch": 32.31,
      "learning_rate": 3.256578947368421e-05,
      "loss": 0.194,
      "step": 210
    },
    {
      "epoch": 32.92,
      "eval_accuracy": 0.7608695652173914,
      "eval_loss": 1.039009690284729,
      "eval_runtime": 0.7007,
      "eval_samples_per_second": 65.651,
      "eval_steps_per_second": 2.854,
      "step": 214
    },
    {
      "epoch": 33.85,
      "learning_rate": 3.1359649122807015e-05,
      "loss": 0.1739,
      "step": 220
    },
    {
      "epoch": 34.0,
      "eval_accuracy": 0.7608695652173914,
      "eval_loss": 0.8694977164268494,
      "eval_runtime": 0.6645,
      "eval_samples_per_second": 69.221,
      "eval_steps_per_second": 3.01,
      "step": 221
    },
    {
      "epoch": 34.92,
      "eval_accuracy": 0.6739130434782609,
      "eval_loss": 1.1835598945617676,
      "eval_runtime": 0.7151,
      "eval_samples_per_second": 64.33,
      "eval_steps_per_second": 2.797,
      "step": 227
    },
    {
      "epoch": 35.38,
      "learning_rate": 3.0153508771929826e-05,
      "loss": 0.1895,
      "step": 230
    },
    {
      "epoch": 36.0,
      "eval_accuracy": 0.7391304347826086,
      "eval_loss": 1.0130637884140015,
      "eval_runtime": 0.7672,
      "eval_samples_per_second": 59.96,
      "eval_steps_per_second": 2.607,
      "step": 234
    },
    {
      "epoch": 36.92,
      "learning_rate": 2.894736842105263e-05,
      "loss": 0.1428,
      "step": 240
    },
    {
      "epoch": 36.92,
      "eval_accuracy": 0.7608695652173914,
      "eval_loss": 0.9618039131164551,
      "eval_runtime": 0.7041,
      "eval_samples_per_second": 65.327,
      "eval_steps_per_second": 2.84,
      "step": 240
    },
    {
      "epoch": 38.0,
      "eval_accuracy": 0.7608695652173914,
      "eval_loss": 0.995048999786377,
      "eval_runtime": 0.7508,
      "eval_samples_per_second": 61.272,
      "eval_steps_per_second": 2.664,
      "step": 247
    },
    {
      "epoch": 38.46,
      "learning_rate": 2.774122807017544e-05,
      "loss": 0.1443,
      "step": 250
    },
    {
      "epoch": 38.92,
      "eval_accuracy": 0.782608695652174,
      "eval_loss": 0.9112874865531921,
      "eval_runtime": 0.6953,
      "eval_samples_per_second": 66.156,
      "eval_steps_per_second": 2.876,
      "step": 253
    },
    {
      "epoch": 40.0,
      "learning_rate": 2.6535087719298248e-05,
      "loss": 0.1574,
      "step": 260
    },
    {
      "epoch": 40.0,
      "eval_accuracy": 0.717391304347826,
      "eval_loss": 0.9213477969169617,
      "eval_runtime": 0.6866,
      "eval_samples_per_second": 67.001,
      "eval_steps_per_second": 2.913,
      "step": 260
    },
    {
      "epoch": 40.92,
      "eval_accuracy": 0.7391304347826086,
      "eval_loss": 0.9436842203140259,
      "eval_runtime": 0.6962,
      "eval_samples_per_second": 66.074,
      "eval_steps_per_second": 2.873,
      "step": 266
    },
    {
      "epoch": 41.54,
      "learning_rate": 2.5328947368421052e-05,
      "loss": 0.1442,
      "step": 270
    },
    {
      "epoch": 42.0,
      "eval_accuracy": 0.7608695652173914,
      "eval_loss": 0.9226171970367432,
      "eval_runtime": 0.6819,
      "eval_samples_per_second": 67.458,
      "eval_steps_per_second": 2.933,
      "step": 273
    },
    {
      "epoch": 42.92,
      "eval_accuracy": 0.7391304347826086,
      "eval_loss": 0.9430357813835144,
      "eval_runtime": 0.6947,
      "eval_samples_per_second": 66.212,
      "eval_steps_per_second": 2.879,
      "step": 279
    },
    {
      "epoch": 43.08,
      "learning_rate": 2.412280701754386e-05,
      "loss": 0.1186,
      "step": 280
    },
    {
      "epoch": 44.0,
      "eval_accuracy": 0.782608695652174,
      "eval_loss": 0.9759092926979065,
      "eval_runtime": 0.7192,
      "eval_samples_per_second": 63.959,
      "eval_steps_per_second": 2.781,
      "step": 286
    },
    {
      "epoch": 44.62,
      "learning_rate": 2.2916666666666667e-05,
      "loss": 0.1135,
      "step": 290
    },
    {
      "epoch": 44.92,
      "eval_accuracy": 0.7391304347826086,
      "eval_loss": 0.9650900363922119,
      "eval_runtime": 0.7171,
      "eval_samples_per_second": 64.148,
      "eval_steps_per_second": 2.789,
      "step": 292
    },
    {
      "epoch": 46.0,
      "eval_accuracy": 0.7608695652173914,
      "eval_loss": 0.9536284804344177,
      "eval_runtime": 0.7029,
      "eval_samples_per_second": 65.444,
      "eval_steps_per_second": 2.845,
      "step": 299
    },
    {
      "epoch": 46.15,
      "learning_rate": 2.1710526315789474e-05,
      "loss": 0.1299,
      "step": 300
    },
    {
      "epoch": 46.92,
      "eval_accuracy": 0.7608695652173914,
      "eval_loss": 0.9118357300758362,
      "eval_runtime": 0.691,
      "eval_samples_per_second": 66.567,
      "eval_steps_per_second": 2.894,
      "step": 305
    },
    {
      "epoch": 47.69,
      "learning_rate": 2.050438596491228e-05,
      "loss": 0.134,
      "step": 310
    },
    {
      "epoch": 48.0,
      "eval_accuracy": 0.782608695652174,
      "eval_loss": 0.9847990274429321,
      "eval_runtime": 0.693,
      "eval_samples_per_second": 66.381,
      "eval_steps_per_second": 2.886,
      "step": 312
    },
    {
      "epoch": 48.92,
      "eval_accuracy": 0.7608695652173914,
      "eval_loss": 0.864098072052002,
      "eval_runtime": 0.6669,
      "eval_samples_per_second": 68.973,
      "eval_steps_per_second": 2.999,
      "step": 318
    },
    {
      "epoch": 49.23,
      "learning_rate": 1.929824561403509e-05,
      "loss": 0.1418,
      "step": 320
    },
    {
      "epoch": 50.0,
      "eval_accuracy": 0.7608695652173914,
      "eval_loss": 1.055314064025879,
      "eval_runtime": 0.6987,
      "eval_samples_per_second": 65.838,
      "eval_steps_per_second": 2.863,
      "step": 325
    },
    {
      "epoch": 50.77,
      "learning_rate": 1.8092105263157896e-05,
      "loss": 0.1074,
      "step": 330
    },
    {
      "epoch": 50.92,
      "eval_accuracy": 0.6956521739130435,
      "eval_loss": 1.2511317729949951,
      "eval_runtime": 0.6763,
      "eval_samples_per_second": 68.018,
      "eval_steps_per_second": 2.957,
      "step": 331
    },
    {
      "epoch": 52.0,
      "eval_accuracy": 0.7391304347826086,
      "eval_loss": 1.0186011791229248,
      "eval_runtime": 0.6748,
      "eval_samples_per_second": 68.173,
      "eval_steps_per_second": 2.964,
      "step": 338
    },
    {
      "epoch": 52.31,
      "learning_rate": 1.6885964912280704e-05,
      "loss": 0.1144,
      "step": 340
    },
    {
      "epoch": 52.92,
      "eval_accuracy": 0.717391304347826,
      "eval_loss": 1.0467170476913452,
      "eval_runtime": 0.7339,
      "eval_samples_per_second": 62.676,
      "eval_steps_per_second": 2.725,
      "step": 344
    },
    {
      "epoch": 53.85,
      "learning_rate": 1.5679824561403508e-05,
      "loss": 0.0999,
      "step": 350
    },
    {
      "epoch": 54.0,
      "eval_accuracy": 0.7391304347826086,
      "eval_loss": 0.9897727966308594,
      "eval_runtime": 0.724,
      "eval_samples_per_second": 63.533,
      "eval_steps_per_second": 2.762,
      "step": 351
    },
    {
      "epoch": 54.92,
      "eval_accuracy": 0.7391304347826086,
      "eval_loss": 1.1779977083206177,
      "eval_runtime": 0.6747,
      "eval_samples_per_second": 68.177,
      "eval_steps_per_second": 2.964,
      "step": 357
    },
    {
      "epoch": 55.38,
      "learning_rate": 1.4473684210526315e-05,
      "loss": 0.1131,
      "step": 360
    },
    {
      "epoch": 56.0,
      "eval_accuracy": 0.7608695652173914,
      "eval_loss": 1.0015190839767456,
      "eval_runtime": 0.6671,
      "eval_samples_per_second": 68.953,
      "eval_steps_per_second": 2.998,
      "step": 364
    },
    {
      "epoch": 56.92,
      "learning_rate": 1.3267543859649124e-05,
      "loss": 0.1152,
      "step": 370
    },
    {
      "epoch": 56.92,
      "eval_accuracy": 0.7608695652173914,
      "eval_loss": 1.0759129524230957,
      "eval_runtime": 0.6833,
      "eval_samples_per_second": 67.322,
      "eval_steps_per_second": 2.927,
      "step": 370
    },
    {
      "epoch": 58.0,
      "eval_accuracy": 0.717391304347826,
      "eval_loss": 1.1293679475784302,
      "eval_runtime": 0.7265,
      "eval_samples_per_second": 63.317,
      "eval_steps_per_second": 2.753,
      "step": 377
    },
    {
      "epoch": 58.46,
      "learning_rate": 1.206140350877193e-05,
      "loss": 0.1012,
      "step": 380
    },
    {
      "epoch": 58.92,
      "eval_accuracy": 0.7391304347826086,
      "eval_loss": 1.0893771648406982,
      "eval_runtime": 0.7929,
      "eval_samples_per_second": 58.015,
      "eval_steps_per_second": 2.522,
      "step": 383
    },
    {
      "epoch": 60.0,
      "learning_rate": 1.0855263157894737e-05,
      "loss": 0.0938,
      "step": 390
    },
    {
      "epoch": 60.0,
      "eval_accuracy": 0.7391304347826086,
      "eval_loss": 1.076356291770935,
      "eval_runtime": 0.7122,
      "eval_samples_per_second": 64.587,
      "eval_steps_per_second": 2.808,
      "step": 390
    },
    {
      "epoch": 60.92,
      "eval_accuracy": 0.717391304347826,
      "eval_loss": 1.1784361600875854,
      "eval_runtime": 0.6759,
      "eval_samples_per_second": 68.056,
      "eval_steps_per_second": 2.959,
      "step": 396
    },
    {
      "epoch": 61.54,
      "learning_rate": 9.649122807017545e-06,
      "loss": 0.0944,
      "step": 400
    },
    {
      "epoch": 62.0,
      "eval_accuracy": 0.717391304347826,
      "eval_loss": 1.158086895942688,
      "eval_runtime": 0.6568,
      "eval_samples_per_second": 70.035,
      "eval_steps_per_second": 3.045,
      "step": 403
    },
    {
      "epoch": 62.92,
      "eval_accuracy": 0.7391304347826086,
      "eval_loss": 1.0443789958953857,
      "eval_runtime": 0.7668,
      "eval_samples_per_second": 59.992,
      "eval_steps_per_second": 2.608,
      "step": 409
    },
    {
      "epoch": 63.08,
      "learning_rate": 8.442982456140352e-06,
      "loss": 0.1015,
      "step": 410
    },
    {
      "epoch": 64.0,
      "eval_accuracy": 0.7391304347826086,
      "eval_loss": 1.0996026992797852,
      "eval_runtime": 0.6974,
      "eval_samples_per_second": 65.962,
      "eval_steps_per_second": 2.868,
      "step": 416
    },
    {
      "epoch": 64.62,
      "learning_rate": 7.2368421052631575e-06,
      "loss": 0.0762,
      "step": 420
    },
    {
      "epoch": 64.92,
      "eval_accuracy": 0.7608695652173914,
      "eval_loss": 1.1234647035598755,
      "eval_runtime": 0.6919,
      "eval_samples_per_second": 66.48,
      "eval_steps_per_second": 2.89,
      "step": 422
    },
    {
      "epoch": 66.0,
      "eval_accuracy": 0.7391304347826086,
      "eval_loss": 1.0999064445495605,
      "eval_runtime": 0.6675,
      "eval_samples_per_second": 68.916,
      "eval_steps_per_second": 2.996,
      "step": 429
    },
    {
      "epoch": 66.15,
      "learning_rate": 6.030701754385965e-06,
      "loss": 0.0775,
      "step": 430
    },
    {
      "epoch": 66.92,
      "eval_accuracy": 0.7391304347826086,
      "eval_loss": 1.077625036239624,
      "eval_runtime": 0.7156,
      "eval_samples_per_second": 64.279,
      "eval_steps_per_second": 2.795,
      "step": 435
    },
    {
      "epoch": 67.69,
      "learning_rate": 4.824561403508772e-06,
      "loss": 0.0787,
      "step": 440
    },
    {
      "epoch": 68.0,
      "eval_accuracy": 0.7391304347826086,
      "eval_loss": 1.0878902673721313,
      "eval_runtime": 0.8099,
      "eval_samples_per_second": 56.794,
      "eval_steps_per_second": 2.469,
      "step": 442
    },
    {
      "epoch": 68.92,
      "eval_accuracy": 0.7391304347826086,
      "eval_loss": 1.0913468599319458,
      "eval_runtime": 0.72,
      "eval_samples_per_second": 63.891,
      "eval_steps_per_second": 2.778,
      "step": 448
    },
    {
      "epoch": 69.23,
      "learning_rate": 3.6184210526315788e-06,
      "loss": 0.081,
      "step": 450
    },
    {
      "epoch": 70.0,
      "eval_accuracy": 0.7391304347826086,
      "eval_loss": 1.0558408498764038,
      "eval_runtime": 0.6843,
      "eval_samples_per_second": 67.219,
      "eval_steps_per_second": 2.923,
      "step": 455
    },
    {
      "epoch": 70.77,
      "learning_rate": 2.412280701754386e-06,
      "loss": 0.0749,
      "step": 460
    },
    {
      "epoch": 70.92,
      "eval_accuracy": 0.7391304347826086,
      "eval_loss": 1.0401302576065063,
      "eval_runtime": 0.6922,
      "eval_samples_per_second": 66.457,
      "eval_steps_per_second": 2.889,
      "step": 461
    },
    {
      "epoch": 72.0,
      "eval_accuracy": 0.7391304347826086,
      "eval_loss": 1.0538554191589355,
      "eval_runtime": 0.6836,
      "eval_samples_per_second": 67.294,
      "eval_steps_per_second": 2.926,
      "step": 468
    },
    {
      "epoch": 72.31,
      "learning_rate": 1.206140350877193e-06,
      "loss": 0.0841,
      "step": 470
    },
    {
      "epoch": 72.92,
      "eval_accuracy": 0.7391304347826086,
      "eval_loss": 1.0662652254104614,
      "eval_runtime": 0.727,
      "eval_samples_per_second": 63.271,
      "eval_steps_per_second": 2.751,
      "step": 474
    },
    {
      "epoch": 73.85,
      "learning_rate": 0.0,
      "loss": 0.0928,
      "step": 480
    },
    {
      "epoch": 73.85,
      "eval_accuracy": 0.7391304347826086,
      "eval_loss": 1.0711960792541504,
      "eval_runtime": 0.704,
      "eval_samples_per_second": 65.342,
      "eval_steps_per_second": 2.841,
      "step": 480
    },
    {
      "epoch": 73.85,
      "step": 480,
      "total_flos": 1.5036476561209958e+18,
      "train_loss": 0.3110019433001677,
      "train_runtime": 594.8679,
      "train_samples_per_second": 110.142,
      "train_steps_per_second": 0.807
    }
  ],
  "logging_steps": 10,
  "max_steps": 480,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 80,
  "save_steps": 500,
  "total_flos": 1.5036476561209958e+18,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}