{
"best_metric": 0.008771373890340328,
"best_model_checkpoint": "swin-tiny-patch4-window7-224-Kontur-competition/checkpoint-807",
"epoch": 5.0,
"eval_steps": 500,
"global_step": 1345,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04,
"grad_norm": 5.5162248611450195,
"learning_rate": 3.7037037037037037e-06,
"loss": 0.6819,
"step": 10
},
{
"epoch": 0.07,
"grad_norm": 3.1403090953826904,
"learning_rate": 7.4074074074074075e-06,
"loss": 0.5394,
"step": 20
},
{
"epoch": 0.11,
"grad_norm": 2.9430086612701416,
"learning_rate": 1.1111111111111112e-05,
"loss": 0.3795,
"step": 30
},
{
"epoch": 0.15,
"grad_norm": 3.1442313194274902,
"learning_rate": 1.4814814814814815e-05,
"loss": 0.2726,
"step": 40
},
{
"epoch": 0.19,
"grad_norm": 2.8489677906036377,
"learning_rate": 1.8518518518518518e-05,
"loss": 0.1909,
"step": 50
},
{
"epoch": 0.22,
"grad_norm": 2.6733438968658447,
"learning_rate": 2.2222222222222223e-05,
"loss": 0.0903,
"step": 60
},
{
"epoch": 0.26,
"grad_norm": 6.4875969886779785,
"learning_rate": 2.5925925925925925e-05,
"loss": 0.064,
"step": 70
},
{
"epoch": 0.3,
"grad_norm": 10.464859008789062,
"learning_rate": 2.962962962962963e-05,
"loss": 0.0439,
"step": 80
},
{
"epoch": 0.33,
"grad_norm": 3.629943609237671,
"learning_rate": 3.3333333333333335e-05,
"loss": 0.0384,
"step": 90
},
{
"epoch": 0.37,
"grad_norm": 12.326812744140625,
"learning_rate": 3.7037037037037037e-05,
"loss": 0.0418,
"step": 100
},
{
"epoch": 0.41,
"grad_norm": 4.8540754318237305,
"learning_rate": 4.074074074074074e-05,
"loss": 0.0378,
"step": 110
},
{
"epoch": 0.45,
"grad_norm": 7.512868881225586,
"learning_rate": 4.4444444444444447e-05,
"loss": 0.0261,
"step": 120
},
{
"epoch": 0.48,
"grad_norm": 4.206182479858398,
"learning_rate": 4.814814814814815e-05,
"loss": 0.0317,
"step": 130
},
{
"epoch": 0.52,
"grad_norm": 4.706695556640625,
"learning_rate": 4.979338842975207e-05,
"loss": 0.0353,
"step": 140
},
{
"epoch": 0.56,
"grad_norm": 1.3356839418411255,
"learning_rate": 4.9380165289256205e-05,
"loss": 0.0261,
"step": 150
},
{
"epoch": 0.59,
"grad_norm": 0.30266404151916504,
"learning_rate": 4.896694214876033e-05,
"loss": 0.0174,
"step": 160
},
{
"epoch": 0.63,
"grad_norm": 4.922573566436768,
"learning_rate": 4.855371900826447e-05,
"loss": 0.0533,
"step": 170
},
{
"epoch": 0.67,
"grad_norm": 2.4459125995635986,
"learning_rate": 4.8140495867768596e-05,
"loss": 0.0275,
"step": 180
},
{
"epoch": 0.71,
"grad_norm": 4.6485915184021,
"learning_rate": 4.772727272727273e-05,
"loss": 0.0382,
"step": 190
},
{
"epoch": 0.74,
"grad_norm": 7.907899379730225,
"learning_rate": 4.731404958677686e-05,
"loss": 0.0312,
"step": 200
},
{
"epoch": 0.78,
"grad_norm": 1.321392297744751,
"learning_rate": 4.6900826446280993e-05,
"loss": 0.0174,
"step": 210
},
{
"epoch": 0.82,
"grad_norm": 0.5613566040992737,
"learning_rate": 4.648760330578513e-05,
"loss": 0.0128,
"step": 220
},
{
"epoch": 0.86,
"grad_norm": 0.357917845249176,
"learning_rate": 4.607438016528926e-05,
"loss": 0.0173,
"step": 230
},
{
"epoch": 0.89,
"grad_norm": 1.047433853149414,
"learning_rate": 4.566115702479339e-05,
"loss": 0.0231,
"step": 240
},
{
"epoch": 0.93,
"grad_norm": 0.5947940349578857,
"learning_rate": 4.524793388429752e-05,
"loss": 0.0279,
"step": 250
},
{
"epoch": 0.97,
"grad_norm": 1.0275371074676514,
"learning_rate": 4.4834710743801654e-05,
"loss": 0.0216,
"step": 260
},
{
"epoch": 1.0,
"eval_loss": 0.06352325528860092,
"eval_runtime": 43.5548,
"eval_samples_per_second": 87.82,
"eval_steps_per_second": 2.755,
"step": 269
},
{
"epoch": 1.0,
"grad_norm": 8.136920928955078,
"learning_rate": 4.442148760330579e-05,
"loss": 0.0226,
"step": 270
},
{
"epoch": 1.04,
"grad_norm": 5.6582255363464355,
"learning_rate": 4.400826446280992e-05,
"loss": 0.0142,
"step": 280
},
{
"epoch": 1.08,
"grad_norm": 7.312340259552002,
"learning_rate": 4.359504132231405e-05,
"loss": 0.0298,
"step": 290
},
{
"epoch": 1.12,
"grad_norm": 0.31576940417289734,
"learning_rate": 4.318181818181819e-05,
"loss": 0.018,
"step": 300
},
{
"epoch": 1.15,
"grad_norm": 14.974953651428223,
"learning_rate": 4.2768595041322315e-05,
"loss": 0.0367,
"step": 310
},
{
"epoch": 1.19,
"grad_norm": 4.256837844848633,
"learning_rate": 4.235537190082644e-05,
"loss": 0.0064,
"step": 320
},
{
"epoch": 1.23,
"grad_norm": 0.972306489944458,
"learning_rate": 4.194214876033058e-05,
"loss": 0.014,
"step": 330
},
{
"epoch": 1.26,
"grad_norm": 0.03949430584907532,
"learning_rate": 4.152892561983471e-05,
"loss": 0.0121,
"step": 340
},
{
"epoch": 1.3,
"grad_norm": 0.8921181559562683,
"learning_rate": 4.111570247933885e-05,
"loss": 0.0135,
"step": 350
},
{
"epoch": 1.34,
"grad_norm": 4.770934581756592,
"learning_rate": 4.0702479338842975e-05,
"loss": 0.0279,
"step": 360
},
{
"epoch": 1.38,
"grad_norm": 12.435590744018555,
"learning_rate": 4.028925619834711e-05,
"loss": 0.0263,
"step": 370
},
{
"epoch": 1.41,
"grad_norm": 4.150902271270752,
"learning_rate": 3.9876033057851245e-05,
"loss": 0.0169,
"step": 380
},
{
"epoch": 1.45,
"grad_norm": 1.9341884851455688,
"learning_rate": 3.946280991735537e-05,
"loss": 0.0126,
"step": 390
},
{
"epoch": 1.49,
"grad_norm": 2.019587516784668,
"learning_rate": 3.90495867768595e-05,
"loss": 0.0075,
"step": 400
},
{
"epoch": 1.52,
"grad_norm": 0.34746670722961426,
"learning_rate": 3.8636363636363636e-05,
"loss": 0.0186,
"step": 410
},
{
"epoch": 1.56,
"grad_norm": 4.3460187911987305,
"learning_rate": 3.822314049586777e-05,
"loss": 0.015,
"step": 420
},
{
"epoch": 1.6,
"grad_norm": 0.06722448021173477,
"learning_rate": 3.7809917355371906e-05,
"loss": 0.0314,
"step": 430
},
{
"epoch": 1.64,
"grad_norm": 5.969712257385254,
"learning_rate": 3.7396694214876034e-05,
"loss": 0.0132,
"step": 440
},
{
"epoch": 1.67,
"grad_norm": 5.939075946807861,
"learning_rate": 3.698347107438017e-05,
"loss": 0.0191,
"step": 450
},
{
"epoch": 1.71,
"grad_norm": 2.4963324069976807,
"learning_rate": 3.65702479338843e-05,
"loss": 0.0107,
"step": 460
},
{
"epoch": 1.75,
"grad_norm": 4.111177921295166,
"learning_rate": 3.615702479338843e-05,
"loss": 0.0204,
"step": 470
},
{
"epoch": 1.78,
"grad_norm": 0.07025858759880066,
"learning_rate": 3.574380165289256e-05,
"loss": 0.0142,
"step": 480
},
{
"epoch": 1.82,
"grad_norm": 9.363666534423828,
"learning_rate": 3.5330578512396694e-05,
"loss": 0.0251,
"step": 490
},
{
"epoch": 1.86,
"grad_norm": 2.941878080368042,
"learning_rate": 3.491735537190083e-05,
"loss": 0.0123,
"step": 500
},
{
"epoch": 1.9,
"grad_norm": 5.7476348876953125,
"learning_rate": 3.4504132231404964e-05,
"loss": 0.0096,
"step": 510
},
{
"epoch": 1.93,
"grad_norm": 27.465803146362305,
"learning_rate": 3.409090909090909e-05,
"loss": 0.028,
"step": 520
},
{
"epoch": 1.97,
"grad_norm": 0.047597743570804596,
"learning_rate": 3.367768595041322e-05,
"loss": 0.0286,
"step": 530
},
{
"epoch": 2.0,
"eval_loss": 0.0467231348156929,
"eval_runtime": 43.1197,
"eval_samples_per_second": 88.707,
"eval_steps_per_second": 2.783,
"step": 538
},
{
"epoch": 2.01,
"grad_norm": 0.10430701822042465,
"learning_rate": 3.3264462809917355e-05,
"loss": 0.009,
"step": 540
},
{
"epoch": 2.04,
"grad_norm": 4.288072109222412,
"learning_rate": 3.285123966942149e-05,
"loss": 0.0041,
"step": 550
},
{
"epoch": 2.08,
"grad_norm": 1.3560419082641602,
"learning_rate": 3.243801652892562e-05,
"loss": 0.0175,
"step": 560
},
{
"epoch": 2.12,
"grad_norm": 5.84924840927124,
"learning_rate": 3.202479338842975e-05,
"loss": 0.0095,
"step": 570
},
{
"epoch": 2.16,
"grad_norm": 8.711688041687012,
"learning_rate": 3.161157024793389e-05,
"loss": 0.028,
"step": 580
},
{
"epoch": 2.19,
"grad_norm": 1.378418207168579,
"learning_rate": 3.119834710743802e-05,
"loss": 0.0244,
"step": 590
},
{
"epoch": 2.23,
"grad_norm": 4.585868835449219,
"learning_rate": 3.078512396694215e-05,
"loss": 0.0045,
"step": 600
},
{
"epoch": 2.27,
"grad_norm": 14.25820541381836,
"learning_rate": 3.0371900826446282e-05,
"loss": 0.0217,
"step": 610
},
{
"epoch": 2.3,
"grad_norm": 0.5124061107635498,
"learning_rate": 2.9958677685950414e-05,
"loss": 0.0015,
"step": 620
},
{
"epoch": 2.34,
"grad_norm": 5.597954750061035,
"learning_rate": 2.954545454545455e-05,
"loss": 0.0096,
"step": 630
},
{
"epoch": 2.38,
"grad_norm": 4.180160045623779,
"learning_rate": 2.9132231404958676e-05,
"loss": 0.016,
"step": 640
},
{
"epoch": 2.42,
"grad_norm": 1.3298313617706299,
"learning_rate": 2.871900826446281e-05,
"loss": 0.0104,
"step": 650
},
{
"epoch": 2.45,
"grad_norm": 0.11605699360370636,
"learning_rate": 2.8305785123966943e-05,
"loss": 0.0176,
"step": 660
},
{
"epoch": 2.49,
"grad_norm": 0.0669424757361412,
"learning_rate": 2.7892561983471078e-05,
"loss": 0.0043,
"step": 670
},
{
"epoch": 2.53,
"grad_norm": 0.7822141647338867,
"learning_rate": 2.7479338842975206e-05,
"loss": 0.0041,
"step": 680
},
{
"epoch": 2.57,
"grad_norm": 0.9650640487670898,
"learning_rate": 2.7066115702479337e-05,
"loss": 0.0096,
"step": 690
},
{
"epoch": 2.6,
"grad_norm": 10.423120498657227,
"learning_rate": 2.6652892561983472e-05,
"loss": 0.0065,
"step": 700
},
{
"epoch": 2.64,
"grad_norm": 10.540702819824219,
"learning_rate": 2.6239669421487607e-05,
"loss": 0.0068,
"step": 710
},
{
"epoch": 2.68,
"grad_norm": 0.06544263660907745,
"learning_rate": 2.5826446280991735e-05,
"loss": 0.0142,
"step": 720
},
{
"epoch": 2.71,
"grad_norm": 1.903037428855896,
"learning_rate": 2.5413223140495866e-05,
"loss": 0.0079,
"step": 730
},
{
"epoch": 2.75,
"grad_norm": 8.562331199645996,
"learning_rate": 2.5e-05,
"loss": 0.0109,
"step": 740
},
{
"epoch": 2.79,
"grad_norm": 2.383655548095703,
"learning_rate": 2.4586776859504136e-05,
"loss": 0.0063,
"step": 750
},
{
"epoch": 2.83,
"grad_norm": 6.162371635437012,
"learning_rate": 2.4173553719008264e-05,
"loss": 0.0118,
"step": 760
},
{
"epoch": 2.86,
"grad_norm": 0.7068905234336853,
"learning_rate": 2.37603305785124e-05,
"loss": 0.016,
"step": 770
},
{
"epoch": 2.9,
"grad_norm": 0.07612281292676926,
"learning_rate": 2.334710743801653e-05,
"loss": 0.008,
"step": 780
},
{
"epoch": 2.94,
"grad_norm": 5.8349199295043945,
"learning_rate": 2.2933884297520665e-05,
"loss": 0.0091,
"step": 790
},
{
"epoch": 2.97,
"grad_norm": 0.30163004994392395,
"learning_rate": 2.2520661157024793e-05,
"loss": 0.0252,
"step": 800
},
{
"epoch": 3.0,
"eval_loss": 0.008771373890340328,
"eval_runtime": 43.1769,
"eval_samples_per_second": 88.589,
"eval_steps_per_second": 2.779,
"step": 807
},
{
"epoch": 3.01,
"grad_norm": 1.3629869222640991,
"learning_rate": 2.2107438016528928e-05,
"loss": 0.0095,
"step": 810
},
{
"epoch": 3.05,
"grad_norm": 1.119672179222107,
"learning_rate": 2.169421487603306e-05,
"loss": 0.006,
"step": 820
},
{
"epoch": 3.09,
"grad_norm": 8.719965934753418,
"learning_rate": 2.128099173553719e-05,
"loss": 0.0144,
"step": 830
},
{
"epoch": 3.12,
"grad_norm": 0.12227091938257217,
"learning_rate": 2.0867768595041323e-05,
"loss": 0.0033,
"step": 840
},
{
"epoch": 3.16,
"grad_norm": 1.8717501163482666,
"learning_rate": 2.0454545454545457e-05,
"loss": 0.0027,
"step": 850
},
{
"epoch": 3.2,
"grad_norm": 6.553406238555908,
"learning_rate": 2.004132231404959e-05,
"loss": 0.0133,
"step": 860
},
{
"epoch": 3.23,
"grad_norm": 2.087083101272583,
"learning_rate": 1.962809917355372e-05,
"loss": 0.0134,
"step": 870
},
{
"epoch": 3.27,
"grad_norm": 1.4037662744522095,
"learning_rate": 1.9214876033057852e-05,
"loss": 0.0031,
"step": 880
},
{
"epoch": 3.31,
"grad_norm": 2.297409772872925,
"learning_rate": 1.8801652892561987e-05,
"loss": 0.0162,
"step": 890
},
{
"epoch": 3.35,
"grad_norm": 0.025398077443242073,
"learning_rate": 1.8388429752066118e-05,
"loss": 0.0061,
"step": 900
},
{
"epoch": 3.38,
"grad_norm": 0.037510234862565994,
"learning_rate": 1.797520661157025e-05,
"loss": 0.0064,
"step": 910
},
{
"epoch": 3.42,
"grad_norm": 0.3090139627456665,
"learning_rate": 1.756198347107438e-05,
"loss": 0.004,
"step": 920
},
{
"epoch": 3.46,
"grad_norm": 3.6702754497528076,
"learning_rate": 1.7148760330578516e-05,
"loss": 0.0046,
"step": 930
},
{
"epoch": 3.49,
"grad_norm": 0.22104105353355408,
"learning_rate": 1.6735537190082644e-05,
"loss": 0.0003,
"step": 940
},
{
"epoch": 3.53,
"grad_norm": 2.2657175064086914,
"learning_rate": 1.632231404958678e-05,
"loss": 0.0048,
"step": 950
},
{
"epoch": 3.57,
"grad_norm": 0.0027564549818634987,
"learning_rate": 1.590909090909091e-05,
"loss": 0.0112,
"step": 960
},
{
"epoch": 3.61,
"grad_norm": 0.024479985237121582,
"learning_rate": 1.549586776859504e-05,
"loss": 0.0031,
"step": 970
},
{
"epoch": 3.64,
"grad_norm": 0.3030235767364502,
"learning_rate": 1.5082644628099175e-05,
"loss": 0.0031,
"step": 980
},
{
"epoch": 3.68,
"grad_norm": 0.01274044532328844,
"learning_rate": 1.4669421487603308e-05,
"loss": 0.0086,
"step": 990
},
{
"epoch": 3.72,
"grad_norm": 0.9196871519088745,
"learning_rate": 1.4256198347107438e-05,
"loss": 0.01,
"step": 1000
},
{
"epoch": 3.75,
"grad_norm": 0.09063485264778137,
"learning_rate": 1.3842975206611573e-05,
"loss": 0.0043,
"step": 1010
},
{
"epoch": 3.79,
"grad_norm": 0.008292087353765965,
"learning_rate": 1.3429752066115702e-05,
"loss": 0.0028,
"step": 1020
},
{
"epoch": 3.83,
"grad_norm": 0.17935311794281006,
"learning_rate": 1.3016528925619837e-05,
"loss": 0.006,
"step": 1030
},
{
"epoch": 3.87,
"grad_norm": 0.2591820955276489,
"learning_rate": 1.2603305785123967e-05,
"loss": 0.0072,
"step": 1040
},
{
"epoch": 3.9,
"grad_norm": 0.8280309438705444,
"learning_rate": 1.21900826446281e-05,
"loss": 0.0044,
"step": 1050
},
{
"epoch": 3.94,
"grad_norm": 1.8738752603530884,
"learning_rate": 1.1776859504132231e-05,
"loss": 0.0033,
"step": 1060
},
{
"epoch": 3.98,
"grad_norm": 0.01168906595557928,
"learning_rate": 1.1363636363636365e-05,
"loss": 0.0003,
"step": 1070
},
{
"epoch": 4.0,
"eval_loss": 0.033934369683265686,
"eval_runtime": 42.9173,
"eval_samples_per_second": 89.125,
"eval_steps_per_second": 2.796,
"step": 1076
},
{
"epoch": 4.01,
"grad_norm": 1.7786719799041748,
"learning_rate": 1.0950413223140496e-05,
"loss": 0.005,
"step": 1080
},
{
"epoch": 4.05,
"grad_norm": 0.44828712940216064,
"learning_rate": 1.0537190082644628e-05,
"loss": 0.001,
"step": 1090
},
{
"epoch": 4.09,
"grad_norm": 0.0037061686161905527,
"learning_rate": 1.012396694214876e-05,
"loss": 0.0005,
"step": 1100
},
{
"epoch": 4.13,
"grad_norm": 7.8463358879089355,
"learning_rate": 9.710743801652892e-06,
"loss": 0.0054,
"step": 1110
},
{
"epoch": 4.16,
"grad_norm": 0.07012557983398438,
"learning_rate": 9.297520661157025e-06,
"loss": 0.0037,
"step": 1120
},
{
"epoch": 4.2,
"grad_norm": 2.830775260925293,
"learning_rate": 8.884297520661157e-06,
"loss": 0.0015,
"step": 1130
},
{
"epoch": 4.24,
"grad_norm": 0.2861256003379822,
"learning_rate": 8.47107438016529e-06,
"loss": 0.0042,
"step": 1140
},
{
"epoch": 4.28,
"grad_norm": 0.9108858108520508,
"learning_rate": 8.057851239669421e-06,
"loss": 0.0003,
"step": 1150
},
{
"epoch": 4.31,
"grad_norm": 0.06316760182380676,
"learning_rate": 7.644628099173553e-06,
"loss": 0.0002,
"step": 1160
},
{
"epoch": 4.35,
"grad_norm": 0.006003058515489101,
"learning_rate": 7.231404958677686e-06,
"loss": 0.001,
"step": 1170
},
{
"epoch": 4.39,
"grad_norm": 0.0066665462218225,
"learning_rate": 6.818181818181818e-06,
"loss": 0.012,
"step": 1180
},
{
"epoch": 4.42,
"grad_norm": 7.692056179046631,
"learning_rate": 6.404958677685951e-06,
"loss": 0.0029,
"step": 1190
},
{
"epoch": 4.46,
"grad_norm": 0.01048464234918356,
"learning_rate": 5.991735537190083e-06,
"loss": 0.0007,
"step": 1200
},
{
"epoch": 4.5,
"grad_norm": 0.016681592911481857,
"learning_rate": 5.578512396694215e-06,
"loss": 0.0015,
"step": 1210
},
{
"epoch": 4.54,
"grad_norm": 0.035201072692871094,
"learning_rate": 5.1652892561983475e-06,
"loss": 0.0028,
"step": 1220
},
{
"epoch": 4.57,
"grad_norm": 0.05388149991631508,
"learning_rate": 4.75206611570248e-06,
"loss": 0.0008,
"step": 1230
},
{
"epoch": 4.61,
"grad_norm": 0.14326803386211395,
"learning_rate": 4.338842975206612e-06,
"loss": 0.0001,
"step": 1240
},
{
"epoch": 4.65,
"grad_norm": 0.001435384969227016,
"learning_rate": 3.925619834710744e-06,
"loss": 0.0048,
"step": 1250
},
{
"epoch": 4.68,
"grad_norm": 0.11075280606746674,
"learning_rate": 3.5123966942148763e-06,
"loss": 0.0006,
"step": 1260
},
{
"epoch": 4.72,
"grad_norm": 0.05017843842506409,
"learning_rate": 3.0991735537190086e-06,
"loss": 0.0001,
"step": 1270
},
{
"epoch": 4.76,
"grad_norm": 0.002463772427290678,
"learning_rate": 2.685950413223141e-06,
"loss": 0.0003,
"step": 1280
},
{
"epoch": 4.8,
"grad_norm": 13.101515769958496,
"learning_rate": 2.2727272727272728e-06,
"loss": 0.0011,
"step": 1290
},
{
"epoch": 4.83,
"grad_norm": 0.003497660392895341,
"learning_rate": 1.859504132231405e-06,
"loss": 0.0014,
"step": 1300
},
{
"epoch": 4.87,
"grad_norm": 0.07368556410074234,
"learning_rate": 1.4462809917355374e-06,
"loss": 0.0013,
"step": 1310
},
{
"epoch": 4.91,
"grad_norm": 0.0647512823343277,
"learning_rate": 1.0330578512396695e-06,
"loss": 0.0045,
"step": 1320
},
{
"epoch": 4.94,
"grad_norm": 0.0005362280062399805,
"learning_rate": 6.198347107438017e-07,
"loss": 0.0141,
"step": 1330
},
{
"epoch": 4.98,
"grad_norm": 0.001157268532551825,
"learning_rate": 2.066115702479339e-07,
"loss": 0.0019,
"step": 1340
},
{
"epoch": 5.0,
"eval_loss": 0.012286508455872536,
"eval_runtime": 42.4964,
"eval_samples_per_second": 90.008,
"eval_steps_per_second": 2.824,
"step": 1345
},
{
"epoch": 5.0,
"step": 1345,
"total_flos": 4.2775914245208883e+18,
"train_loss": 0.028647772959333094,
"train_runtime": 3141.2683,
"train_samples_per_second": 54.785,
"train_steps_per_second": 0.428
}
],
"logging_steps": 10,
"max_steps": 1345,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"total_flos": 4.2775914245208883e+18,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}
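
The JSON above is the state file that the Hugging Face `Trainer` writes at the end of a run: `log_history` interleaves one training record every `logging_steps` (10) steps with one evaluation record per epoch, and `best_metric` / `best_model_checkpoint` point at the epoch-3 checkpoint (step 807, eval_loss ≈ 0.00877). The logged learning rates are consistent with a linear warmup to 5e-5 over roughly the first 10% of the 1,345 steps followed by linear decay, though the scheduler settings themselves are not recorded in this file. Below is a minimal sketch for inspecting the log, assuming the data is saved under its usual filename `trainer_state.json` in the working directory; the plotting part is optional and the output filename `loss_curve.png` is an arbitrary choice.

```python
import json

# Assumption: the JSON above is stored as "trainer_state.json" next to this script.
with open("trainer_state.json") as f:
    state = json.load(f)

# Split the merged log_history into per-step training records and per-epoch eval records.
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best eval_loss:  {state['best_metric']:.6f}")
print(f"best checkpoint: {state['best_model_checkpoint']}")
for e in eval_log:
    print(f"epoch {e['epoch']:.0f}: eval_loss={e['eval_loss']:.6f} (step {e['step']})")

# Optional: plot training loss per logged step and eval loss per epoch.
try:
    import matplotlib.pyplot as plt

    plt.plot([e["step"] for e in train_log],
             [e["loss"] for e in train_log], label="train loss")
    plt.scatter([e["step"] for e in eval_log],
                [e["eval_loss"] for e in eval_log], color="red", label="eval loss")
    plt.xlabel("step")
    plt.ylabel("loss")
    plt.legend()
    plt.savefig("loss_curve.png")
except ImportError:
    pass  # plotting is skipped if matplotlib is not installed
```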