End of training (commit 3c1fa1d)
{
"best_metric": 0.855072463768116,
"best_model_checkpoint": "videomae-base-finetuned-ucf101-subset/checkpoint-1620",
"epoch": 5.166666666666667,
"eval_steps": 500,
"global_step": 1620,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 3.0864197530864196e-06,
"loss": 0.0532,
"step": 10
},
{
"epoch": 0.01,
"learning_rate": 6.172839506172839e-06,
"loss": 0.1573,
"step": 20
},
{
"epoch": 0.02,
"learning_rate": 9.259259259259259e-06,
"loss": 0.4842,
"step": 30
},
{
"epoch": 0.02,
"learning_rate": 1.2345679012345678e-05,
"loss": 0.0059,
"step": 40
},
{
"epoch": 0.03,
"learning_rate": 1.54320987654321e-05,
"loss": 0.1714,
"step": 50
},
{
"epoch": 0.04,
"learning_rate": 1.8518518518518518e-05,
"loss": 0.0123,
"step": 60
},
{
"epoch": 0.04,
"learning_rate": 2.1604938271604937e-05,
"loss": 0.0539,
"step": 70
},
{
"epoch": 0.05,
"learning_rate": 2.4691358024691357e-05,
"loss": 0.5923,
"step": 80
},
{
"epoch": 0.06,
"learning_rate": 2.777777777777778e-05,
"loss": 0.0074,
"step": 90
},
{
"epoch": 0.06,
"learning_rate": 3.08641975308642e-05,
"loss": 0.0048,
"step": 100
},
{
"epoch": 0.07,
"learning_rate": 3.395061728395062e-05,
"loss": 1.7082,
"step": 110
},
{
"epoch": 0.07,
"learning_rate": 3.7037037037037037e-05,
"loss": 1.4331,
"step": 120
},
{
"epoch": 0.08,
"learning_rate": 4.012345679012346e-05,
"loss": 1.0093,
"step": 130
},
{
"epoch": 0.09,
"learning_rate": 4.3209876543209875e-05,
"loss": 0.217,
"step": 140
},
{
"epoch": 0.09,
"learning_rate": 4.62962962962963e-05,
"loss": 0.681,
"step": 150
},
{
"epoch": 0.1,
"learning_rate": 4.938271604938271e-05,
"loss": 1.2396,
"step": 160
},
{
"epoch": 0.1,
"learning_rate": 4.972565157750343e-05,
"loss": 0.6086,
"step": 170
},
{
"epoch": 0.11,
"learning_rate": 4.938271604938271e-05,
"loss": 1.5907,
"step": 180
},
{
"epoch": 0.12,
"learning_rate": 4.9039780521262005e-05,
"loss": 1.307,
"step": 190
},
{
"epoch": 0.12,
"learning_rate": 4.86968449931413e-05,
"loss": 2.1886,
"step": 200
},
{
"epoch": 0.13,
"learning_rate": 4.835390946502058e-05,
"loss": 1.197,
"step": 210
},
{
"epoch": 0.14,
"learning_rate": 4.801097393689987e-05,
"loss": 2.7063,
"step": 220
},
{
"epoch": 0.14,
"learning_rate": 4.766803840877915e-05,
"loss": 1.61,
"step": 230
},
{
"epoch": 0.15,
"learning_rate": 4.732510288065844e-05,
"loss": 0.9283,
"step": 240
},
{
"epoch": 0.15,
"learning_rate": 4.6982167352537723e-05,
"loss": 1.4604,
"step": 250
},
{
"epoch": 0.16,
"learning_rate": 4.6639231824417016e-05,
"loss": 1.8419,
"step": 260
},
{
"epoch": 0.17,
"learning_rate": 4.62962962962963e-05,
"loss": 2.4355,
"step": 270
},
{
"epoch": 0.17,
"eval_accuracy": 0.48792270531400966,
"eval_loss": 2.3795628547668457,
"eval_runtime": 41.3663,
"eval_samples_per_second": 5.004,
"eval_steps_per_second": 5.004,
"step": 270
},
{
"epoch": 1.01,
"learning_rate": 4.5953360768175586e-05,
"loss": 1.9276,
"step": 280
},
{
"epoch": 1.01,
"learning_rate": 4.561042524005487e-05,
"loss": 1.525,
"step": 290
},
{
"epoch": 1.02,
"learning_rate": 4.5267489711934157e-05,
"loss": 0.4212,
"step": 300
},
{
"epoch": 1.02,
"learning_rate": 4.492455418381344e-05,
"loss": 0.3908,
"step": 310
},
{
"epoch": 1.03,
"learning_rate": 4.4581618655692734e-05,
"loss": 0.1007,
"step": 320
},
{
"epoch": 1.04,
"learning_rate": 4.423868312757202e-05,
"loss": 1.1603,
"step": 330
},
{
"epoch": 1.04,
"learning_rate": 4.3895747599451304e-05,
"loss": 1.0381,
"step": 340
},
{
"epoch": 1.05,
"learning_rate": 4.355281207133059e-05,
"loss": 0.627,
"step": 350
},
{
"epoch": 1.06,
"learning_rate": 4.3209876543209875e-05,
"loss": 1.0063,
"step": 360
},
{
"epoch": 1.06,
"learning_rate": 4.286694101508916e-05,
"loss": 0.9529,
"step": 370
},
{
"epoch": 1.07,
"learning_rate": 4.252400548696845e-05,
"loss": 1.1548,
"step": 380
},
{
"epoch": 1.07,
"learning_rate": 4.2181069958847744e-05,
"loss": 0.8734,
"step": 390
},
{
"epoch": 1.08,
"learning_rate": 4.183813443072703e-05,
"loss": 0.9007,
"step": 400
},
{
"epoch": 1.09,
"learning_rate": 4.1495198902606315e-05,
"loss": 0.8088,
"step": 410
},
{
"epoch": 1.09,
"learning_rate": 4.11522633744856e-05,
"loss": 0.5867,
"step": 420
},
{
"epoch": 1.1,
"learning_rate": 4.0809327846364885e-05,
"loss": 0.9826,
"step": 430
},
{
"epoch": 1.1,
"learning_rate": 4.046639231824417e-05,
"loss": 0.3514,
"step": 440
},
{
"epoch": 1.11,
"learning_rate": 4.012345679012346e-05,
"loss": 0.4446,
"step": 450
},
{
"epoch": 1.12,
"learning_rate": 3.978052126200275e-05,
"loss": 0.0763,
"step": 460
},
{
"epoch": 1.12,
"learning_rate": 3.943758573388203e-05,
"loss": 1.1436,
"step": 470
},
{
"epoch": 1.13,
"learning_rate": 3.909465020576132e-05,
"loss": 1.3552,
"step": 480
},
{
"epoch": 1.14,
"learning_rate": 3.8751714677640603e-05,
"loss": 1.246,
"step": 490
},
{
"epoch": 1.14,
"learning_rate": 3.840877914951989e-05,
"loss": 0.9918,
"step": 500
},
{
"epoch": 1.15,
"learning_rate": 3.806584362139918e-05,
"loss": 0.4254,
"step": 510
},
{
"epoch": 1.15,
"learning_rate": 3.7722908093278466e-05,
"loss": 0.8668,
"step": 520
},
{
"epoch": 1.16,
"learning_rate": 3.737997256515775e-05,
"loss": 1.3486,
"step": 530
},
{
"epoch": 1.17,
"learning_rate": 3.7037037037037037e-05,
"loss": 0.8776,
"step": 540
},
{
"epoch": 1.17,
"eval_accuracy": 0.6811594202898551,
"eval_loss": 1.4838005304336548,
"eval_runtime": 41.5741,
"eval_samples_per_second": 4.979,
"eval_steps_per_second": 4.979,
"step": 540
},
{
"epoch": 2.01,
"learning_rate": 3.669410150891632e-05,
"loss": 0.0505,
"step": 550
},
{
"epoch": 2.01,
"learning_rate": 3.635116598079561e-05,
"loss": 0.5841,
"step": 560
},
{
"epoch": 2.02,
"learning_rate": 3.60082304526749e-05,
"loss": 1.117,
"step": 570
},
{
"epoch": 2.02,
"learning_rate": 3.566529492455419e-05,
"loss": 0.7054,
"step": 580
},
{
"epoch": 2.03,
"learning_rate": 3.5322359396433476e-05,
"loss": 0.2651,
"step": 590
},
{
"epoch": 2.04,
"learning_rate": 3.497942386831276e-05,
"loss": 0.0156,
"step": 600
},
{
"epoch": 2.04,
"learning_rate": 3.463648834019205e-05,
"loss": 0.0324,
"step": 610
},
{
"epoch": 2.05,
"learning_rate": 3.429355281207133e-05,
"loss": 1.4944,
"step": 620
},
{
"epoch": 2.06,
"learning_rate": 3.395061728395062e-05,
"loss": 1.1175,
"step": 630
},
{
"epoch": 2.06,
"learning_rate": 3.360768175582991e-05,
"loss": 1.8927,
"step": 640
},
{
"epoch": 2.07,
"learning_rate": 3.3264746227709195e-05,
"loss": 0.3666,
"step": 650
},
{
"epoch": 2.07,
"learning_rate": 3.292181069958848e-05,
"loss": 0.5128,
"step": 660
},
{
"epoch": 2.08,
"learning_rate": 3.2578875171467765e-05,
"loss": 0.6543,
"step": 670
},
{
"epoch": 2.09,
"learning_rate": 3.223593964334705e-05,
"loss": 0.7869,
"step": 680
},
{
"epoch": 2.09,
"learning_rate": 3.1893004115226336e-05,
"loss": 0.5005,
"step": 690
},
{
"epoch": 2.1,
"learning_rate": 3.155006858710563e-05,
"loss": 0.5968,
"step": 700
},
{
"epoch": 2.1,
"learning_rate": 3.120713305898491e-05,
"loss": 0.7869,
"step": 710
},
{
"epoch": 2.11,
"learning_rate": 3.08641975308642e-05,
"loss": 0.8124,
"step": 720
},
{
"epoch": 2.12,
"learning_rate": 3.0521262002743484e-05,
"loss": 2.0808,
"step": 730
},
{
"epoch": 2.12,
"learning_rate": 3.017832647462277e-05,
"loss": 0.1758,
"step": 740
},
{
"epoch": 2.13,
"learning_rate": 2.9835390946502057e-05,
"loss": 0.1038,
"step": 750
},
{
"epoch": 2.14,
"learning_rate": 2.949245541838135e-05,
"loss": 0.5712,
"step": 760
},
{
"epoch": 2.14,
"learning_rate": 2.9149519890260635e-05,
"loss": 0.5609,
"step": 770
},
{
"epoch": 2.15,
"learning_rate": 2.880658436213992e-05,
"loss": 0.5059,
"step": 780
},
{
"epoch": 2.15,
"learning_rate": 2.846364883401921e-05,
"loss": 0.4832,
"step": 790
},
{
"epoch": 2.16,
"learning_rate": 2.8120713305898494e-05,
"loss": 0.0095,
"step": 800
},
{
"epoch": 2.17,
"learning_rate": 2.777777777777778e-05,
"loss": 0.0146,
"step": 810
},
{
"epoch": 2.17,
"eval_accuracy": 0.7391304347826086,
"eval_loss": 1.0877647399902344,
"eval_runtime": 41.9513,
"eval_samples_per_second": 4.934,
"eval_steps_per_second": 4.934,
"step": 810
},
{
"epoch": 3.01,
"learning_rate": 2.7434842249657068e-05,
"loss": 0.6179,
"step": 820
},
{
"epoch": 3.01,
"learning_rate": 2.7091906721536353e-05,
"loss": 0.4959,
"step": 830
},
{
"epoch": 3.02,
"learning_rate": 2.6748971193415638e-05,
"loss": 0.5077,
"step": 840
},
{
"epoch": 3.02,
"learning_rate": 2.6406035665294927e-05,
"loss": 0.5536,
"step": 850
},
{
"epoch": 3.03,
"learning_rate": 2.6063100137174212e-05,
"loss": 0.7469,
"step": 860
},
{
"epoch": 3.04,
"learning_rate": 2.5720164609053497e-05,
"loss": 0.1714,
"step": 870
},
{
"epoch": 3.04,
"learning_rate": 2.5377229080932786e-05,
"loss": 0.0627,
"step": 880
},
{
"epoch": 3.05,
"learning_rate": 2.503429355281207e-05,
"loss": 0.4998,
"step": 890
},
{
"epoch": 3.06,
"learning_rate": 2.4691358024691357e-05,
"loss": 0.0158,
"step": 900
},
{
"epoch": 3.06,
"learning_rate": 2.434842249657065e-05,
"loss": 1.1708,
"step": 910
},
{
"epoch": 3.07,
"learning_rate": 2.4005486968449934e-05,
"loss": 0.4583,
"step": 920
},
{
"epoch": 3.07,
"learning_rate": 2.366255144032922e-05,
"loss": 0.4836,
"step": 930
},
{
"epoch": 3.08,
"learning_rate": 2.3319615912208508e-05,
"loss": 0.0151,
"step": 940
},
{
"epoch": 3.09,
"learning_rate": 2.2976680384087793e-05,
"loss": 0.611,
"step": 950
},
{
"epoch": 3.09,
"learning_rate": 2.2633744855967078e-05,
"loss": 0.0026,
"step": 960
},
{
"epoch": 3.1,
"learning_rate": 2.2290809327846367e-05,
"loss": 0.0559,
"step": 970
},
{
"epoch": 3.1,
"learning_rate": 2.1947873799725652e-05,
"loss": 0.7487,
"step": 980
},
{
"epoch": 3.11,
"learning_rate": 2.1604938271604937e-05,
"loss": 0.0013,
"step": 990
},
{
"epoch": 3.12,
"learning_rate": 2.1262002743484226e-05,
"loss": 0.0213,
"step": 1000
},
{
"epoch": 3.12,
"learning_rate": 2.0919067215363515e-05,
"loss": 0.4467,
"step": 1010
},
{
"epoch": 3.13,
"learning_rate": 2.05761316872428e-05,
"loss": 0.7052,
"step": 1020
},
{
"epoch": 3.14,
"learning_rate": 2.0233196159122085e-05,
"loss": 0.1855,
"step": 1030
},
{
"epoch": 3.14,
"learning_rate": 1.9890260631001374e-05,
"loss": 0.3762,
"step": 1040
},
{
"epoch": 3.15,
"learning_rate": 1.954732510288066e-05,
"loss": 0.1378,
"step": 1050
},
{
"epoch": 3.15,
"learning_rate": 1.9204389574759944e-05,
"loss": 0.0199,
"step": 1060
},
{
"epoch": 3.16,
"learning_rate": 1.8861454046639233e-05,
"loss": 0.5699,
"step": 1070
},
{
"epoch": 3.17,
"learning_rate": 1.8518518518518518e-05,
"loss": 2.1899,
"step": 1080
},
{
"epoch": 3.17,
"eval_accuracy": 0.7294685990338164,
"eval_loss": 1.347913384437561,
"eval_runtime": 41.5677,
"eval_samples_per_second": 4.98,
"eval_steps_per_second": 4.98,
"step": 1080
},
{
"epoch": 4.01,
"learning_rate": 1.8175582990397804e-05,
"loss": 0.0017,
"step": 1090
},
{
"epoch": 4.01,
"learning_rate": 1.7832647462277096e-05,
"loss": 0.5574,
"step": 1100
},
{
"epoch": 4.02,
"learning_rate": 1.748971193415638e-05,
"loss": 1.044,
"step": 1110
},
{
"epoch": 4.02,
"learning_rate": 1.7146776406035666e-05,
"loss": 0.6995,
"step": 1120
},
{
"epoch": 4.03,
"learning_rate": 1.6803840877914955e-05,
"loss": 1.2995,
"step": 1130
},
{
"epoch": 4.04,
"learning_rate": 1.646090534979424e-05,
"loss": 0.4022,
"step": 1140
},
{
"epoch": 4.04,
"learning_rate": 1.6117969821673525e-05,
"loss": 0.6383,
"step": 1150
},
{
"epoch": 4.05,
"learning_rate": 1.5775034293552814e-05,
"loss": 0.2126,
"step": 1160
},
{
"epoch": 4.06,
"learning_rate": 1.54320987654321e-05,
"loss": 0.0057,
"step": 1170
},
{
"epoch": 4.06,
"learning_rate": 1.5089163237311384e-05,
"loss": 0.4209,
"step": 1180
},
{
"epoch": 4.07,
"learning_rate": 1.4746227709190675e-05,
"loss": 1.0974,
"step": 1190
},
{
"epoch": 4.07,
"learning_rate": 1.440329218106996e-05,
"loss": 0.1106,
"step": 1200
},
{
"epoch": 4.08,
"learning_rate": 1.4060356652949247e-05,
"loss": 0.0024,
"step": 1210
},
{
"epoch": 4.09,
"learning_rate": 1.3717421124828534e-05,
"loss": 0.004,
"step": 1220
},
{
"epoch": 4.09,
"learning_rate": 1.3374485596707819e-05,
"loss": 1.408,
"step": 1230
},
{
"epoch": 4.1,
"learning_rate": 1.3031550068587106e-05,
"loss": 0.3735,
"step": 1240
},
{
"epoch": 4.1,
"learning_rate": 1.2688614540466393e-05,
"loss": 0.401,
"step": 1250
},
{
"epoch": 4.11,
"learning_rate": 1.2345679012345678e-05,
"loss": 0.0215,
"step": 1260
},
{
"epoch": 4.12,
"learning_rate": 1.2002743484224967e-05,
"loss": 0.2846,
"step": 1270
},
{
"epoch": 4.12,
"learning_rate": 1.1659807956104254e-05,
"loss": 0.0585,
"step": 1280
},
{
"epoch": 4.13,
"learning_rate": 1.1316872427983539e-05,
"loss": 0.0521,
"step": 1290
},
{
"epoch": 4.14,
"learning_rate": 1.0973936899862826e-05,
"loss": 0.0031,
"step": 1300
},
{
"epoch": 4.14,
"learning_rate": 1.0631001371742113e-05,
"loss": 0.5282,
"step": 1310
},
{
"epoch": 4.15,
"learning_rate": 1.02880658436214e-05,
"loss": 0.0022,
"step": 1320
},
{
"epoch": 4.15,
"learning_rate": 9.945130315500687e-06,
"loss": 0.003,
"step": 1330
},
{
"epoch": 4.16,
"learning_rate": 9.602194787379972e-06,
"loss": 0.006,
"step": 1340
},
{
"epoch": 4.17,
"learning_rate": 9.259259259259259e-06,
"loss": 0.0019,
"step": 1350
},
{
"epoch": 4.17,
"eval_accuracy": 0.8260869565217391,
"eval_loss": 0.8615291714668274,
"eval_runtime": 41.432,
"eval_samples_per_second": 4.996,
"eval_steps_per_second": 4.996,
"step": 1350
},
{
"epoch": 5.01,
"learning_rate": 8.916323731138548e-06,
"loss": 0.9725,
"step": 1360
},
{
"epoch": 5.01,
"learning_rate": 8.573388203017833e-06,
"loss": 0.0022,
"step": 1370
},
{
"epoch": 5.02,
"learning_rate": 8.23045267489712e-06,
"loss": 0.0043,
"step": 1380
},
{
"epoch": 5.02,
"learning_rate": 7.887517146776407e-06,
"loss": 0.0018,
"step": 1390
},
{
"epoch": 5.03,
"learning_rate": 7.544581618655692e-06,
"loss": 0.0019,
"step": 1400
},
{
"epoch": 5.04,
"learning_rate": 7.20164609053498e-06,
"loss": 0.5719,
"step": 1410
},
{
"epoch": 5.04,
"learning_rate": 6.858710562414267e-06,
"loss": 0.0021,
"step": 1420
},
{
"epoch": 5.05,
"learning_rate": 6.515775034293553e-06,
"loss": 0.8107,
"step": 1430
},
{
"epoch": 5.06,
"learning_rate": 6.172839506172839e-06,
"loss": 0.0033,
"step": 1440
},
{
"epoch": 5.06,
"learning_rate": 5.829903978052127e-06,
"loss": 0.4235,
"step": 1450
},
{
"epoch": 5.07,
"learning_rate": 5.486968449931413e-06,
"loss": 0.0532,
"step": 1460
},
{
"epoch": 5.07,
"learning_rate": 5.1440329218107e-06,
"loss": 0.1546,
"step": 1470
},
{
"epoch": 5.08,
"learning_rate": 4.801097393689986e-06,
"loss": 0.4308,
"step": 1480
},
{
"epoch": 5.09,
"learning_rate": 4.458161865569274e-06,
"loss": 0.8503,
"step": 1490
},
{
"epoch": 5.09,
"learning_rate": 4.11522633744856e-06,
"loss": 0.3947,
"step": 1500
},
{
"epoch": 5.1,
"learning_rate": 3.772290809327846e-06,
"loss": 0.3845,
"step": 1510
},
{
"epoch": 5.1,
"learning_rate": 3.4293552812071335e-06,
"loss": 0.0024,
"step": 1520
},
{
"epoch": 5.11,
"learning_rate": 3.0864197530864196e-06,
"loss": 0.003,
"step": 1530
},
{
"epoch": 5.12,
"learning_rate": 2.7434842249657065e-06,
"loss": 0.4155,
"step": 1540
},
{
"epoch": 5.12,
"learning_rate": 2.400548696844993e-06,
"loss": 0.0027,
"step": 1550
},
{
"epoch": 5.13,
"learning_rate": 2.05761316872428e-06,
"loss": 0.1344,
"step": 1560
},
{
"epoch": 5.14,
"learning_rate": 1.7146776406035667e-06,
"loss": 0.0022,
"step": 1570
},
{
"epoch": 5.14,
"learning_rate": 1.3717421124828533e-06,
"loss": 0.0019,
"step": 1580
},
{
"epoch": 5.15,
"learning_rate": 1.02880658436214e-06,
"loss": 0.4305,
"step": 1590
},
{
"epoch": 5.15,
"learning_rate": 6.858710562414266e-07,
"loss": 0.0032,
"step": 1600
},
{
"epoch": 5.16,
"learning_rate": 3.429355281207133e-07,
"loss": 0.4733,
"step": 1610
},
{
"epoch": 5.17,
"learning_rate": 0.0,
"loss": 0.0021,
"step": 1620
},
{
"epoch": 5.17,
"eval_accuracy": 0.855072463768116,
"eval_loss": 0.6127785444259644,
"eval_runtime": 41.5765,
"eval_samples_per_second": 4.979,
"eval_steps_per_second": 4.979,
"step": 1620
},
{
"epoch": 5.17,
"step": 1620,
"total_flos": 2.0187506515909018e+18,
"train_loss": 0.5810601886580296,
"train_runtime": 1084.0062,
"train_samples_per_second": 1.494,
"train_steps_per_second": 1.494
},
{
"epoch": 5.17,
"eval_accuracy": 0.7816593886462883,
"eval_loss": 1.17023766040802,
"eval_runtime": 75.7169,
"eval_samples_per_second": 3.024,
"eval_steps_per_second": 3.024,
"step": 1620
},
{
"epoch": 5.17,
"eval_accuracy": 0.7816593886462883,
"eval_loss": 1.1702378988265991,
"eval_runtime": 76.9225,
"eval_samples_per_second": 2.977,
"eval_steps_per_second": 2.977,
"step": 1620
}
],
"logging_steps": 10,
"max_steps": 1620,
"num_train_epochs": 9223372036854775807,
"save_steps": 500,
"total_flos": 2.0187506515909018e+18,
"trial_name": null,
"trial_params": null
}
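
For reference, a trainer state like the one above can be inspected programmatically. The following is a minimal sketch, assuming the file is saved as `trainer_state.json` inside the checkpoint directory named in `best_model_checkpoint` (the Hugging Face Trainer writes it there by default); it relies only on the keys that appear above (`best_metric`, `best_model_checkpoint`, `log_history`).

```python
import json

# Assumed path, taken from "best_model_checkpoint" above; adjust to your local layout.
path = "videomae-base-finetuned-ucf101-subset/checkpoint-1620/trainer_state.json"

with open(path) as f:
    state = json.load(f)

print("best metric:", state["best_metric"])
print("best checkpoint:", state["best_model_checkpoint"])

# log_history mixes training-loss records and evaluation records;
# evaluation entries are the ones carrying "eval_accuracy".
evals = [e for e in state["log_history"] if "eval_accuracy" in e]
for e in evals:
    print(f"step {e['step']:>5}  "
          f"eval_accuracy={e['eval_accuracy']:.4f}  "
          f"eval_loss={e['eval_loss']:.4f}")
```

Running this against the log above would list one evaluation record per epoch plus the final test-style evaluations at step 1620.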