{ "best_metric": 0.9285714285714286, "best_model_checkpoint": "videomae-base-finetuned-ucf101-subset/checkpoint-1200", "epoch": 3.25, "eval_steps": 500, "global_step": 1200, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.01, "grad_norm": 39.893043518066406, "learning_rate": 4.166666666666667e-06, "loss": 2.3912, "step": 10 }, { "epoch": 0.02, "grad_norm": 21.392333984375, "learning_rate": 8.333333333333334e-06, "loss": 2.4635, "step": 20 }, { "epoch": 0.03, "grad_norm": 22.904409408569336, "learning_rate": 1.25e-05, "loss": 2.3054, "step": 30 }, { "epoch": 0.03, "grad_norm": 18.382219314575195, "learning_rate": 1.6666666666666667e-05, "loss": 2.391, "step": 40 }, { "epoch": 0.04, "grad_norm": 18.44032859802246, "learning_rate": 2.0833333333333336e-05, "loss": 2.3256, "step": 50 }, { "epoch": 0.05, "grad_norm": 20.57721519470215, "learning_rate": 2.5e-05, "loss": 2.2661, "step": 60 }, { "epoch": 0.06, "grad_norm": 27.851577758789062, "learning_rate": 2.916666666666667e-05, "loss": 2.252, "step": 70 }, { "epoch": 0.07, "grad_norm": 20.621288299560547, "learning_rate": 3.3333333333333335e-05, "loss": 2.3036, "step": 80 }, { "epoch": 0.07, "grad_norm": 22.28342056274414, "learning_rate": 3.7500000000000003e-05, "loss": 2.0055, "step": 90 }, { "epoch": 0.08, "grad_norm": 24.50592613220215, "learning_rate": 4.166666666666667e-05, "loss": 2.0529, "step": 100 }, { "epoch": 0.09, "grad_norm": 28.007957458496094, "learning_rate": 4.5833333333333334e-05, "loss": 2.122, "step": 110 }, { "epoch": 0.1, "grad_norm": 22.624725341796875, "learning_rate": 5e-05, "loss": 2.1029, "step": 120 }, { "epoch": 0.11, "grad_norm": 23.730955123901367, "learning_rate": 4.9537037037037035e-05, "loss": 2.1566, "step": 130 }, { "epoch": 0.12, "grad_norm": 32.912437438964844, "learning_rate": 4.9074074074074075e-05, "loss": 1.9197, "step": 140 }, { "epoch": 0.12, "grad_norm": 28.358497619628906, "learning_rate": 4.8611111111111115e-05, "loss": 1.766, "step": 150 }, { "epoch": 0.13, "grad_norm": 38.748905181884766, "learning_rate": 4.814814814814815e-05, "loss": 1.5543, "step": 160 }, { "epoch": 0.14, "grad_norm": 19.396177291870117, "learning_rate": 4.768518518518519e-05, "loss": 1.8231, "step": 170 }, { "epoch": 0.15, "grad_norm": 20.669483184814453, "learning_rate": 4.722222222222222e-05, "loss": 1.8356, "step": 180 }, { "epoch": 0.16, "grad_norm": 16.87503433227539, "learning_rate": 4.675925925925926e-05, "loss": 2.0229, "step": 190 }, { "epoch": 0.17, "grad_norm": 37.929744720458984, "learning_rate": 4.62962962962963e-05, "loss": 1.4805, "step": 200 }, { "epoch": 0.17, "grad_norm": 23.615205764770508, "learning_rate": 4.5833333333333334e-05, "loss": 2.5876, "step": 210 }, { "epoch": 0.18, "grad_norm": 11.07725715637207, "learning_rate": 4.5370370370370374e-05, "loss": 1.4919, "step": 220 }, { "epoch": 0.19, "grad_norm": 22.73790740966797, "learning_rate": 4.490740740740741e-05, "loss": 1.9309, "step": 230 }, { "epoch": 0.2, "grad_norm": 22.745410919189453, "learning_rate": 4.4444444444444447e-05, "loss": 2.5053, "step": 240 }, { "epoch": 0.21, "grad_norm": 36.528160095214844, "learning_rate": 4.3981481481481486e-05, "loss": 2.1563, "step": 250 }, { "epoch": 0.22, "grad_norm": 64.73149108886719, "learning_rate": 4.351851851851852e-05, "loss": 2.5058, "step": 260 }, { "epoch": 0.23, "grad_norm": 33.93071365356445, "learning_rate": 4.305555555555556e-05, "loss": 2.1499, "step": 270 }, { "epoch": 0.23, "grad_norm": 23.45170783996582, 
"learning_rate": 4.259259259259259e-05, "loss": 1.7521, "step": 280 }, { "epoch": 0.24, "grad_norm": 21.1306209564209, "learning_rate": 4.212962962962963e-05, "loss": 1.1103, "step": 290 }, { "epoch": 0.25, "grad_norm": 75.54643249511719, "learning_rate": 4.166666666666667e-05, "loss": 1.7533, "step": 300 }, { "epoch": 0.25, "eval_accuracy": 0.32857142857142857, "eval_loss": 1.7997785806655884, "eval_runtime": 20.0261, "eval_samples_per_second": 3.495, "eval_steps_per_second": 3.495, "step": 300 }, { "epoch": 1.01, "grad_norm": 27.954673767089844, "learning_rate": 4.1203703703703705e-05, "loss": 1.0294, "step": 310 }, { "epoch": 1.02, "grad_norm": 29.380338668823242, "learning_rate": 4.074074074074074e-05, "loss": 1.5711, "step": 320 }, { "epoch": 1.02, "grad_norm": 105.077392578125, "learning_rate": 4.027777777777778e-05, "loss": 1.7588, "step": 330 }, { "epoch": 1.03, "grad_norm": 5.28167200088501, "learning_rate": 3.981481481481482e-05, "loss": 1.3781, "step": 340 }, { "epoch": 1.04, "grad_norm": 82.8572006225586, "learning_rate": 3.935185185185186e-05, "loss": 1.9861, "step": 350 }, { "epoch": 1.05, "grad_norm": 23.33498191833496, "learning_rate": 3.888888888888889e-05, "loss": 1.5307, "step": 360 }, { "epoch": 1.06, "grad_norm": 45.165435791015625, "learning_rate": 3.8425925925925924e-05, "loss": 0.9492, "step": 370 }, { "epoch": 1.07, "grad_norm": 1.3398464918136597, "learning_rate": 3.7962962962962964e-05, "loss": 1.3926, "step": 380 }, { "epoch": 1.07, "grad_norm": 29.025249481201172, "learning_rate": 3.7500000000000003e-05, "loss": 1.7036, "step": 390 }, { "epoch": 1.08, "grad_norm": 42.30586624145508, "learning_rate": 3.7037037037037037e-05, "loss": 1.1419, "step": 400 }, { "epoch": 1.09, "grad_norm": 23.552091598510742, "learning_rate": 3.6574074074074076e-05, "loss": 1.0914, "step": 410 }, { "epoch": 1.1, "grad_norm": 8.62332820892334, "learning_rate": 3.611111111111111e-05, "loss": 1.2866, "step": 420 }, { "epoch": 1.11, "grad_norm": 29.133724212646484, "learning_rate": 3.564814814814815e-05, "loss": 0.9732, "step": 430 }, { "epoch": 1.12, "grad_norm": 8.369429588317871, "learning_rate": 3.518518518518519e-05, "loss": 0.7792, "step": 440 }, { "epoch": 1.12, "grad_norm": 28.84503936767578, "learning_rate": 3.472222222222222e-05, "loss": 1.3297, "step": 450 }, { "epoch": 1.13, "grad_norm": 0.6899290680885315, "learning_rate": 3.425925925925926e-05, "loss": 1.1334, "step": 460 }, { "epoch": 1.14, "grad_norm": 3.0040957927703857, "learning_rate": 3.3796296296296295e-05, "loss": 0.6843, "step": 470 }, { "epoch": 1.15, "grad_norm": 0.4193795621395111, "learning_rate": 3.3333333333333335e-05, "loss": 1.3539, "step": 480 }, { "epoch": 1.16, "grad_norm": 0.723945140838623, "learning_rate": 3.2870370370370375e-05, "loss": 1.2607, "step": 490 }, { "epoch": 1.17, "grad_norm": 25.487699508666992, "learning_rate": 3.240740740740741e-05, "loss": 0.8374, "step": 500 }, { "epoch": 1.18, "grad_norm": 15.434425354003906, "learning_rate": 3.194444444444444e-05, "loss": 1.2351, "step": 510 }, { "epoch": 1.18, "grad_norm": 64.37287902832031, "learning_rate": 3.148148148148148e-05, "loss": 0.4603, "step": 520 }, { "epoch": 1.19, "grad_norm": 2.10884952545166, "learning_rate": 3.101851851851852e-05, "loss": 0.569, "step": 530 }, { "epoch": 1.2, "grad_norm": 38.082462310791016, "learning_rate": 3.055555555555556e-05, "loss": 2.0602, "step": 540 }, { "epoch": 1.21, "grad_norm": 0.26877835392951965, "learning_rate": 3.0092592592592593e-05, "loss": 0.2031, "step": 550 }, { "epoch": 1.22, "grad_norm": 
45.29481506347656, "learning_rate": 2.962962962962963e-05, "loss": 0.1889, "step": 560 }, { "epoch": 1.23, "grad_norm": 0.8152311444282532, "learning_rate": 2.916666666666667e-05, "loss": 1.3687, "step": 570 }, { "epoch": 1.23, "grad_norm": 0.16306141018867493, "learning_rate": 2.8703703703703706e-05, "loss": 1.4408, "step": 580 }, { "epoch": 1.24, "grad_norm": 29.856517791748047, "learning_rate": 2.824074074074074e-05, "loss": 1.8685, "step": 590 }, { "epoch": 1.25, "grad_norm": 1.069265365600586, "learning_rate": 2.777777777777778e-05, "loss": 1.6337, "step": 600 }, { "epoch": 1.25, "eval_accuracy": 0.5571428571428572, "eval_loss": 1.2139314413070679, "eval_runtime": 19.6709, "eval_samples_per_second": 3.559, "eval_steps_per_second": 3.559, "step": 600 }, { "epoch": 2.01, "grad_norm": 53.667381286621094, "learning_rate": 2.7314814814814816e-05, "loss": 0.7485, "step": 610 }, { "epoch": 2.02, "grad_norm": 9.039528846740723, "learning_rate": 2.6851851851851855e-05, "loss": 0.5022, "step": 620 }, { "epoch": 2.02, "grad_norm": 2.096543550491333, "learning_rate": 2.6388888888888892e-05, "loss": 0.0704, "step": 630 }, { "epoch": 2.03, "grad_norm": 0.14604328572750092, "learning_rate": 2.5925925925925925e-05, "loss": 0.6426, "step": 640 }, { "epoch": 2.04, "grad_norm": 80.3462905883789, "learning_rate": 2.5462962962962965e-05, "loss": 0.4556, "step": 650 }, { "epoch": 2.05, "grad_norm": 29.6263484954834, "learning_rate": 2.5e-05, "loss": 0.5818, "step": 660 }, { "epoch": 2.06, "grad_norm": 5.271122932434082, "learning_rate": 2.4537037037037038e-05, "loss": 0.2213, "step": 670 }, { "epoch": 2.07, "grad_norm": 37.88823699951172, "learning_rate": 2.4074074074074074e-05, "loss": 0.7075, "step": 680 }, { "epoch": 2.08, "grad_norm": 0.3303954005241394, "learning_rate": 2.361111111111111e-05, "loss": 0.3398, "step": 690 }, { "epoch": 2.08, "grad_norm": 7.315389633178711, "learning_rate": 2.314814814814815e-05, "loss": 0.3905, "step": 700 }, { "epoch": 2.09, "grad_norm": 5.788703441619873, "learning_rate": 2.2685185185185187e-05, "loss": 0.6432, "step": 710 }, { "epoch": 2.1, "grad_norm": 10.666046142578125, "learning_rate": 2.2222222222222223e-05, "loss": 0.1781, "step": 720 }, { "epoch": 2.11, "grad_norm": 0.13134099543094635, "learning_rate": 2.175925925925926e-05, "loss": 0.7766, "step": 730 }, { "epoch": 2.12, "grad_norm": 5.6078314781188965, "learning_rate": 2.1296296296296296e-05, "loss": 0.0573, "step": 740 }, { "epoch": 2.12, "grad_norm": 57.8688850402832, "learning_rate": 2.0833333333333336e-05, "loss": 0.9231, "step": 750 }, { "epoch": 2.13, "grad_norm": 1.2845256328582764, "learning_rate": 2.037037037037037e-05, "loss": 0.1482, "step": 760 }, { "epoch": 2.14, "grad_norm": 75.59434509277344, "learning_rate": 1.990740740740741e-05, "loss": 0.7703, "step": 770 }, { "epoch": 2.15, "grad_norm": 0.07224389910697937, "learning_rate": 1.9444444444444445e-05, "loss": 0.2001, "step": 780 }, { "epoch": 2.16, "grad_norm": 2.151550769805908, "learning_rate": 1.8981481481481482e-05, "loss": 0.6879, "step": 790 }, { "epoch": 2.17, "grad_norm": 0.5874713063240051, "learning_rate": 1.8518518518518518e-05, "loss": 0.4532, "step": 800 }, { "epoch": 2.17, "grad_norm": 0.1461566686630249, "learning_rate": 1.8055555555555555e-05, "loss": 0.2781, "step": 810 }, { "epoch": 2.18, "grad_norm": 1.0537546873092651, "learning_rate": 1.7592592592592595e-05, "loss": 0.3174, "step": 820 }, { "epoch": 2.19, "grad_norm": 52.02813720703125, "learning_rate": 1.712962962962963e-05, "loss": 0.3687, "step": 830 }, { "epoch": 
2.2, "grad_norm": 0.14705657958984375, "learning_rate": 1.6666666666666667e-05, "loss": 0.3095, "step": 840 }, { "epoch": 2.21, "grad_norm": 10.978480339050293, "learning_rate": 1.6203703703703704e-05, "loss": 0.3199, "step": 850 }, { "epoch": 2.22, "grad_norm": 0.08446181565523148, "learning_rate": 1.574074074074074e-05, "loss": 0.1921, "step": 860 }, { "epoch": 2.23, "grad_norm": 0.10818950086832047, "learning_rate": 1.527777777777778e-05, "loss": 0.1006, "step": 870 }, { "epoch": 2.23, "grad_norm": 0.05260080099105835, "learning_rate": 1.4814814814814815e-05, "loss": 0.0124, "step": 880 }, { "epoch": 2.24, "grad_norm": 0.15152961015701294, "learning_rate": 1.4351851851851853e-05, "loss": 0.1896, "step": 890 }, { "epoch": 2.25, "grad_norm": 104.01317596435547, "learning_rate": 1.388888888888889e-05, "loss": 0.1099, "step": 900 }, { "epoch": 2.25, "eval_accuracy": 0.8142857142857143, "eval_loss": 0.5458886027336121, "eval_runtime": 24.2511, "eval_samples_per_second": 2.886, "eval_steps_per_second": 2.886, "step": 900 }, { "epoch": 3.01, "grad_norm": 0.17340366542339325, "learning_rate": 1.3425925925925928e-05, "loss": 0.0997, "step": 910 }, { "epoch": 3.02, "grad_norm": 0.3285802900791168, "learning_rate": 1.2962962962962962e-05, "loss": 0.0133, "step": 920 }, { "epoch": 3.02, "grad_norm": 0.04922209680080414, "learning_rate": 1.25e-05, "loss": 0.06, "step": 930 }, { "epoch": 3.03, "grad_norm": 0.21828210353851318, "learning_rate": 1.2037037037037037e-05, "loss": 0.0091, "step": 940 }, { "epoch": 3.04, "grad_norm": 2.8969061374664307, "learning_rate": 1.1574074074074075e-05, "loss": 0.0162, "step": 950 }, { "epoch": 3.05, "grad_norm": 0.06720645725727081, "learning_rate": 1.1111111111111112e-05, "loss": 0.0103, "step": 960 }, { "epoch": 3.06, "grad_norm": 0.17596295475959778, "learning_rate": 1.0648148148148148e-05, "loss": 0.017, "step": 970 }, { "epoch": 3.07, "grad_norm": 0.0713464543223381, "learning_rate": 1.0185185185185185e-05, "loss": 0.0262, "step": 980 }, { "epoch": 3.08, "grad_norm": 0.08627210557460785, "learning_rate": 9.722222222222223e-06, "loss": 0.0307, "step": 990 }, { "epoch": 3.08, "grad_norm": 0.07651841640472412, "learning_rate": 9.259259259259259e-06, "loss": 0.0101, "step": 1000 }, { "epoch": 3.09, "grad_norm": 2.683367967605591, "learning_rate": 8.796296296296297e-06, "loss": 0.0203, "step": 1010 }, { "epoch": 3.1, "grad_norm": 0.3468647301197052, "learning_rate": 8.333333333333334e-06, "loss": 0.0139, "step": 1020 }, { "epoch": 3.11, "grad_norm": 0.22564736008644104, "learning_rate": 7.87037037037037e-06, "loss": 0.323, "step": 1030 }, { "epoch": 3.12, "grad_norm": 0.05308224633336067, "learning_rate": 7.4074074074074075e-06, "loss": 0.0047, "step": 1040 }, { "epoch": 3.12, "grad_norm": 0.052336595952510834, "learning_rate": 6.944444444444445e-06, "loss": 0.1145, "step": 1050 }, { "epoch": 3.13, "grad_norm": 161.04202270507812, "learning_rate": 6.481481481481481e-06, "loss": 0.3578, "step": 1060 }, { "epoch": 3.14, "grad_norm": 0.17146830260753632, "learning_rate": 6.0185185185185185e-06, "loss": 0.0197, "step": 1070 }, { "epoch": 3.15, "grad_norm": 0.23476547002792358, "learning_rate": 5.555555555555556e-06, "loss": 0.0207, "step": 1080 }, { "epoch": 3.16, "grad_norm": 0.2218230664730072, "learning_rate": 5.092592592592592e-06, "loss": 0.5924, "step": 1090 }, { "epoch": 3.17, "grad_norm": 0.08028724044561386, "learning_rate": 4.6296296296296296e-06, "loss": 0.0047, "step": 1100 }, { "epoch": 3.17, "grad_norm": 0.07452563941478729, "learning_rate": 
4.166666666666667e-06, "loss": 0.6085, "step": 1110 }, { "epoch": 3.18, "grad_norm": 0.06979314237833023, "learning_rate": 3.7037037037037037e-06, "loss": 0.5321, "step": 1120 }, { "epoch": 3.19, "grad_norm": 0.08404794335365295, "learning_rate": 3.2407407407407406e-06, "loss": 0.0057, "step": 1130 }, { "epoch": 3.2, "grad_norm": 0.08606185019016266, "learning_rate": 2.777777777777778e-06, "loss": 0.2438, "step": 1140 }, { "epoch": 3.21, "grad_norm": 0.06998346745967865, "learning_rate": 2.3148148148148148e-06, "loss": 0.0987, "step": 1150 }, { "epoch": 3.22, "grad_norm": 0.18059991300106049, "learning_rate": 1.8518518518518519e-06, "loss": 0.3073, "step": 1160 }, { "epoch": 3.23, "grad_norm": 0.08045562356710434, "learning_rate": 1.388888888888889e-06, "loss": 0.2227, "step": 1170 }, { "epoch": 3.23, "grad_norm": 0.06636542081832886, "learning_rate": 9.259259259259259e-07, "loss": 0.7776, "step": 1180 }, { "epoch": 3.24, "grad_norm": 0.1821145862340927, "learning_rate": 4.6296296296296297e-07, "loss": 0.0074, "step": 1190 }, { "epoch": 3.25, "grad_norm": 0.10829867422580719, "learning_rate": 0.0, "loss": 1.0531, "step": 1200 }, { "epoch": 3.25, "eval_accuracy": 0.9285714285714286, "eval_loss": 0.2868471145629883, "eval_runtime": 17.7134, "eval_samples_per_second": 3.952, "eval_steps_per_second": 3.952, "step": 1200 }, { "epoch": 3.25, "step": 1200, "total_flos": 1.495384188125184e+18, "train_loss": 0.9583444615236173, "train_runtime": 830.113, "train_samples_per_second": 1.446, "train_steps_per_second": 1.446 }, { "epoch": 3.25, "eval_accuracy": 0.8516129032258064, "eval_loss": 0.5765717029571533, "eval_runtime": 43.5462, "eval_samples_per_second": 3.559, "eval_steps_per_second": 3.559, "step": 1200 }, { "epoch": 3.25, "eval_accuracy": 0.8516129032258064, "eval_loss": 0.5765716433525085, "eval_runtime": 41.7645, "eval_samples_per_second": 3.711, "eval_steps_per_second": 3.711, "step": 1200 } ], "logging_steps": 10, "max_steps": 1200, "num_input_tokens_seen": 0, "num_train_epochs": 9223372036854775807, "save_steps": 500, "total_flos": 1.495384188125184e+18, "train_batch_size": 1, "trial_name": null, "trial_params": null }