{
  "best_metric": 0.8346934110853106,
  "best_model_checkpoint": "videomae-surf-analytics-runpod/checkpoint-370",
  "epoch": 4.1891891891891895,
  "eval_steps": 500,
  "global_step": 370,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02702702702702703,
      "grad_norm": 17.739418029785156,
      "learning_rate": 1.3513513513513515e-05,
      "loss": 1.4705,
      "step": 10
    },
    {
      "epoch": 0.05405405405405406,
      "grad_norm": 6.248379707336426,
      "learning_rate": 2.702702702702703e-05,
      "loss": 1.1378,
      "step": 20
    },
    {
      "epoch": 0.08108108108108109,
      "grad_norm": 6.359808444976807,
      "learning_rate": 4.0540540540540545e-05,
      "loss": 1.0852,
      "step": 30
    },
    {
      "epoch": 0.10810810810810811,
      "grad_norm": 7.196521282196045,
      "learning_rate": 4.954954954954955e-05,
      "loss": 0.7478,
      "step": 40
    },
    {
      "epoch": 0.13513513513513514,
      "grad_norm": 7.588693141937256,
      "learning_rate": 4.804804804804805e-05,
      "loss": 0.653,
      "step": 50
    },
    {
      "epoch": 0.16216216216216217,
      "grad_norm": 9.380898475646973,
      "learning_rate": 4.654654654654655e-05,
      "loss": 0.7808,
      "step": 60
    },
    {
      "epoch": 0.1891891891891892,
      "grad_norm": 9.640559196472168,
      "learning_rate": 4.5045045045045046e-05,
      "loss": 0.6213,
      "step": 70
    },
    {
      "epoch": 0.20270270270270271,
      "eval_accuracy": 0.6970954356846473,
      "eval_f1": 0.6901963848053563,
      "eval_loss": 0.7773587703704834,
      "eval_runtime": 77.0947,
      "eval_samples_per_second": 3.126,
      "eval_steps_per_second": 0.324,
      "step": 75
    },
    {
      "epoch": 1.0135135135135136,
      "grad_norm": 4.199209690093994,
      "learning_rate": 4.354354354354355e-05,
      "loss": 0.4215,
      "step": 80
    },
    {
      "epoch": 1.0405405405405406,
      "grad_norm": 3.8096096515655518,
      "learning_rate": 4.204204204204204e-05,
      "loss": 0.391,
      "step": 90
    },
    {
      "epoch": 1.0675675675675675,
      "grad_norm": 13.743525505065918,
      "learning_rate": 4.0540540540540545e-05,
      "loss": 0.4713,
      "step": 100
    },
    {
      "epoch": 1.0945945945945945,
      "grad_norm": 3.954926013946533,
      "learning_rate": 3.903903903903904e-05,
      "loss": 0.4197,
      "step": 110
    },
    {
      "epoch": 1.1216216216216217,
      "grad_norm": 7.9359211921691895,
      "learning_rate": 3.7537537537537536e-05,
      "loss": 0.3719,
      "step": 120
    },
    {
      "epoch": 1.1486486486486487,
      "grad_norm": 10.24318790435791,
      "learning_rate": 3.603603603603604e-05,
      "loss": 0.3922,
      "step": 130
    },
    {
      "epoch": 1.1756756756756757,
      "grad_norm": 13.8519926071167,
      "learning_rate": 3.453453453453453e-05,
      "loss": 0.4027,
      "step": 140
    },
    {
      "epoch": 1.2027027027027026,
      "grad_norm": 1.3211474418640137,
      "learning_rate": 3.3033033033033035e-05,
      "loss": 0.3269,
      "step": 150
    },
    {
      "epoch": 1.2027027027027026,
      "eval_accuracy": 0.7593360995850622,
      "eval_f1": 0.7561505755143376,
      "eval_loss": 0.6851304173469543,
      "eval_runtime": 78.2702,
      "eval_samples_per_second": 3.079,
      "eval_steps_per_second": 0.319,
      "step": 150
    },
    {
      "epoch": 2.027027027027027,
      "grad_norm": 6.484254360198975,
      "learning_rate": 3.153153153153153e-05,
      "loss": 0.1699,
      "step": 160
    },
    {
      "epoch": 2.054054054054054,
      "grad_norm": 4.243548393249512,
      "learning_rate": 3.0030030030030033e-05,
      "loss": 0.1404,
      "step": 170
    },
    {
      "epoch": 2.081081081081081,
      "grad_norm": 4.49724006652832,
      "learning_rate": 2.852852852852853e-05,
      "loss": 0.2383,
      "step": 180
    },
    {
      "epoch": 2.108108108108108,
      "grad_norm": 5.5315632820129395,
      "learning_rate": 2.702702702702703e-05,
      "loss": 0.3597,
      "step": 190
    },
    {
      "epoch": 2.135135135135135,
      "grad_norm": 7.347559452056885,
      "learning_rate": 2.552552552552553e-05,
      "loss": 0.1501,
      "step": 200
    },
    {
      "epoch": 2.1621621621621623,
      "grad_norm": 0.20061562955379486,
      "learning_rate": 2.4024024024024024e-05,
      "loss": 0.1769,
      "step": 210
    },
    {
      "epoch": 2.189189189189189,
      "grad_norm": 0.2758616507053375,
      "learning_rate": 2.2522522522522523e-05,
      "loss": 0.2339,
      "step": 220
    },
    {
      "epoch": 2.2027027027027026,
      "eval_accuracy": 0.8008298755186722,
      "eval_f1": 0.7992975614249908,
      "eval_loss": 0.5251602530479431,
      "eval_runtime": 75.07,
      "eval_samples_per_second": 3.21,
      "eval_steps_per_second": 0.333,
      "step": 225
    },
    {
      "epoch": 3.0135135135135136,
      "grad_norm": 8.580607414245605,
      "learning_rate": 2.102102102102102e-05,
      "loss": 0.8979,
      "step": 230
    },
    {
      "epoch": 3.0405405405405403,
      "grad_norm": 0.5315948724746704,
      "learning_rate": 1.951951951951952e-05,
      "loss": 0.0559,
      "step": 240
    },
    {
      "epoch": 3.0675675675675675,
      "grad_norm": 0.13150528073310852,
      "learning_rate": 1.801801801801802e-05,
      "loss": 0.1062,
      "step": 250
    },
    {
      "epoch": 3.0945945945945947,
      "grad_norm": 19.857810974121094,
      "learning_rate": 1.6516516516516518e-05,
      "loss": 0.1066,
      "step": 260
    },
    {
      "epoch": 3.1216216216216215,
      "grad_norm": 1.9958362579345703,
      "learning_rate": 1.5015015015015016e-05,
      "loss": 0.1944,
      "step": 270
    },
    {
      "epoch": 3.1486486486486487,
      "grad_norm": 16.243772506713867,
      "learning_rate": 1.3513513513513515e-05,
      "loss": 0.137,
      "step": 280
    },
    {
      "epoch": 3.175675675675676,
      "grad_norm": 3.6971592903137207,
      "learning_rate": 1.2012012012012012e-05,
      "loss": 0.1812,
      "step": 290
    },
    {
      "epoch": 3.2027027027027026,
      "grad_norm": 1.1694248914718628,
      "learning_rate": 1.051051051051051e-05,
      "loss": 0.1931,
      "step": 300
    },
    {
      "epoch": 3.2027027027027026,
      "eval_accuracy": 0.8340248962655602,
      "eval_f1": 0.8341831246711504,
      "eval_loss": 0.4942285716533661,
      "eval_runtime": 73.8969,
      "eval_samples_per_second": 3.261,
      "eval_steps_per_second": 0.338,
      "step": 300
    },
    {
      "epoch": 4.027027027027027,
      "grad_norm": 2.825990915298462,
      "learning_rate": 9.00900900900901e-06,
      "loss": 0.0731,
      "step": 310
    },
    {
      "epoch": 4.054054054054054,
      "grad_norm": 0.2709617614746094,
      "learning_rate": 7.507507507507508e-06,
      "loss": 0.0751,
      "step": 320
    },
    {
      "epoch": 4.081081081081081,
      "grad_norm": 0.10882856696844101,
      "learning_rate": 6.006006006006006e-06,
      "loss": 0.0774,
      "step": 330
    },
    {
      "epoch": 4.108108108108108,
      "grad_norm": 0.09481658786535263,
      "learning_rate": 4.504504504504505e-06,
      "loss": 0.0567,
      "step": 340
    },
    {
      "epoch": 4.135135135135135,
      "grad_norm": 0.3658810257911682,
      "learning_rate": 3.003003003003003e-06,
      "loss": 0.0438,
      "step": 350
    },
    {
      "epoch": 4.162162162162162,
      "grad_norm": 0.9697806239128113,
      "learning_rate": 1.5015015015015015e-06,
      "loss": 0.098,
      "step": 360
    },
    {
      "epoch": 4.1891891891891895,
      "grad_norm": 0.357666552066803,
      "learning_rate": 0.0,
      "loss": 0.1037,
      "step": 370
    },
    {
      "epoch": 4.1891891891891895,
      "eval_accuracy": 0.8340248962655602,
      "eval_f1": 0.8346934110853106,
      "eval_loss": 0.47942548990249634,
      "eval_runtime": 73.9617,
      "eval_samples_per_second": 3.258,
      "eval_steps_per_second": 0.338,
      "step": 370
    },
    {
      "epoch": 4.1891891891891895,
      "step": 370,
      "total_flos": 3.2102444558954004e+18,
      "train_loss": 0.36656422937238536,
      "train_runtime": 1888.2958,
      "train_samples_per_second": 1.959,
      "train_steps_per_second": 0.196
    },
    {
      "epoch": 4.1891891891891895,
      "eval_accuracy": 0.9784075573549258,
      "eval_f1": 0.9783996585344938,
      "eval_loss": 0.06370694935321808,
      "eval_runtime": 252.958,
      "eval_samples_per_second": 2.929,
      "eval_steps_per_second": 0.296,
      "step": 370
    },
    {
      "epoch": 4.1891891891891895,
      "eval_accuracy": 0.8067415730337079,
      "eval_f1": 0.8059471321937128,
      "eval_loss": 0.6186416745185852,
      "eval_runtime": 137.3351,
      "eval_samples_per_second": 3.24,
      "eval_steps_per_second": 0.328,
      "step": 370
    },
    {
      "epoch": 4.1891891891891895,
      "eval_accuracy": 0.8340248962655602,
      "eval_f1": 0.8346934110853106,
      "eval_loss": 0.47942548990249634,
      "eval_runtime": 73.4732,
      "eval_samples_per_second": 3.28,
      "eval_steps_per_second": 0.34,
      "step": 370
    }
  ],
  "logging_steps": 10,
  "max_steps": 370,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9223372036854775807,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.2102444558954004e+18,
  "train_batch_size": 10,
  "trial_name": null,
  "trial_params": null
}