quickdraw-ConvNeXT-Tiny-Finetune / trainer_state.json
{
"best_metric": 0.8391695022583008,
"best_model_checkpoint": "./quickdraw-ConvNeXT-Tiny-Finetune/checkpoint-25000",
"epoch": 7.0,
"eval_steps": 5000,
"global_step": 61530,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.11376564277588168,
"grad_norm": 7.832313537597656,
"learning_rate": 0.00015952,
"loss": 3.8212,
"step": 1000
},
{
"epoch": 0.22753128555176336,
"grad_norm": 2.515751600265503,
"learning_rate": 0.00031952,
"loss": 1.4714,
"step": 2000
},
{
"epoch": 0.3412969283276451,
"grad_norm": 1.605010747909546,
"learning_rate": 0.00047952000000000004,
"loss": 1.2045,
"step": 3000
},
{
"epoch": 0.4550625711035267,
"grad_norm": 1.3963702917099,
"learning_rate": 0.0006395200000000001,
"loss": 1.1247,
"step": 4000
},
{
"epoch": 0.5688282138794084,
"grad_norm": 1.0773191452026367,
"learning_rate": 0.00079952,
"loss": 1.0895,
"step": 5000
},
{
"epoch": 0.5688282138794084,
"eval_accuracy": 0.727772,
"eval_loss": 1.0618326663970947,
"eval_runtime": 25.193,
"eval_samples_per_second": 9923.407,
"eval_steps_per_second": 19.41,
"step": 5000
},
{
"epoch": 0.6825938566552902,
"grad_norm": 0.9033334255218506,
"learning_rate": 0.0007993861659457679,
"loss": 1.0466,
"step": 6000
},
{
"epoch": 0.7963594994311718,
"grad_norm": 0.9264630079269409,
"learning_rate": 0.000797541628656647,
"loss": 1.0055,
"step": 7000
},
{
"epoch": 0.9101251422070534,
"grad_norm": 0.8748724460601807,
"learning_rate": 0.0007944683860683685,
"loss": 0.9762,
"step": 8000
},
{
"epoch": 1.023890784982935,
"grad_norm": 0.7623477578163147,
"learning_rate": 0.0007901820532646013,
"loss": 0.9451,
"step": 9000
},
{
"epoch": 1.1376564277588168,
"grad_norm": 0.7780616283416748,
"learning_rate": 0.000784687285963723,
"loss": 0.8912,
"step": 10000
},
{
"epoch": 1.1376564277588168,
"eval_accuracy": 0.760228,
"eval_loss": 0.9315244555473328,
"eval_runtime": 25.8226,
"eval_samples_per_second": 9681.46,
"eval_steps_per_second": 18.937,
"step": 10000
},
{
"epoch": 1.2514220705346986,
"grad_norm": 0.8434277772903442,
"learning_rate": 0.0007780120029676767,
"loss": 0.8896,
"step": 11000
},
{
"epoch": 1.36518771331058,
"grad_norm": 0.7569168210029602,
"learning_rate": 0.0007701634548188583,
"loss": 0.8879,
"step": 12000
},
{
"epoch": 1.4789533560864618,
"grad_norm": 0.8010281920433044,
"learning_rate": 0.000761171966395704,
"loss": 0.8761,
"step": 13000
},
{
"epoch": 1.5927189988623436,
"grad_norm": 0.6517601013183594,
"learning_rate": 0.0007510759539838813,
"loss": 0.8712,
"step": 14000
},
{
"epoch": 1.7064846416382253,
"grad_norm": 1.0584135055541992,
"learning_rate": 0.0007398863836452142,
"loss": 0.8658,
"step": 15000
},
{
"epoch": 1.7064846416382253,
"eval_accuracy": 0.770068,
"eval_loss": 0.887133777141571,
"eval_runtime": 25.3772,
"eval_samples_per_second": 9851.358,
"eval_steps_per_second": 19.269,
"step": 15000
},
{
"epoch": 1.820250284414107,
"grad_norm": 1.1719928979873657,
"learning_rate": 0.0007276473584467834,
"loss": 0.863,
"step": 16000
},
{
"epoch": 1.9340159271899886,
"grad_norm": 0.9330978393554688,
"learning_rate": 0.0007143966683828085,
"loss": 0.8548,
"step": 17000
},
{
"epoch": 2.04778156996587,
"grad_norm": 0.6235845685005188,
"learning_rate": 0.0007001899190878672,
"loss": 0.8088,
"step": 18000
},
{
"epoch": 2.161547212741752,
"grad_norm": 0.6521238684654236,
"learning_rate": 0.0006850425416138172,
"loss": 0.7471,
"step": 19000
},
{
"epoch": 2.2753128555176336,
"grad_norm": 0.6797402501106262,
"learning_rate": 0.0006690314994804036,
"loss": 0.7558,
"step": 20000
},
{
"epoch": 2.2753128555176336,
"eval_accuracy": 0.77796,
"eval_loss": 0.8652966022491455,
"eval_runtime": 25.3581,
"eval_samples_per_second": 9858.786,
"eval_steps_per_second": 19.284,
"step": 20000
},
{
"epoch": 2.3890784982935154,
"grad_norm": 0.9936743974685669,
"learning_rate": 0.0006521741835218227,
"loss": 0.7588,
"step": 21000
},
{
"epoch": 2.502844141069397,
"grad_norm": 1.0131540298461914,
"learning_rate": 0.000634556245420806,
"loss": 0.7554,
"step": 22000
},
{
"epoch": 2.616609783845279,
"grad_norm": 0.7528037428855896,
"learning_rate": 0.0006161968213858429,
"loss": 0.7605,
"step": 23000
},
{
"epoch": 2.73037542662116,
"grad_norm": 0.6982817649841309,
"learning_rate": 0.0005971891952805355,
"loss": 0.7584,
"step": 24000
},
{
"epoch": 2.8441410693970424,
"grad_norm": 0.6886234283447266,
"learning_rate": 0.000577554012761257,
"loss": 0.7578,
"step": 25000
},
{
"epoch": 2.8441410693970424,
"eval_accuracy": 0.782604,
"eval_loss": 0.8391695022583008,
"eval_runtime": 25.446,
"eval_samples_per_second": 9824.738,
"eval_steps_per_second": 19.217,
"step": 25000
},
{
"epoch": 2.9579067121729237,
"grad_norm": 0.7857435345649719,
"learning_rate": 0.0005573910397947634,
"loss": 0.7511,
"step": 26000
},
{
"epoch": 3.0716723549488054,
"grad_norm": 0.7772754430770874,
"learning_rate": 0.0005367430674286641,
"loss": 0.664,
"step": 27000
},
{
"epoch": 3.185437997724687,
"grad_norm": 0.8711711168289185,
"learning_rate": 0.0005156524423003168,
"loss": 0.6158,
"step": 28000
},
{
"epoch": 3.299203640500569,
"grad_norm": 0.7758294343948364,
"learning_rate": 0.0004942047213203677,
"loss": 0.621,
"step": 29000
},
{
"epoch": 3.4129692832764507,
"grad_norm": 0.7045032978057861,
"learning_rate": 0.00047246612783901975,
"loss": 0.6249,
"step": 30000
},
{
"epoch": 3.4129692832764507,
"eval_accuracy": 0.784164,
"eval_loss": 0.8584216237068176,
"eval_runtime": 25.4184,
"eval_samples_per_second": 9835.409,
"eval_steps_per_second": 19.238,
"step": 30000
},
{
"epoch": 3.526734926052332,
"grad_norm": 0.7496004104614258,
"learning_rate": 0.00045052583490800234,
"loss": 0.6296,
"step": 31000
},
{
"epoch": 3.640500568828214,
"grad_norm": 1.1859596967697144,
"learning_rate": 0.00042840767358049813,
"loss": 0.6323,
"step": 32000
},
{
"epoch": 3.7542662116040955,
"grad_norm": 0.6882604360580444,
"learning_rate": 0.0004062240257885585,
"loss": 0.6255,
"step": 33000
},
{
"epoch": 3.868031854379977,
"grad_norm": 0.7085530757904053,
"learning_rate": 0.00038399898691194754,
"loss": 0.6267,
"step": 34000
},
{
"epoch": 3.981797497155859,
"grad_norm": 0.7478043437004089,
"learning_rate": 0.00036184548192539,
"loss": 0.6243,
"step": 35000
},
{
"epoch": 3.981797497155859,
"eval_accuracy": 0.7882,
"eval_loss": 0.8410258293151855,
"eval_runtime": 25.3906,
"eval_samples_per_second": 9846.165,
"eval_steps_per_second": 19.259,
"step": 35000
},
{
"epoch": 4.09556313993174,
"grad_norm": 0.9012534022331238,
"learning_rate": 0.0003397875734682125,
"loss": 0.481,
"step": 36000
},
{
"epoch": 4.2093287827076225,
"grad_norm": 0.8275543451309204,
"learning_rate": 0.0003179373373288462,
"loss": 0.4521,
"step": 37000
},
{
"epoch": 4.323094425483504,
"grad_norm": 0.7898097038269043,
"learning_rate": 0.0002963185067428097,
"loss": 0.4555,
"step": 38000
},
{
"epoch": 4.436860068259386,
"grad_norm": 0.853428840637207,
"learning_rate": 0.00027504092655271806,
"loss": 0.4603,
"step": 39000
},
{
"epoch": 4.550625711035267,
"grad_norm": 0.857472836971283,
"learning_rate": 0.0002541277079880658,
"loss": 0.4585,
"step": 40000
},
{
"epoch": 4.550625711035267,
"eval_accuracy": 0.782792,
"eval_loss": 0.9402379989624023,
"eval_runtime": 25.4609,
"eval_samples_per_second": 9818.981,
"eval_steps_per_second": 19.206,
"step": 40000
},
{
"epoch": 4.664391353811149,
"grad_norm": 0.7866658568382263,
"learning_rate": 0.00023368511069100407,
"loss": 0.4596,
"step": 41000
},
{
"epoch": 4.778156996587031,
"grad_norm": 0.8958695530891418,
"learning_rate": 0.0002137353389513713,
"loss": 0.4585,
"step": 42000
},
{
"epoch": 4.891922639362912,
"grad_norm": 0.8633857369422913,
"learning_rate": 0.0001943797571574129,
"loss": 0.4587,
"step": 43000
},
{
"epoch": 5.005688282138794,
"grad_norm": 0.6427882313728333,
"learning_rate": 0.0001756393889071532,
"loss": 0.4445,
"step": 44000
},
{
"epoch": 5.1194539249146755,
"grad_norm": 0.8369711637496948,
"learning_rate": 0.00015760945363407598,
"loss": 0.2934,
"step": 45000
},
{
"epoch": 5.1194539249146755,
"eval_accuracy": 0.775848,
"eval_loss": 1.0586884021759033,
"eval_runtime": 25.6083,
"eval_samples_per_second": 9762.449,
"eval_steps_per_second": 19.095,
"step": 45000
},
{
"epoch": 5.233219567690558,
"grad_norm": 0.8710008859634399,
"learning_rate": 0.00014030953504874178,
"loss": 0.2876,
"step": 46000
},
{
"epoch": 5.346985210466439,
"grad_norm": 1.0742043256759644,
"learning_rate": 0.00012381145329356995,
"loss": 0.286,
"step": 47000
},
{
"epoch": 5.460750853242321,
"grad_norm": 1.2184427976608276,
"learning_rate": 0.00010818135193410835,
"loss": 0.2893,
"step": 48000
},
{
"epoch": 5.5745164960182025,
"grad_norm": 1.0682432651519775,
"learning_rate": 9.343620803524462e-05,
"loss": 0.2883,
"step": 49000
},
{
"epoch": 5.688282138794084,
"grad_norm": 0.9291980862617493,
"learning_rate": 7.965094137603242e-05,
"loss": 0.2869,
"step": 50000
},
{
"epoch": 5.688282138794084,
"eval_accuracy": 0.774452,
"eval_loss": 1.1279938220977783,
"eval_runtime": 25.7253,
"eval_samples_per_second": 9718.054,
"eval_steps_per_second": 19.009,
"step": 50000
},
{
"epoch": 5.802047781569966,
"grad_norm": 0.9417611360549927,
"learning_rate": 6.684052520336171e-05,
"loss": 0.2845,
"step": 51000
},
{
"epoch": 5.915813424345847,
"grad_norm": 0.8925788998603821,
"learning_rate": 5.507004898369141e-05,
"loss": 0.2845,
"step": 52000
},
{
"epoch": 6.0295790671217295,
"grad_norm": 0.6515630483627319,
"learning_rate": 4.435229754380843e-05,
"loss": 0.2576,
"step": 53000
},
{
"epoch": 6.143344709897611,
"grad_norm": 0.6938067674636841,
"learning_rate": 3.474172756282932e-05,
"loss": 0.187,
"step": 54000
},
{
"epoch": 6.257110352673493,
"grad_norm": 0.7021601796150208,
"learning_rate": 2.6248777825917638e-05,
"loss": 0.1837,
"step": 55000
},
{
"epoch": 6.257110352673493,
"eval_accuracy": 0.768376,
"eval_loss": 1.2402174472808838,
"eval_runtime": 25.1613,
"eval_samples_per_second": 9935.898,
"eval_steps_per_second": 19.435,
"step": 55000
},
{
"epoch": 6.370875995449374,
"grad_norm": 1.0282700061798096,
"learning_rate": 1.891660083956399e-05,
"loss": 0.1852,
"step": 56000
},
{
"epoch": 6.484641638225256,
"grad_norm": 0.7588028311729431,
"learning_rate": 1.2753160649656704e-05,
"loss": 0.1838,
"step": 57000
},
{
"epoch": 6.598407281001138,
"grad_norm": 0.9116990566253662,
"learning_rate": 7.789773571614944e-06,
"loss": 0.1857,
"step": 58000
},
{
"epoch": 6.712172923777019,
"grad_norm": 0.8666180372238159,
"learning_rate": 4.031830724955432e-06,
"loss": 0.1855,
"step": 59000
},
{
"epoch": 6.825938566552901,
"grad_norm": 0.7389401793479919,
"learning_rate": 1.4984261415641243e-06,
"loss": 0.1858,
"step": 60000
},
{
"epoch": 6.825938566552901,
"eval_accuracy": 0.767868,
"eval_loss": 1.2570157051086426,
"eval_runtime": 25.8837,
"eval_samples_per_second": 9658.593,
"eval_steps_per_second": 18.892,
"step": 60000
},
{
"epoch": 6.939704209328783,
"grad_norm": 0.871012806892395,
"learning_rate": 1.9231154857375366e-07,
"loss": 0.1816,
"step": 61000
},
{
"epoch": 7.0,
"step": 61530,
"total_flos": 4.160918550096e+18,
"train_loss": 0.6576268009338925,
"train_runtime": 10956.031,
"train_samples_per_second": 2875.129,
"train_steps_per_second": 5.616
}
],
"logging_steps": 1000,
"max_steps": 61530,
"num_input_tokens_seen": 0,
"num_train_epochs": 7,
"save_steps": 5000,
"total_flos": 4.160918550096e+18,
"train_batch_size": 512,
"trial_name": null,
"trial_params": null
}