|
{
  "best_metric": 0.8704663212435233,
  "best_model_checkpoint": "./vit-focal-skin/checkpoint-3130",
  "epoch": 6.0,
  "global_step": 3756,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08,
      "learning_rate": 0.00019733759318423856,
      "loss": 0.325,
      "step": 50
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.0001946751863684771,
      "loss": 0.2897,
      "step": 100
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00019201277955271565,
      "loss": 0.2508,
      "step": 150
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00018935037273695422,
      "loss": 0.2626,
      "step": 200
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00018668796592119277,
      "loss": 0.2308,
      "step": 250
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00018402555910543132,
      "loss": 0.2437,
      "step": 300
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.00018136315228966987,
      "loss": 0.2594,
      "step": 350
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.0001787007454739084,
      "loss": 0.2355,
      "step": 400
    },
    {
      "epoch": 0.72,
      "learning_rate": 0.000176038338658147,
      "loss": 0.2457,
      "step": 450
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.0001733759318423855,
      "loss": 0.2136,
      "step": 500
    },
    {
      "epoch": 0.88,
      "learning_rate": 0.00017071352502662408,
      "loss": 0.2622,
      "step": 550
    },
    {
      "epoch": 0.96,
      "learning_rate": 0.00016805111821086263,
      "loss": 0.1966,
      "step": 600
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.8290155440414507,
      "eval_f1": 0.8306733167328754,
      "eval_loss": 0.3647349774837494,
      "eval_precision": 0.8430511449105799,
      "eval_recall": 0.8290155440414507,
      "eval_runtime": 1.4278,
      "eval_samples_per_second": 135.17,
      "eval_steps_per_second": 17.509,
      "step": 626
    },
    {
      "epoch": 1.04,
      "learning_rate": 0.00016538871139510118,
      "loss": 0.1998,
      "step": 650
    },
    {
      "epoch": 1.12,
      "learning_rate": 0.00016272630457933972,
      "loss": 0.1312,
      "step": 700
    },
    {
      "epoch": 1.2,
      "learning_rate": 0.0001600638977635783,
      "loss": 0.1477,
      "step": 750
    },
    {
      "epoch": 1.28,
      "learning_rate": 0.00015740149094781684,
      "loss": 0.1155,
      "step": 800
    },
    {
      "epoch": 1.36,
      "learning_rate": 0.0001547390841320554,
      "loss": 0.1591,
      "step": 850
    },
    {
      "epoch": 1.44,
      "learning_rate": 0.00015207667731629394,
      "loss": 0.1568,
      "step": 900
    },
    {
      "epoch": 1.52,
      "learning_rate": 0.00014941427050053249,
      "loss": 0.1322,
      "step": 950
    },
    {
      "epoch": 1.6,
      "learning_rate": 0.00014675186368477103,
      "loss": 0.1478,
      "step": 1000
    },
    {
      "epoch": 1.68,
      "learning_rate": 0.00014408945686900958,
      "loss": 0.1145,
      "step": 1050
    },
    {
      "epoch": 1.76,
      "learning_rate": 0.00014142705005324815,
      "loss": 0.1483,
      "step": 1100
    },
    {
      "epoch": 1.84,
      "learning_rate": 0.0001387646432374867,
      "loss": 0.1517,
      "step": 1150
    },
    {
      "epoch": 1.92,
      "learning_rate": 0.00013610223642172525,
      "loss": 0.1294,
      "step": 1200
    },
    {
      "epoch": 2.0,
      "learning_rate": 0.0001334398296059638,
      "loss": 0.1434,
      "step": 1250
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8238341968911918,
      "eval_f1": 0.825913518485432,
      "eval_loss": 0.38836923241615295,
      "eval_precision": 0.8418087741274269,
      "eval_recall": 0.8238341968911918,
      "eval_runtime": 1.4008,
      "eval_samples_per_second": 137.782,
      "eval_steps_per_second": 17.847,
      "step": 1252
    },
    {
      "epoch": 2.08,
      "learning_rate": 0.00013077742279020234,
      "loss": 0.0846,
      "step": 1300
    },
    {
      "epoch": 2.16,
      "learning_rate": 0.00012811501597444092,
      "loss": 0.0635,
      "step": 1350
    },
    {
      "epoch": 2.24,
      "learning_rate": 0.00012545260915867944,
      "loss": 0.0466,
      "step": 1400
    },
    {
      "epoch": 2.32,
      "learning_rate": 0.000122790202342918,
      "loss": 0.0757,
      "step": 1450
    },
    {
      "epoch": 2.4,
      "learning_rate": 0.00012012779552715656,
      "loss": 0.0996,
      "step": 1500
    },
    {
      "epoch": 2.48,
      "learning_rate": 0.00011746538871139509,
      "loss": 0.089,
      "step": 1550
    },
    {
      "epoch": 2.56,
      "learning_rate": 0.00011480298189563365,
      "loss": 0.0679,
      "step": 1600
    },
    {
      "epoch": 2.64,
      "learning_rate": 0.00011214057507987221,
      "loss": 0.0747,
      "step": 1650
    },
    {
      "epoch": 2.72,
      "learning_rate": 0.00010947816826411077,
      "loss": 0.066,
      "step": 1700
    },
    {
      "epoch": 2.8,
      "learning_rate": 0.0001068157614483493,
      "loss": 0.0714,
      "step": 1750
    },
    {
      "epoch": 2.88,
      "learning_rate": 0.00010415335463258787,
      "loss": 0.0776,
      "step": 1800
    },
    {
      "epoch": 2.96,
      "learning_rate": 0.00010149094781682643,
      "loss": 0.058,
      "step": 1850
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8186528497409327,
      "eval_f1": 0.8136687258538028,
      "eval_loss": 0.5063741207122803,
      "eval_precision": 0.8183198316271921,
      "eval_recall": 0.8186528497409327,
      "eval_runtime": 1.4832,
      "eval_samples_per_second": 130.125,
      "eval_steps_per_second": 16.856,
      "step": 1878
    },
    {
      "epoch": 3.04,
      "learning_rate": 9.882854100106496e-05,
      "loss": 0.0361,
      "step": 1900
    },
    {
      "epoch": 3.12,
      "learning_rate": 9.616613418530351e-05,
      "loss": 0.0305,
      "step": 1950
    },
    {
      "epoch": 3.19,
      "learning_rate": 9.350372736954207e-05,
      "loss": 0.0224,
      "step": 2000
    },
    {
      "epoch": 3.27,
      "learning_rate": 9.084132055378062e-05,
      "loss": 0.0186,
      "step": 2050
    },
    {
      "epoch": 3.35,
      "learning_rate": 8.817891373801918e-05,
      "loss": 0.0138,
      "step": 2100
    },
    {
      "epoch": 3.43,
      "learning_rate": 8.556975505857295e-05,
      "loss": 0.0344,
      "step": 2150
    },
    {
      "epoch": 3.51,
      "learning_rate": 8.29073482428115e-05,
      "loss": 0.0158,
      "step": 2200
    },
    {
      "epoch": 3.59,
      "learning_rate": 8.029818956336529e-05,
      "loss": 0.0557,
      "step": 2250
    },
    {
      "epoch": 3.67,
      "learning_rate": 7.763578274760383e-05,
      "loss": 0.0409,
      "step": 2300
    },
    {
      "epoch": 3.75,
      "learning_rate": 7.49733759318424e-05,
      "loss": 0.0374,
      "step": 2350
    },
    {
      "epoch": 3.83,
      "learning_rate": 7.231096911608094e-05,
      "loss": 0.0301,
      "step": 2400
    },
    {
      "epoch": 3.91,
      "learning_rate": 6.96485623003195e-05,
      "loss": 0.02,
      "step": 2450
    },
    {
      "epoch": 3.99,
      "learning_rate": 6.698615548455805e-05,
      "loss": 0.02,
      "step": 2500
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8393782383419689,
      "eval_f1": 0.8431497010182372,
      "eval_loss": 0.5476517081260681,
      "eval_precision": 0.8537832767554047,
      "eval_recall": 0.8393782383419689,
      "eval_runtime": 1.5001,
      "eval_samples_per_second": 128.658,
      "eval_steps_per_second": 16.666,
      "step": 2504
    },
    {
      "epoch": 4.07,
      "learning_rate": 6.43237486687966e-05,
      "loss": 0.0096,
      "step": 2550
    },
    {
      "epoch": 4.15,
      "learning_rate": 6.166134185303514e-05,
      "loss": 0.0025,
      "step": 2600
    },
    {
      "epoch": 4.23,
      "learning_rate": 5.8998935037273696e-05,
      "loss": 0.0031,
      "step": 2650
    },
    {
      "epoch": 4.31,
      "learning_rate": 5.633652822151225e-05,
      "loss": 0.0018,
      "step": 2700
    },
    {
      "epoch": 4.39,
      "learning_rate": 5.36741214057508e-05,
      "loss": 0.0035,
      "step": 2750
    },
    {
      "epoch": 4.47,
      "learning_rate": 5.101171458998936e-05,
      "loss": 0.0042,
      "step": 2800
    },
    {
      "epoch": 4.55,
      "learning_rate": 4.8349307774227905e-05,
      "loss": 0.0046,
      "step": 2850
    },
    {
      "epoch": 4.63,
      "learning_rate": 4.568690095846646e-05,
      "loss": 0.002,
      "step": 2900
    },
    {
      "epoch": 4.71,
      "learning_rate": 4.3024494142705005e-05,
      "loss": 0.0039,
      "step": 2950
    },
    {
      "epoch": 4.79,
      "learning_rate": 4.036208732694356e-05,
      "loss": 0.0011,
      "step": 3000
    },
    {
      "epoch": 4.87,
      "learning_rate": 3.769968051118211e-05,
      "loss": 0.0106,
      "step": 3050
    },
    {
      "epoch": 4.95,
      "learning_rate": 3.503727369542067e-05,
      "loss": 0.0018,
      "step": 3100
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.8704663212435233,
      "eval_f1": 0.8749284792904506,
      "eval_loss": 0.48757874965667725,
      "eval_precision": 0.8863835430545274,
      "eval_recall": 0.8704663212435233,
      "eval_runtime": 1.4425,
      "eval_samples_per_second": 133.795,
      "eval_steps_per_second": 17.331,
      "step": 3130
    },
    {
      "epoch": 5.03,
      "learning_rate": 3.2374866879659214e-05,
      "loss": 0.0037,
      "step": 3150
    },
    {
      "epoch": 5.11,
      "learning_rate": 2.971246006389776e-05,
      "loss": 0.0004,
      "step": 3200
    },
    {
      "epoch": 5.19,
      "learning_rate": 2.7050053248136315e-05,
      "loss": 0.0004,
      "step": 3250
    },
    {
      "epoch": 5.27,
      "learning_rate": 2.438764643237487e-05,
      "loss": 0.0004,
      "step": 3300
    },
    {
      "epoch": 5.35,
      "learning_rate": 2.172523961661342e-05,
      "loss": 0.0003,
      "step": 3350
    },
    {
      "epoch": 5.43,
      "learning_rate": 1.906283280085197e-05,
      "loss": 0.0014,
      "step": 3400
    },
    {
      "epoch": 5.51,
      "learning_rate": 1.6400425985090524e-05,
      "loss": 0.0004,
      "step": 3450
    },
    {
      "epoch": 5.59,
      "learning_rate": 1.3738019169329076e-05,
      "loss": 0.0004,
      "step": 3500
    },
    {
      "epoch": 5.67,
      "learning_rate": 1.1075612353567626e-05,
      "loss": 0.0014,
      "step": 3550
    },
    {
      "epoch": 5.75,
      "learning_rate": 8.413205537806178e-06,
      "loss": 0.0004,
      "step": 3600
    },
    {
      "epoch": 5.83,
      "learning_rate": 5.750798722044729e-06,
      "loss": 0.0003,
      "step": 3650
    },
    {
      "epoch": 5.91,
      "learning_rate": 3.08839190628328e-06,
      "loss": 0.0003,
      "step": 3700
    },
    {
      "epoch": 5.99,
      "learning_rate": 4.259850905218318e-07,
      "loss": 0.0003,
      "step": 3750
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.8704663212435233,
      "eval_f1": 0.8761642097552051,
      "eval_loss": 0.4871196746826172,
      "eval_precision": 0.8862443826726218,
      "eval_recall": 0.8704663212435233,
      "eval_runtime": 1.5001,
      "eval_samples_per_second": 128.657,
      "eval_steps_per_second": 16.665,
      "step": 3756
    },
    {
      "epoch": 6.0,
      "step": 3756,
      "total_flos": 4.65670232933972e+18,
      "train_loss": 0.08256206865654514,
      "train_runtime": 1281.0434,
      "train_samples_per_second": 46.907,
      "train_steps_per_second": 2.932
    }
  ],
  "max_steps": 3756,
  "num_train_epochs": 6,
  "total_flos": 4.65670232933972e+18,
  "trial_name": null,
  "trial_params": null
}
|
|