{
  "best_metric": 0.9365918097754293,
  "best_model_checkpoint": "swin-tiny-patch4-window7-224-finetuned-eurosat/checkpoint-213",
  "epoch": 9.68421052631579,
  "eval_steps": 500,
  "global_step": 230,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.42105263157894735,
      "grad_norm": 8.034697532653809,
      "learning_rate": 2.173913043478261e-05,
      "loss": 0.3573,
      "step": 10
    },
    {
      "epoch": 0.8421052631578947,
      "grad_norm": 7.977205753326416,
      "learning_rate": 4.347826086956522e-05,
      "loss": 0.3125,
      "step": 20
    },
    {
      "epoch": 0.968421052631579,
      "eval_accuracy": 0.8731836195508587,
      "eval_loss": 0.3340586721897125,
      "eval_runtime": 7.3502,
      "eval_samples_per_second": 102.991,
      "eval_steps_per_second": 3.265,
      "step": 23
    },
    {
      "epoch": 1.263157894736842,
      "grad_norm": 9.418652534484863,
      "learning_rate": 4.830917874396135e-05,
      "loss": 0.2766,
      "step": 30
    },
    {
      "epoch": 1.6842105263157894,
      "grad_norm": 11.136240005493164,
      "learning_rate": 4.589371980676328e-05,
      "loss": 0.2977,
      "step": 40
    },
    {
      "epoch": 1.9789473684210526,
      "eval_accuracy": 0.9062087186261559,
      "eval_loss": 0.2942771017551422,
      "eval_runtime": 7.3815,
      "eval_samples_per_second": 102.554,
      "eval_steps_per_second": 3.251,
      "step": 47
    },
    {
      "epoch": 2.1052631578947367,
      "grad_norm": 8.324960708618164,
      "learning_rate": 4.347826086956522e-05,
      "loss": 0.2961,
      "step": 50
    },
    {
      "epoch": 2.526315789473684,
      "grad_norm": 7.641033172607422,
      "learning_rate": 4.106280193236715e-05,
      "loss": 0.2195,
      "step": 60
    },
    {
      "epoch": 2.9473684210526314,
      "grad_norm": 8.356009483337402,
      "learning_rate": 3.864734299516908e-05,
      "loss": 0.2677,
      "step": 70
    },
    {
      "epoch": 2.9894736842105263,
      "eval_accuracy": 0.916776750330251,
      "eval_loss": 0.2374182939529419,
      "eval_runtime": 10.1855,
      "eval_samples_per_second": 74.322,
      "eval_steps_per_second": 2.356,
      "step": 71
    },
    {
      "epoch": 3.3684210526315788,
      "grad_norm": 7.1681694984436035,
      "learning_rate": 3.6231884057971014e-05,
      "loss": 0.2403,
      "step": 80
    },
    {
      "epoch": 3.7894736842105265,
      "grad_norm": 4.950290203094482,
      "learning_rate": 3.381642512077295e-05,
      "loss": 0.2483,
      "step": 90
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9207397622192867,
      "eval_loss": 0.22298040986061096,
      "eval_runtime": 7.1036,
      "eval_samples_per_second": 106.565,
      "eval_steps_per_second": 3.379,
      "step": 95
    },
    {
      "epoch": 4.2105263157894735,
      "grad_norm": 6.089962959289551,
      "learning_rate": 3.140096618357488e-05,
      "loss": 0.2446,
      "step": 100
    },
    {
      "epoch": 4.631578947368421,
      "grad_norm": 5.7862677574157715,
      "learning_rate": 2.8985507246376814e-05,
      "loss": 0.2331,
      "step": 110
    },
    {
      "epoch": 4.968421052631579,
      "eval_accuracy": 0.9233817701453104,
      "eval_loss": 0.21976187825202942,
      "eval_runtime": 7.5299,
      "eval_samples_per_second": 100.533,
      "eval_steps_per_second": 3.187,
      "step": 118
    },
    {
      "epoch": 5.052631578947368,
      "grad_norm": 3.7768943309783936,
      "learning_rate": 2.6570048309178748e-05,
      "loss": 0.22,
      "step": 120
    },
    {
      "epoch": 5.473684210526316,
      "grad_norm": 7.145321846008301,
      "learning_rate": 2.4154589371980676e-05,
      "loss": 0.2209,
      "step": 130
    },
    {
      "epoch": 5.894736842105263,
      "grad_norm": 9.237335205078125,
      "learning_rate": 2.173913043478261e-05,
      "loss": 0.2315,
      "step": 140
    },
    {
      "epoch": 5.978947368421053,
      "eval_accuracy": 0.9180977542932629,
      "eval_loss": 0.2150202840566635,
      "eval_runtime": 6.91,
      "eval_samples_per_second": 109.551,
      "eval_steps_per_second": 3.473,
      "step": 142
    },
    {
      "epoch": 6.315789473684211,
      "grad_norm": 6.9982428550720215,
      "learning_rate": 1.932367149758454e-05,
      "loss": 0.1987,
      "step": 150
    },
    {
      "epoch": 6.7368421052631575,
      "grad_norm": 7.03655481338501,
      "learning_rate": 1.6908212560386476e-05,
      "loss": 0.2249,
      "step": 160
    },
    {
      "epoch": 6.989473684210527,
      "eval_accuracy": 0.9233817701453104,
      "eval_loss": 0.21770989894866943,
      "eval_runtime": 7.2386,
      "eval_samples_per_second": 104.578,
      "eval_steps_per_second": 3.316,
      "step": 166
    },
    {
      "epoch": 7.157894736842105,
      "grad_norm": 5.7152581214904785,
      "learning_rate": 1.4492753623188407e-05,
      "loss": 0.1732,
      "step": 170
    },
    {
      "epoch": 7.578947368421053,
      "grad_norm": 6.694705009460449,
      "learning_rate": 1.2077294685990338e-05,
      "loss": 0.2026,
      "step": 180
    },
    {
      "epoch": 8.0,
      "grad_norm": 5.67457389831543,
      "learning_rate": 9.66183574879227e-06,
      "loss": 0.1683,
      "step": 190
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.9326287978863936,
      "eval_loss": 0.20676808059215546,
      "eval_runtime": 7.2386,
      "eval_samples_per_second": 104.578,
      "eval_steps_per_second": 3.316,
      "step": 190
    },
    {
      "epoch": 8.421052631578947,
      "grad_norm": 5.295100688934326,
      "learning_rate": 7.246376811594203e-06,
      "loss": 0.1825,
      "step": 200
    },
    {
      "epoch": 8.842105263157894,
      "grad_norm": 8.137918472290039,
      "learning_rate": 4.830917874396135e-06,
      "loss": 0.1725,
      "step": 210
    },
    {
      "epoch": 8.968421052631578,
      "eval_accuracy": 0.9365918097754293,
      "eval_loss": 0.20404939353466034,
      "eval_runtime": 7.0704,
      "eval_samples_per_second": 107.066,
      "eval_steps_per_second": 3.394,
      "step": 213
    },
    {
      "epoch": 9.263157894736842,
      "grad_norm": 6.243011474609375,
      "learning_rate": 2.4154589371980677e-06,
      "loss": 0.1653,
      "step": 220
    },
    {
      "epoch": 9.68421052631579,
      "grad_norm": 5.12047004699707,
      "learning_rate": 0.0,
      "loss": 0.1789,
      "step": 230
    },
    {
      "epoch": 9.68421052631579,
      "eval_accuracy": 0.9365918097754293,
      "eval_loss": 0.20334188640117645,
      "eval_runtime": 7.0261,
      "eval_samples_per_second": 107.741,
      "eval_steps_per_second": 3.416,
      "step": 230
    },
    {
      "epoch": 9.68421052631579,
      "step": 230,
      "total_flos": 7.494072602457047e+17,
      "train_loss": 0.23186731493991355,
      "train_runtime": 640.6828,
      "train_samples_per_second": 47.262,
      "train_steps_per_second": 0.359
    }
  ],
  "logging_steps": 10,
  "max_steps": 230,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 7.494072602457047e+17,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}