{
"best_metric": 0.9507407407407408,
"best_model_checkpoint": "swin-tiny-patch4-window7-224-finetuned-eurosat/checkpoint-141",
"epoch": 2.968421052631579,
"eval_steps": 500,
"global_step": 141,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.21052631578947367,
"grad_norm": 4.6187968254089355,
"learning_rate": 3.3333333333333335e-05,
"loss": 2.2058,
"step": 10
},
{
"epoch": 0.42105263157894735,
"grad_norm": 13.80441665649414,
"learning_rate": 4.801587301587302e-05,
"loss": 1.4136,
"step": 20
},
{
"epoch": 0.631578947368421,
"grad_norm": 8.810165405273438,
"learning_rate": 4.404761904761905e-05,
"loss": 0.6209,
"step": 30
},
{
"epoch": 0.8421052631578947,
"grad_norm": 9.584736824035645,
"learning_rate": 4.007936507936508e-05,
"loss": 0.4418,
"step": 40
},
{
"epoch": 0.9894736842105263,
"eval_accuracy": 0.9196296296296296,
"eval_loss": 0.24092020094394684,
"eval_runtime": 6.6105,
"eval_samples_per_second": 408.443,
"eval_steps_per_second": 3.328,
"step": 47
},
{
"epoch": 1.0526315789473684,
"grad_norm": 6.858447551727295,
"learning_rate": 3.611111111111111e-05,
"loss": 0.3384,
"step": 50
},
{
"epoch": 1.263157894736842,
"grad_norm": 5.806480884552002,
"learning_rate": 3.2142857142857144e-05,
"loss": 0.3164,
"step": 60
},
{
"epoch": 1.4736842105263157,
"grad_norm": 5.885347366333008,
"learning_rate": 2.8174603174603175e-05,
"loss": 0.267,
"step": 70
},
{
"epoch": 1.6842105263157894,
"grad_norm": 9.434675216674805,
"learning_rate": 2.4206349206349206e-05,
"loss": 0.2542,
"step": 80
},
{
"epoch": 1.8947368421052633,
"grad_norm": 7.690194606781006,
"learning_rate": 2.023809523809524e-05,
"loss": 0.2514,
"step": 90
},
{
"epoch": 2.0,
"eval_accuracy": 0.9462962962962963,
"eval_loss": 0.16330811381340027,
"eval_runtime": 5.834,
"eval_samples_per_second": 462.801,
"eval_steps_per_second": 3.771,
"step": 95
},
{
"epoch": 2.1052631578947367,
"grad_norm": 6.186488628387451,
"learning_rate": 1.626984126984127e-05,
"loss": 0.2324,
"step": 100
},
{
"epoch": 2.3157894736842106,
"grad_norm": 5.900567531585693,
"learning_rate": 1.2301587301587301e-05,
"loss": 0.2235,
"step": 110
},
{
"epoch": 2.526315789473684,
"grad_norm": 6.816904544830322,
"learning_rate": 8.333333333333334e-06,
"loss": 0.2262,
"step": 120
},
{
"epoch": 2.736842105263158,
"grad_norm": 5.449790000915527,
"learning_rate": 4.365079365079365e-06,
"loss": 0.1988,
"step": 130
},
{
"epoch": 2.9473684210526314,
"grad_norm": 7.056036472320557,
"learning_rate": 3.9682539682539683e-07,
"loss": 0.1907,
"step": 140
},
{
"epoch": 2.968421052631579,
"eval_accuracy": 0.9507407407407408,
"eval_loss": 0.14795009791851044,
"eval_runtime": 6.5373,
"eval_samples_per_second": 413.015,
"eval_steps_per_second": 3.365,
"step": 141
},
{
"epoch": 2.968421052631579,
"step": 141,
"total_flos": 1.7938102147100836e+18,
"train_loss": 0.5108092158821458,
"train_runtime": 247.4957,
"train_samples_per_second": 294.551,
"train_steps_per_second": 0.57
}
],
"logging_steps": 10,
"max_steps": 141,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.7938102147100836e+18,
"train_batch_size": 128,
"trial_name": null,
"trial_params": null
}