{
"best_metric": 0.0906025692820549,
"best_model_checkpoint": "./vit-base-16-thesis-demo-ISIC-multi-class/checkpoint-200",
"epoch": 4.0,
"eval_steps": 50,
"global_step": 204,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.2,
"learning_rate": 0.00019019607843137254,
"loss": 0.9473,
"step": 10
},
{
"epoch": 0.39,
"learning_rate": 0.0001803921568627451,
"loss": 0.6725,
"step": 20
},
{
"epoch": 0.59,
"learning_rate": 0.00017058823529411766,
"loss": 0.6139,
"step": 30
},
{
"epoch": 0.78,
"learning_rate": 0.00016078431372549022,
"loss": 0.5606,
"step": 40
},
{
"epoch": 0.98,
"learning_rate": 0.00015196078431372549,
"loss": 0.575,
"step": 50
},
{
"epoch": 0.98,
"eval_accuracy": 0.8490566037735849,
"eval_f1": 0.8490566037735849,
"eval_loss": 0.4132387042045593,
"eval_precision": 0.8490566037735849,
"eval_recall": 0.8490566037735849,
"eval_runtime": 20.7397,
"eval_samples_per_second": 7.666,
"eval_steps_per_second": 0.964,
"step": 50
},
{
"epoch": 1.18,
"learning_rate": 0.00014215686274509804,
"loss": 0.5265,
"step": 60
},
{
"epoch": 1.37,
"learning_rate": 0.0001323529411764706,
"loss": 0.4456,
"step": 70
},
{
"epoch": 1.57,
"learning_rate": 0.00012254901960784316,
"loss": 0.3044,
"step": 80
},
{
"epoch": 1.76,
"learning_rate": 0.0001127450980392157,
"loss": 0.3829,
"step": 90
},
{
"epoch": 1.96,
"learning_rate": 0.00010294117647058823,
"loss": 0.2771,
"step": 100
},
{
"epoch": 1.96,
"eval_accuracy": 0.9182389937106918,
"eval_f1": 0.9182389937106918,
"eval_loss": 0.23294508457183838,
"eval_precision": 0.9182389937106918,
"eval_recall": 0.9182389937106918,
"eval_runtime": 18.6716,
"eval_samples_per_second": 8.516,
"eval_steps_per_second": 1.071,
"step": 100
},
{
"epoch": 2.16,
"learning_rate": 9.313725490196079e-05,
"loss": 0.2781,
"step": 110
},
{
"epoch": 2.35,
"learning_rate": 8.333333333333334e-05,
"loss": 0.2399,
"step": 120
},
{
"epoch": 2.55,
"learning_rate": 7.352941176470589e-05,
"loss": 0.1721,
"step": 130
},
{
"epoch": 2.75,
"learning_rate": 6.372549019607843e-05,
"loss": 0.1759,
"step": 140
},
{
"epoch": 2.94,
"learning_rate": 5.392156862745098e-05,
"loss": 0.1703,
"step": 150
},
{
"epoch": 2.94,
"eval_accuracy": 0.949685534591195,
"eval_f1": 0.949685534591195,
"eval_loss": 0.18214598298072815,
"eval_precision": 0.949685534591195,
"eval_recall": 0.949685534591195,
"eval_runtime": 19.3259,
"eval_samples_per_second": 8.227,
"eval_steps_per_second": 1.035,
"step": 150
},
{
"epoch": 3.14,
"learning_rate": 4.411764705882353e-05,
"loss": 0.0989,
"step": 160
},
{
"epoch": 3.33,
"learning_rate": 3.431372549019608e-05,
"loss": 0.0797,
"step": 170
},
{
"epoch": 3.53,
"learning_rate": 2.4509803921568626e-05,
"loss": 0.1056,
"step": 180
},
{
"epoch": 3.73,
"learning_rate": 1.4705882352941177e-05,
"loss": 0.0492,
"step": 190
},
{
"epoch": 3.92,
"learning_rate": 4.901960784313726e-06,
"loss": 0.1186,
"step": 200
},
{
"epoch": 3.92,
"eval_accuracy": 0.9748427672955975,
"eval_f1": 0.9748427672955975,
"eval_loss": 0.0906025692820549,
"eval_precision": 0.9748427672955975,
"eval_recall": 0.9748427672955975,
"eval_runtime": 18.0737,
"eval_samples_per_second": 8.797,
"eval_steps_per_second": 1.107,
"step": 200
},
{
"epoch": 4.0,
"step": 204,
"total_flos": 2.5014638443873075e+17,
"train_loss": 0.33582471369528305,
"train_runtime": 621.9278,
"train_samples_per_second": 5.19,
"train_steps_per_second": 0.328
}
],
"logging_steps": 10,
"max_steps": 204,
"num_train_epochs": 4,
"save_steps": 50,
"total_flos": 2.5014638443873075e+17,
"trial_name": null,
"trial_params": null
}