{
"best_metric": 0.050547029823064804,
"best_model_checkpoint": "W:/res/Transformers/VIT-ConstantQ-Synthetic-Voice-Detection\\checkpoint-6346",
"epoch": 3.0,
"eval_steps": 500,
"global_step": 9519,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.16,
"learning_rate": 4.7373673705221136e-05,
"loss": 0.2039,
"step": 500
},
{
"epoch": 0.32,
"learning_rate": 4.4747347410442276e-05,
"loss": 0.0976,
"step": 1000
},
{
"epoch": 0.47,
"learning_rate": 4.212102111566341e-05,
"loss": 0.0627,
"step": 1500
},
{
"epoch": 0.63,
"learning_rate": 3.949469482088455e-05,
"loss": 0.046,
"step": 2000
},
{
"epoch": 0.79,
"learning_rate": 3.686836852610568e-05,
"loss": 0.0461,
"step": 2500
},
{
"epoch": 0.95,
"learning_rate": 3.424204223132682e-05,
"loss": 0.0383,
"step": 3000
},
{
"epoch": 1.0,
"eval_accuracy": 0.9752857832877153,
"eval_f1": 0.9864141257689073,
"eval_loss": 0.11920787394046783,
"eval_precision": 0.9734474626604943,
"eval_recall": 0.9997308934337998,
"eval_runtime": 647.2762,
"eval_samples_per_second": 38.382,
"eval_steps_per_second": 4.799,
"step": 3173
},
{
"epoch": 1.1,
"learning_rate": 3.1615715936547956e-05,
"loss": 0.0195,
"step": 3500
},
{
"epoch": 1.26,
"learning_rate": 2.8989389641769092e-05,
"loss": 0.0133,
"step": 4000
},
{
"epoch": 1.42,
"learning_rate": 2.636306334699023e-05,
"loss": 0.0132,
"step": 4500
},
{
"epoch": 1.58,
"learning_rate": 2.373673705221137e-05,
"loss": 0.0147,
"step": 5000
},
{
"epoch": 1.73,
"learning_rate": 2.1110410757432505e-05,
"loss": 0.0178,
"step": 5500
},
{
"epoch": 1.89,
"learning_rate": 1.8484084462653642e-05,
"loss": 0.0158,
"step": 6000
},
{
"epoch": 2.0,
"eval_accuracy": 0.9888101754950893,
"eval_f1": 0.993782429772768,
"eval_loss": 0.050547029823064804,
"eval_precision": 0.9911224125624554,
"eval_recall": 0.9964567635450305,
"eval_runtime": 652.5276,
"eval_samples_per_second": 38.073,
"eval_steps_per_second": 4.76,
"step": 6346
},
{
"epoch": 2.05,
"learning_rate": 1.585775816787478e-05,
"loss": 0.0103,
"step": 6500
},
{
"epoch": 2.21,
"learning_rate": 1.3231431873095915e-05,
"loss": 0.0034,
"step": 7000
},
{
"epoch": 2.36,
"learning_rate": 1.060510557831705e-05,
"loss": 0.0007,
"step": 7500
},
{
"epoch": 2.52,
"learning_rate": 7.978779283538187e-06,
"loss": 0.0017,
"step": 8000
},
{
"epoch": 2.68,
"learning_rate": 5.352452988759324e-06,
"loss": 0.0001,
"step": 8500
},
{
"epoch": 2.84,
"learning_rate": 2.7261266939804603e-06,
"loss": 0.0005,
"step": 9000
},
{
"epoch": 2.99,
"learning_rate": 9.98003992015968e-08,
"loss": 0.0021,
"step": 9500
},
{
"epoch": 3.0,
"eval_accuracy": 0.9849058122685558,
"eval_f1": 0.9916590671500701,
"eval_loss": 0.10420931875705719,
"eval_precision": 0.9836297048051891,
"eval_recall": 0.9998205956225332,
"eval_runtime": 641.7351,
"eval_samples_per_second": 38.714,
"eval_steps_per_second": 4.84,
"step": 9519
},
{
"epoch": 3.0,
"step": 9519,
"total_flos": 5.900240089255035e+18,
"train_loss": 0.03190612774814858,
"train_runtime": 4007.5201,
"train_samples_per_second": 18.999,
"train_steps_per_second": 2.375
}
],
"logging_steps": 500,
"max_steps": 9519,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"total_flos": 5.900240089255035e+18,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}