{
  "best_metric": 44.35982865302237,
  "best_model_checkpoint": "whisper-sk-small-augmented/checkpoint-5000",
  "epoch": 71.42857142857143,
  "global_step": 5000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.71,
      "learning_rate": 4.6e-06,
      "loss": 3.0357,
      "step": 50
    },
    {
      "epoch": 1.43,
      "learning_rate": 9.600000000000001e-06,
      "loss": 1.2337,
      "step": 100
    },
    {
      "epoch": 2.14,
      "learning_rate": 1.4599999999999999e-05,
      "loss": 0.7656,
      "step": 150
    },
    {
      "epoch": 2.86,
      "learning_rate": 1.9600000000000002e-05,
      "loss": 0.4719,
      "step": 200
    },
    {
      "epoch": 3.57,
      "learning_rate": 2.46e-05,
      "loss": 0.3019,
      "step": 250
    },
    {
      "epoch": 4.29,
      "learning_rate": 2.96e-05,
      "loss": 0.2215,
      "step": 300
    },
    {
      "epoch": 5.0,
      "learning_rate": 3.46e-05,
      "loss": 0.1645,
      "step": 350
    },
    {
      "epoch": 5.71,
      "learning_rate": 3.960000000000001e-05,
      "loss": 0.0987,
      "step": 400
    },
    {
      "epoch": 6.43,
      "learning_rate": 4.46e-05,
      "loss": 0.0914,
      "step": 450
    },
    {
      "epoch": 7.14,
      "learning_rate": 4.96e-05,
      "loss": 0.0885,
      "step": 500
    },
    {
      "epoch": 7.86,
      "learning_rate": 4.948888888888889e-05,
      "loss": 0.0722,
      "step": 550
    },
    {
      "epoch": 8.57,
      "learning_rate": 4.8933333333333335e-05,
      "loss": 0.059,
      "step": 600
    },
    {
      "epoch": 9.29,
      "learning_rate": 4.837777777777778e-05,
      "loss": 0.0532,
      "step": 650
    },
    {
      "epoch": 10.0,
      "learning_rate": 4.782222222222222e-05,
      "loss": 0.0464,
      "step": 700
    },
    {
      "epoch": 10.71,
      "learning_rate": 4.726666666666667e-05,
      "loss": 0.0373,
      "step": 750
    },
    {
      "epoch": 11.43,
      "learning_rate": 4.671111111111111e-05,
      "loss": 0.0333,
      "step": 800
    },
    {
      "epoch": 12.14,
      "learning_rate": 4.615555555555556e-05,
      "loss": 0.0278,
      "step": 850
    },
    {
      "epoch": 12.86,
      "learning_rate": 4.5600000000000004e-05,
      "loss": 0.0228,
      "step": 900
    },
    {
      "epoch": 13.57,
      "learning_rate": 4.504444444444445e-05,
      "loss": 0.0268,
      "step": 950
    },
    {
      "epoch": 14.29,
      "learning_rate": 4.448888888888889e-05,
      "loss": 0.0232,
      "step": 1000
    },
    {
      "epoch": 14.29,
      "eval_loss": 0.7425360679626465,
      "eval_runtime": 568.891,
      "eval_samples_per_second": 3.985,
      "eval_steps_per_second": 0.125,
      "eval_wer": 51.880057115659206,
      "step": 1000
    },
    {
      "epoch": 15.0,
      "learning_rate": 4.3933333333333335e-05,
      "loss": 0.0208,
      "step": 1050
    },
    {
      "epoch": 15.71,
      "learning_rate": 4.337777777777778e-05,
      "loss": 0.0201,
      "step": 1100
    },
    {
      "epoch": 16.43,
      "learning_rate": 4.282222222222222e-05,
      "loss": 0.0173,
      "step": 1150
    },
    {
      "epoch": 17.14,
      "learning_rate": 4.226666666666667e-05,
      "loss": 0.0192,
      "step": 1200
    },
    {
      "epoch": 17.86,
      "learning_rate": 4.171111111111111e-05,
      "loss": 0.0181,
      "step": 1250
    },
    {
      "epoch": 18.57,
      "learning_rate": 4.115555555555556e-05,
      "loss": 0.0138,
      "step": 1300
    },
    {
      "epoch": 19.29,
      "learning_rate": 4.0600000000000004e-05,
      "loss": 0.014,
      "step": 1350
    },
    {
      "epoch": 20.0,
      "learning_rate": 4.004444444444445e-05,
      "loss": 0.0125,
      "step": 1400
    },
    {
      "epoch": 20.71,
      "learning_rate": 3.948888888888889e-05,
      "loss": 0.0097,
      "step": 1450
    },
    {
      "epoch": 21.43,
      "learning_rate": 3.8933333333333336e-05,
      "loss": 0.0122,
      "step": 1500
    },
    {
      "epoch": 22.14,
      "learning_rate": 3.837777777777778e-05,
      "loss": 0.0109,
      "step": 1550
    },
    {
      "epoch": 22.86,
      "learning_rate": 3.782222222222222e-05,
      "loss": 0.0094,
      "step": 1600
    },
    {
      "epoch": 23.57,
      "learning_rate": 3.726666666666667e-05,
      "loss": 0.009,
      "step": 1650
    },
    {
      "epoch": 24.29,
      "learning_rate": 3.671111111111111e-05,
      "loss": 0.007,
      "step": 1700
    },
    {
      "epoch": 25.0,
      "learning_rate": 3.615555555555556e-05,
      "loss": 0.008,
      "step": 1750
    },
    {
      "epoch": 25.71,
      "learning_rate": 3.56e-05,
      "loss": 0.008,
      "step": 1800
    },
    {
      "epoch": 26.43,
      "learning_rate": 3.504444444444445e-05,
      "loss": 0.0082,
      "step": 1850
    },
    {
      "epoch": 27.14,
      "learning_rate": 3.448888888888889e-05,
      "loss": 0.0077,
      "step": 1900
    },
    {
      "epoch": 27.86,
      "learning_rate": 3.3933333333333336e-05,
      "loss": 0.0064,
      "step": 1950
    },
    {
      "epoch": 28.57,
      "learning_rate": 3.337777777777778e-05,
      "loss": 0.0083,
      "step": 2000
    },
    {
      "epoch": 28.57,
      "eval_loss": 0.7697533369064331,
      "eval_runtime": 560.6254,
      "eval_samples_per_second": 4.044,
      "eval_steps_per_second": 0.127,
      "eval_wer": 48.48881485007139,
      "step": 2000
    },
    {
      "epoch": 29.29,
      "learning_rate": 3.2822222222222223e-05,
      "loss": 0.0057,
      "step": 2050
    },
    {
      "epoch": 30.0,
      "learning_rate": 3.226666666666667e-05,
      "loss": 0.0052,
      "step": 2100
    },
    {
      "epoch": 30.71,
      "learning_rate": 3.171111111111111e-05,
      "loss": 0.0055,
      "step": 2150
    },
    {
      "epoch": 31.43,
      "learning_rate": 3.1155555555555555e-05,
      "loss": 0.0028,
      "step": 2200
    },
    {
      "epoch": 32.14,
      "learning_rate": 3.06e-05,
      "loss": 0.003,
      "step": 2250
    },
    {
      "epoch": 32.86,
      "learning_rate": 3.004444444444445e-05,
      "loss": 0.0027,
      "step": 2300
    },
    {
      "epoch": 33.57,
      "learning_rate": 2.948888888888889e-05,
      "loss": 0.0023,
      "step": 2350
    },
    {
      "epoch": 34.29,
      "learning_rate": 2.8933333333333333e-05,
      "loss": 0.0024,
      "step": 2400
    },
    {
      "epoch": 35.0,
      "learning_rate": 2.837777777777778e-05,
      "loss": 0.0018,
      "step": 2450
    },
    {
      "epoch": 35.71,
      "learning_rate": 2.782222222222222e-05,
      "loss": 0.001,
      "step": 2500
    },
    {
      "epoch": 36.43,
      "learning_rate": 2.7266666666666668e-05,
      "loss": 0.0014,
      "step": 2550
    },
    {
      "epoch": 37.14,
      "learning_rate": 2.6711111111111115e-05,
      "loss": 0.0015,
      "step": 2600
    },
    {
      "epoch": 37.86,
      "learning_rate": 2.6155555555555555e-05,
      "loss": 0.001,
      "step": 2650
    },
    {
      "epoch": 38.57,
      "learning_rate": 2.5600000000000002e-05,
      "loss": 0.0009,
      "step": 2700
    },
    {
      "epoch": 39.29,
      "learning_rate": 2.504444444444445e-05,
      "loss": 0.0015,
      "step": 2750
    },
    {
      "epoch": 40.0,
      "learning_rate": 2.448888888888889e-05,
      "loss": 0.0013,
      "step": 2800
    },
    {
      "epoch": 40.71,
      "learning_rate": 2.3933333333333337e-05,
      "loss": 0.0007,
      "step": 2850
    },
    {
      "epoch": 41.43,
      "learning_rate": 2.337777777777778e-05,
      "loss": 0.0003,
      "step": 2900
    },
    {
      "epoch": 42.14,
      "learning_rate": 2.282222222222222e-05,
      "loss": 0.0004,
      "step": 2950
    },
    {
      "epoch": 42.86,
      "learning_rate": 2.2266666666666668e-05,
      "loss": 0.0006,
      "step": 3000
    },
    {
      "epoch": 42.86,
      "eval_loss": 0.7640286684036255,
      "eval_runtime": 528.4764,
      "eval_samples_per_second": 4.29,
      "eval_steps_per_second": 0.134,
      "eval_wer": 47.596382674916704,
      "step": 3000
    },
    {
      "epoch": 43.57,
      "learning_rate": 2.1711111111111112e-05,
      "loss": 0.0009,
      "step": 3050
    },
    {
      "epoch": 44.29,
      "learning_rate": 2.1155555555555556e-05,
      "loss": 0.0008,
      "step": 3100
    },
    {
      "epoch": 45.0,
      "learning_rate": 2.06e-05,
      "loss": 0.0012,
      "step": 3150
    },
    {
      "epoch": 45.71,
      "learning_rate": 2.0044444444444446e-05,
      "loss": 0.0007,
      "step": 3200
    },
    {
      "epoch": 46.43,
      "learning_rate": 1.948888888888889e-05,
      "loss": 0.0008,
      "step": 3250
    },
    {
      "epoch": 47.14,
      "learning_rate": 1.8933333333333334e-05,
      "loss": 0.0004,
      "step": 3300
    },
    {
      "epoch": 47.86,
      "learning_rate": 1.837777777777778e-05,
      "loss": 0.0008,
      "step": 3350
    },
    {
      "epoch": 48.57,
      "learning_rate": 1.7822222222222225e-05,
      "loss": 0.0003,
      "step": 3400
    },
    {
      "epoch": 49.29,
      "learning_rate": 1.726666666666667e-05,
      "loss": 0.0007,
      "step": 3450
    },
    {
      "epoch": 50.0,
      "learning_rate": 1.6711111111111112e-05,
      "loss": 0.001,
      "step": 3500
    },
    {
      "epoch": 50.71,
      "learning_rate": 1.6155555555555556e-05,
      "loss": 0.0004,
      "step": 3550
    },
    {
      "epoch": 51.43,
      "learning_rate": 1.56e-05,
      "loss": 0.0004,
      "step": 3600
    },
    {
      "epoch": 52.14,
      "learning_rate": 1.5044444444444445e-05,
      "loss": 0.0004,
      "step": 3650
    },
    {
      "epoch": 52.86,
      "learning_rate": 1.448888888888889e-05,
      "loss": 0.0002,
      "step": 3700
    },
    {
      "epoch": 53.57,
      "learning_rate": 1.3933333333333334e-05,
      "loss": 0.0007,
      "step": 3750
    },
    {
      "epoch": 54.29,
      "learning_rate": 1.3377777777777778e-05,
      "loss": 0.0006,
      "step": 3800
    },
    {
      "epoch": 55.0,
      "learning_rate": 1.2822222222222222e-05,
      "loss": 0.0005,
      "step": 3850
    },
    {
      "epoch": 55.71,
      "learning_rate": 1.2266666666666667e-05,
      "loss": 0.0002,
      "step": 3900
    },
    {
      "epoch": 56.43,
      "learning_rate": 1.1711111111111111e-05,
      "loss": 0.0002,
      "step": 3950
    },
    {
      "epoch": 57.14,
      "learning_rate": 1.1155555555555556e-05,
      "loss": 0.0005,
      "step": 4000
    },
    {
      "epoch": 57.14,
      "eval_loss": 0.7649062275886536,
      "eval_runtime": 488.6276,
      "eval_samples_per_second": 4.64,
      "eval_steps_per_second": 0.145,
      "eval_wer": 44.89528795811518,
      "step": 4000
    },
    {
      "epoch": 57.86,
      "learning_rate": 1.06e-05,
      "loss": 0.0002,
      "step": 4050
    },
    {
      "epoch": 58.57,
      "learning_rate": 1.0044444444444446e-05,
      "loss": 0.0003,
      "step": 4100
    },
    {
      "epoch": 59.29,
      "learning_rate": 9.48888888888889e-06,
      "loss": 0.0005,
      "step": 4150
    },
    {
      "epoch": 60.0,
      "learning_rate": 8.933333333333333e-06,
      "loss": 0.0005,
      "step": 4200
    },
    {
      "epoch": 60.71,
      "learning_rate": 8.377777777777779e-06,
      "loss": 0.0002,
      "step": 4250
    },
    {
      "epoch": 61.43,
      "learning_rate": 7.822222222222222e-06,
      "loss": 0.0003,
      "step": 4300
    },
    {
      "epoch": 62.14,
      "learning_rate": 7.266666666666668e-06,
      "loss": 0.001,
      "step": 4350
    },
    {
      "epoch": 62.86,
      "learning_rate": 6.711111111111111e-06,
      "loss": 0.0002,
      "step": 4400
    },
    {
      "epoch": 63.57,
      "learning_rate": 6.155555555555556e-06,
      "loss": 0.0003,
      "step": 4450
    },
    {
      "epoch": 64.29,
      "learning_rate": 5.600000000000001e-06,
      "loss": 0.0004,
      "step": 4500
    },
    {
      "epoch": 65.0,
      "learning_rate": 5.044444444444444e-06,
      "loss": 0.0003,
      "step": 4550
    },
    {
      "epoch": 65.71,
      "learning_rate": 4.488888888888889e-06,
      "loss": 0.0003,
      "step": 4600
    },
    {
      "epoch": 66.43,
      "learning_rate": 3.9333333333333335e-06,
      "loss": 0.0002,
      "step": 4650
    },
    {
      "epoch": 67.14,
      "learning_rate": 3.3777777777777777e-06,
      "loss": 0.0002,
      "step": 4700
    },
    {
      "epoch": 67.86,
      "learning_rate": 2.8222222222222223e-06,
      "loss": 0.0003,
      "step": 4750
    },
    {
      "epoch": 68.57,
      "learning_rate": 2.266666666666667e-06,
      "loss": 0.0002,
      "step": 4800
    },
    {
      "epoch": 69.29,
      "learning_rate": 1.7111111111111112e-06,
      "loss": 0.0003,
      "step": 4850
    },
    {
      "epoch": 70.0,
      "learning_rate": 1.1555555555555556e-06,
      "loss": 0.0004,
      "step": 4900
    },
    {
      "epoch": 70.71,
      "learning_rate": 6.000000000000001e-07,
      "loss": 0.0002,
      "step": 4950
    },
    {
      "epoch": 71.43,
      "learning_rate": 4.444444444444445e-08,
      "loss": 0.0002,
      "step": 5000
    },
    {
      "epoch": 71.43,
      "eval_loss": 0.7440354824066162,
      "eval_runtime": 495.5118,
      "eval_samples_per_second": 4.575,
      "eval_steps_per_second": 0.143,
      "eval_wer": 44.35982865302237,
      "step": 5000
    },
    {
      "epoch": 71.43,
      "step": 5000,
      "total_flos": 9.230634897555456e+19,
      "train_loss": 0.07175957694733516,
      "train_runtime": 15470.3261,
      "train_samples_per_second": 20.685,
      "train_steps_per_second": 0.323
    }
  ],
  "max_steps": 5000,
  "num_train_epochs": 72,
  "total_flos": 9.230634897555456e+19,
  "trial_name": null,
  "trial_params": null
}