{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 49.99830220713073,
  "global_step": 7350,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.68,
      "learning_rate": 3.7125e-06,
      "loss": 20.4357,
      "step": 100
    },
    {
      "epoch": 1.36,
      "learning_rate": 7.425e-06,
      "loss": 11.928,
      "step": 200
    },
    {
      "epoch": 2.04,
      "learning_rate": 1.1174999999999999e-05,
      "loss": 8.0523,
      "step": 300
    },
    {
      "epoch": 2.72,
      "learning_rate": 1.4925e-05,
      "loss": 6.5593,
      "step": 400
    },
    {
      "epoch": 3.4,
      "learning_rate": 1.8675e-05,
      "loss": 5.3155,
      "step": 500
    },
    {
      "epoch": 3.4,
      "eval_loss": 4.558210849761963,
      "eval_runtime": 61.1578,
      "eval_samples_per_second": 34.256,
      "eval_steps_per_second": 4.284,
      "eval_wer": 1.0,
      "step": 500
    },
    {
      "epoch": 4.08,
      "learning_rate": 2.2424999999999996e-05,
      "loss": 4.3699,
      "step": 600
    },
    {
      "epoch": 4.76,
      "learning_rate": 2.6174999999999996e-05,
      "loss": 3.7571,
      "step": 700
    },
    {
      "epoch": 5.44,
      "learning_rate": 2.9925e-05,
      "loss": 3.5182,
      "step": 800
    },
    {
      "epoch": 6.12,
      "learning_rate": 3.3675e-05,
      "loss": 3.4537,
      "step": 900
    },
    {
      "epoch": 6.8,
      "learning_rate": 3.7424999999999995e-05,
      "loss": 3.3369,
      "step": 1000
    },
    {
      "epoch": 6.8,
      "eval_loss": 3.426917791366577,
      "eval_runtime": 62.6973,
      "eval_samples_per_second": 33.415,
      "eval_steps_per_second": 4.179,
      "eval_wer": 1.0,
      "step": 1000
    },
    {
      "epoch": 7.48,
      "learning_rate": 4.1175e-05,
      "loss": 3.2812,
      "step": 1100
    },
    {
      "epoch": 8.16,
      "learning_rate": 4.4924999999999994e-05,
      "loss": 3.2389,
      "step": 1200
    },
    {
      "epoch": 8.84,
      "learning_rate": 4.8675e-05,
      "loss": 3.1102,
      "step": 1300
    },
    {
      "epoch": 9.52,
      "learning_rate": 5.2424999999999994e-05,
      "loss": 2.7161,
      "step": 1400
    },
    {
      "epoch": 10.2,
      "learning_rate": 5.6175e-05,
      "loss": 2.1785,
      "step": 1500
    },
    {
      "epoch": 10.2,
      "eval_loss": 1.7190676927566528,
      "eval_runtime": 62.8007,
      "eval_samples_per_second": 33.36,
      "eval_steps_per_second": 4.172,
      "eval_wer": 0.8831024930747923,
      "step": 1500
    },
    {
      "epoch": 10.88,
      "learning_rate": 5.9925e-05,
      "loss": 1.9322,
      "step": 1600
    },
    {
      "epoch": 11.56,
      "learning_rate": 6.367499999999999e-05,
      "loss": 1.7738,
      "step": 1700
    },
    {
      "epoch": 12.24,
      "learning_rate": 6.7425e-05,
      "loss": 1.6853,
      "step": 1800
    },
    {
      "epoch": 12.92,
      "learning_rate": 7.1175e-05,
      "loss": 1.6283,
      "step": 1900
    },
    {
      "epoch": 13.6,
      "learning_rate": 7.492499999999999e-05,
      "loss": 1.579,
      "step": 2000
    },
    {
      "epoch": 13.6,
      "eval_loss": 1.3603801727294922,
      "eval_runtime": 61.857,
      "eval_samples_per_second": 33.868,
      "eval_steps_per_second": 4.236,
      "eval_wer": 0.7647276084949215,
      "step": 2000
    },
    {
      "epoch": 14.29,
      "learning_rate": 7.362616822429906e-05,
      "loss": 1.5102,
      "step": 2100
    },
    {
      "epoch": 14.96,
      "learning_rate": 7.222429906542056e-05,
      "loss": 1.478,
      "step": 2200
    },
    {
      "epoch": 15.65,
      "learning_rate": 7.082242990654205e-05,
      "loss": 1.472,
      "step": 2300
    },
    {
      "epoch": 16.33,
      "learning_rate": 6.942056074766355e-05,
      "loss": 1.4333,
      "step": 2400
    },
    {
      "epoch": 17.01,
      "learning_rate": 6.801869158878504e-05,
      "loss": 1.3773,
      "step": 2500
    },
    {
      "epoch": 17.01,
      "eval_loss": 1.2737088203430176,
      "eval_runtime": 62.7361,
      "eval_samples_per_second": 33.394,
      "eval_steps_per_second": 4.176,
      "eval_wer": 0.7519236688211758,
      "step": 2500
    },
    {
      "epoch": 17.69,
      "learning_rate": 6.661682242990654e-05,
      "loss": 1.3823,
      "step": 2600
    },
    {
      "epoch": 18.37,
      "learning_rate": 6.52429906542056e-05,
      "loss": 1.3638,
      "step": 2700
    },
    {
      "epoch": 19.05,
      "learning_rate": 6.384112149532709e-05,
      "loss": 1.3431,
      "step": 2800
    },
    {
      "epoch": 19.73,
      "learning_rate": 6.243925233644859e-05,
      "loss": 1.3223,
      "step": 2900
    },
    {
      "epoch": 20.41,
      "learning_rate": 6.103738317757008e-05,
      "loss": 1.3165,
      "step": 3000
    },
    {
      "epoch": 20.41,
      "eval_loss": 1.245711326599121,
      "eval_runtime": 62.7019,
      "eval_samples_per_second": 33.412,
      "eval_steps_per_second": 4.179,
      "eval_wer": 0.740104647583872,
      "step": 3000
    },
    {
      "epoch": 21.09,
      "learning_rate": 5.9635514018691585e-05,
      "loss": 1.292,
      "step": 3100
    },
    {
      "epoch": 21.77,
      "learning_rate": 5.823364485981308e-05,
      "loss": 1.2607,
      "step": 3200
    },
    {
      "epoch": 22.45,
      "learning_rate": 5.6831775700934575e-05,
      "loss": 1.2767,
      "step": 3300
    },
    {
      "epoch": 23.13,
      "learning_rate": 5.542990654205607e-05,
      "loss": 1.2518,
      "step": 3400
    },
    {
      "epoch": 23.81,
      "learning_rate": 5.4028037383177566e-05,
      "loss": 1.2274,
      "step": 3500
    },
    {
      "epoch": 23.81,
      "eval_loss": 1.3616658449172974,
      "eval_runtime": 63.5478,
      "eval_samples_per_second": 32.967,
      "eval_steps_per_second": 4.123,
      "eval_wer": 0.7300707910126193,
      "step": 3500
    },
    {
      "epoch": 24.49,
      "learning_rate": 5.262616822429906e-05,
      "loss": 1.1919,
      "step": 3600
    },
    {
      "epoch": 25.17,
      "learning_rate": 5.1224299065420557e-05,
      "loss": 1.2453,
      "step": 3700
    },
    {
      "epoch": 25.85,
      "learning_rate": 4.982242990654205e-05,
      "loss": 1.2109,
      "step": 3800
    },
    {
      "epoch": 26.53,
      "learning_rate": 4.842056074766355e-05,
      "loss": 1.1759,
      "step": 3900
    },
    {
      "epoch": 27.21,
      "learning_rate": 4.701869158878504e-05,
      "loss": 1.1787,
      "step": 4000
    },
    {
      "epoch": 27.21,
      "eval_loss": 1.2067540884017944,
      "eval_runtime": 62.6504,
      "eval_samples_per_second": 33.44,
      "eval_steps_per_second": 4.182,
      "eval_wer": 0.7009541397353032,
      "step": 4000
    },
    {
      "epoch": 27.89,
      "learning_rate": 4.561682242990654e-05,
      "loss": 1.1783,
      "step": 4100
    },
    {
      "epoch": 28.57,
      "learning_rate": 4.421495327102803e-05,
      "loss": 1.1709,
      "step": 4200
    },
    {
      "epoch": 29.25,
      "learning_rate": 4.281308411214953e-05,
      "loss": 1.1346,
      "step": 4300
    },
    {
      "epoch": 29.93,
      "learning_rate": 4.1411214953271024e-05,
      "loss": 1.1589,
      "step": 4400
    },
    {
      "epoch": 30.61,
      "learning_rate": 4.000934579439252e-05,
      "loss": 1.1467,
      "step": 4500
    },
    {
      "epoch": 30.61,
      "eval_loss": 1.241578459739685,
      "eval_runtime": 62.8423,
      "eval_samples_per_second": 33.337,
      "eval_steps_per_second": 4.169,
      "eval_wer": 0.6946137273007079,
      "step": 4500
    },
    {
      "epoch": 31.29,
      "learning_rate": 3.8621495327102795e-05,
      "loss": 1.127,
      "step": 4600
    },
    {
      "epoch": 31.97,
      "learning_rate": 3.72196261682243e-05,
      "loss": 1.1027,
      "step": 4700
    },
    {
      "epoch": 32.65,
      "learning_rate": 3.581775700934579e-05,
      "loss": 1.125,
      "step": 4800
    },
    {
      "epoch": 33.33,
      "learning_rate": 3.4429906542056075e-05,
      "loss": 1.1193,
      "step": 4900
    },
    {
      "epoch": 34.01,
      "learning_rate": 3.302803738317757e-05,
      "loss": 1.0801,
      "step": 5000
    },
    {
      "epoch": 34.01,
      "eval_loss": 1.2311749458312988,
      "eval_runtime": 61.8112,
      "eval_samples_per_second": 33.894,
      "eval_steps_per_second": 4.239,
      "eval_wer": 0.6990458602646968,
      "step": 5000
    },
    {
      "epoch": 34.69,
      "learning_rate": 3.1626168224299066e-05,
      "loss": 1.0891,
      "step": 5100
    },
    {
      "epoch": 35.37,
      "learning_rate": 3.0224299065420558e-05,
      "loss": 1.0841,
      "step": 5200
    },
    {
      "epoch": 36.05,
      "learning_rate": 2.8822429906542053e-05,
      "loss": 1.0679,
      "step": 5300
    },
    {
      "epoch": 36.73,
      "learning_rate": 2.742056074766355e-05,
      "loss": 1.0555,
      "step": 5400
    },
    {
      "epoch": 37.41,
      "learning_rate": 2.6018691588785044e-05,
      "loss": 1.0709,
      "step": 5500
    },
    {
      "epoch": 37.41,
      "eval_loss": 1.2984226942062378,
      "eval_runtime": 62.056,
      "eval_samples_per_second": 33.76,
      "eval_steps_per_second": 4.222,
      "eval_wer": 0.7137580794090489,
      "step": 5500
    },
    {
      "epoch": 38.1,
      "learning_rate": 2.461682242990654e-05,
      "loss": 1.051,
      "step": 5600
    },
    {
      "epoch": 38.77,
      "learning_rate": 2.3214953271028034e-05,
      "loss": 1.0309,
      "step": 5700
    },
    {
      "epoch": 39.46,
      "learning_rate": 2.181308411214953e-05,
      "loss": 1.0343,
      "step": 5800
    },
    {
      "epoch": 40.14,
      "learning_rate": 2.0411214953271025e-05,
      "loss": 1.0456,
      "step": 5900
    },
    {
      "epoch": 40.81,
      "learning_rate": 1.900934579439252e-05,
      "loss": 1.0307,
      "step": 6000
    },
    {
      "epoch": 40.81,
      "eval_loss": 1.2048739194869995,
      "eval_runtime": 63.482,
      "eval_samples_per_second": 33.001,
      "eval_steps_per_second": 4.127,
      "eval_wer": 0.6871037242228378,
      "step": 6000
    },
    {
      "epoch": 41.5,
      "learning_rate": 1.760747663551402e-05,
      "loss": 1.0092,
      "step": 6100
    },
    {
      "epoch": 42.18,
      "learning_rate": 1.6205607476635514e-05,
      "loss": 1.0258,
      "step": 6200
    },
    {
      "epoch": 42.86,
      "learning_rate": 1.4803738317757008e-05,
      "loss": 1.0129,
      "step": 6300
    },
    {
      "epoch": 43.54,
      "learning_rate": 1.3401869158878503e-05,
      "loss": 1.0044,
      "step": 6400
    },
    {
      "epoch": 44.22,
      "learning_rate": 1.1999999999999999e-05,
      "loss": 1.0003,
      "step": 6500
    },
    {
      "epoch": 44.22,
      "eval_loss": 1.1955562829971313,
      "eval_runtime": 63.8,
      "eval_samples_per_second": 32.837,
      "eval_steps_per_second": 4.107,
      "eval_wer": 0.6841489689135118,
      "step": 6500
    },
    {
      "epoch": 44.9,
      "learning_rate": 1.0598130841121494e-05,
      "loss": 1.0068,
      "step": 6600
    },
    {
      "epoch": 45.58,
      "learning_rate": 9.19626168224299e-06,
      "loss": 1.0071,
      "step": 6700
    },
    {
      "epoch": 46.26,
      "learning_rate": 7.794392523364485e-06,
      "loss": 0.9813,
      "step": 6800
    },
    {
      "epoch": 46.94,
      "learning_rate": 6.392523364485981e-06,
      "loss": 0.9888,
      "step": 6900
    },
    {
      "epoch": 47.62,
      "learning_rate": 4.990654205607477e-06,
      "loss": 1.004,
      "step": 7000
    },
    {
      "epoch": 47.62,
      "eval_loss": 1.2101075649261475,
      "eval_runtime": 62.342,
      "eval_samples_per_second": 33.605,
      "eval_steps_per_second": 4.203,
      "eval_wer": 0.6792859341335795,
      "step": 7000
    },
    {
      "epoch": 48.3,
      "learning_rate": 3.5887850467289714e-06,
      "loss": 0.9865,
      "step": 7100
    },
    {
      "epoch": 48.98,
      "learning_rate": 2.186915887850467e-06,
      "loss": 0.976,
      "step": 7200
    },
    {
      "epoch": 49.66,
      "learning_rate": 7.850467289719626e-07,
      "loss": 1.0041,
      "step": 7300
    },
    {
      "epoch": 50.0,
      "step": 7350,
      "total_flos": 2.9231564377949348e+19,
      "train_loss": 2.1161629922049388,
      "train_runtime": 13636.3379,
      "train_samples_per_second": 17.274,
      "train_steps_per_second": 0.539
    }
  ],
  "max_steps": 7350,
  "num_train_epochs": 50,
  "total_flos": 2.9231564377949348e+19,
  "trial_name": null,
  "trial_params": null
}