{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 100.0,
  "global_step": 3300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 3.03,
      "learning_rate": 1.455e-05,
      "loss": 13.8145,
      "step": 100
    },
    {
      "epoch": 6.06,
      "learning_rate": 2.955e-05,
      "loss": 4.8042,
      "step": 200
    },
    {
      "epoch": 9.09,
      "learning_rate": 4.454999999999999e-05,
      "loss": 3.4947,
      "step": 300
    },
    {
      "epoch": 12.12,
      "learning_rate": 5.955e-05,
      "loss": 3.0225,
      "step": 400
    },
    {
      "epoch": 15.15,
      "learning_rate": 7.455e-05,
      "loss": 2.927,
      "step": 500
    },
    {
      "epoch": 15.15,
      "eval_loss": 2.919621706008911,
      "eval_runtime": 20.0515,
      "eval_samples_per_second": 21.295,
      "eval_steps_per_second": 1.347,
      "eval_wer": 1.0,
      "step": 500
    },
    {
      "epoch": 18.18,
      "learning_rate": 7.240178571428571e-05,
      "loss": 2.8838,
      "step": 600
    },
    {
      "epoch": 21.21,
      "learning_rate": 6.972321428571428e-05,
      "loss": 2.8617,
      "step": 700
    },
    {
      "epoch": 24.24,
      "learning_rate": 6.704464285714285e-05,
      "loss": 2.8313,
      "step": 800
    },
    {
      "epoch": 27.27,
      "learning_rate": 6.436607142857142e-05,
      "loss": 2.0706,
      "step": 900
    },
    {
      "epoch": 30.3,
      "learning_rate": 6.16875e-05,
      "loss": 1.3835,
      "step": 1000
    },
    {
      "epoch": 30.3,
      "eval_loss": 0.5878978967666626,
      "eval_runtime": 20.2624,
      "eval_samples_per_second": 21.073,
      "eval_steps_per_second": 1.333,
      "eval_wer": 0.5866312741312741,
      "step": 1000
    },
    {
      "epoch": 33.33,
      "learning_rate": 5.9008928571428565e-05,
      "loss": 1.1555,
      "step": 1100
    },
    {
      "epoch": 36.36,
      "learning_rate": 5.633035714285714e-05,
      "loss": 0.9935,
      "step": 1200
    },
    {
      "epoch": 39.39,
      "learning_rate": 5.3651785714285706e-05,
      "loss": 0.8824,
      "step": 1300
    },
    {
      "epoch": 42.42,
      "learning_rate": 5.0973214285714276e-05,
      "loss": 0.8012,
      "step": 1400
    },
    {
      "epoch": 45.45,
      "learning_rate": 4.829464285714285e-05,
      "loss": 0.7415,
      "step": 1500
    },
    {
      "epoch": 45.45,
      "eval_loss": 0.30774399638175964,
      "eval_runtime": 20.0347,
      "eval_samples_per_second": 21.313,
      "eval_steps_per_second": 1.348,
      "eval_wer": 0.33156370656370654,
      "step": 1500
    },
    {
      "epoch": 48.48,
      "learning_rate": 4.561607142857142e-05,
      "loss": 0.6777,
      "step": 1600
    },
    {
      "epoch": 51.52,
      "learning_rate": 4.29375e-05,
      "loss": 0.6458,
      "step": 1700
    },
    {
      "epoch": 54.55,
      "learning_rate": 4.025892857142857e-05,
      "loss": 0.6332,
      "step": 1800
    },
    {
      "epoch": 57.58,
      "learning_rate": 3.758035714285714e-05,
      "loss": 0.5732,
      "step": 1900
    },
    {
      "epoch": 60.61,
      "learning_rate": 3.490178571428571e-05,
      "loss": 0.5575,
      "step": 2000
    },
    {
      "epoch": 60.61,
      "eval_loss": 0.2735135853290558,
      "eval_runtime": 19.9705,
      "eval_samples_per_second": 21.382,
      "eval_steps_per_second": 1.352,
      "eval_wer": 0.2953667953667954,
      "step": 2000
    },
    {
      "epoch": 63.64,
      "learning_rate": 3.222321428571428e-05,
      "loss": 0.5202,
      "step": 2100
    },
    {
      "epoch": 66.67,
      "learning_rate": 2.954464285714285e-05,
      "loss": 0.5067,
      "step": 2200
    },
    {
      "epoch": 69.7,
      "learning_rate": 2.6866071428571425e-05,
      "loss": 0.4894,
      "step": 2300
    },
    {
      "epoch": 72.73,
      "learning_rate": 2.41875e-05,
      "loss": 0.472,
      "step": 2400
    },
    {
      "epoch": 75.76,
      "learning_rate": 2.1508928571428572e-05,
      "loss": 0.4581,
      "step": 2500
    },
    {
      "epoch": 75.76,
      "eval_loss": 0.27067553997039795,
      "eval_runtime": 19.7341,
      "eval_samples_per_second": 21.638,
      "eval_steps_per_second": 1.368,
      "eval_wer": 0.28016409266409265,
      "step": 2500
    },
    {
      "epoch": 78.79,
      "learning_rate": 1.883035714285714e-05,
      "loss": 0.4364,
      "step": 2600
    },
    {
      "epoch": 81.82,
      "learning_rate": 1.6151785714285712e-05,
      "loss": 0.4457,
      "step": 2700
    },
    {
      "epoch": 84.85,
      "learning_rate": 1.3473214285714284e-05,
      "loss": 0.4142,
      "step": 2800
    },
    {
      "epoch": 87.88,
      "learning_rate": 1.0794642857142856e-05,
      "loss": 0.418,
      "step": 2900
    },
    {
      "epoch": 90.91,
      "learning_rate": 8.116071428571428e-06,
      "loss": 0.3977,
      "step": 3000
    },
    {
      "epoch": 90.91,
      "eval_loss": 0.2785268723964691,
      "eval_runtime": 19.7902,
      "eval_samples_per_second": 21.576,
      "eval_steps_per_second": 1.364,
      "eval_wer": 0.2808880308880309,
      "step": 3000
    },
    {
      "epoch": 93.94,
      "learning_rate": 5.437499999999999e-06,
      "loss": 0.3984,
      "step": 3100
    },
    {
      "epoch": 96.97,
      "learning_rate": 2.758928571428571e-06,
      "loss": 0.3919,
      "step": 3200
    },
    {
      "epoch": 100.0,
      "learning_rate": 8.035714285714285e-08,
      "loss": 0.384,
      "step": 3300
    },
    {
      "epoch": 100.0,
      "step": 3300,
      "total_flos": 2.0644460727363166e+19,
      "train_loss": 1.620846695177483,
      "train_runtime": 7377.7284,
      "train_samples_per_second": 13.893,
      "train_steps_per_second": 0.447
    }
  ],
  "max_steps": 3300,
  "num_train_epochs": 100,
  "total_flos": 2.0644460727363166e+19,
  "trial_name": null,
  "trial_params": null
}