{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 99.99521531100478,
  "global_step": 5200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.92,
      "learning_rate": 1.6166666666666665e-07,
      "loss": 24.0656,
      "step": 100
    },
    {
      "epoch": 3.84,
      "learning_rate": 3.283333333333333e-07,
      "loss": 19.0268,
      "step": 200
    },
    {
      "epoch": 5.77,
      "learning_rate": 4.95e-07,
      "loss": 6.1995,
      "step": 300
    },
    {
      "epoch": 7.69,
      "learning_rate": 4.901020408163265e-07,
      "loss": 3.6484,
      "step": 400
    },
    {
      "epoch": 9.61,
      "learning_rate": 4.798979591836734e-07,
      "loss": 3.5131,
      "step": 500
    },
    {
      "epoch": 9.61,
      "eval_loss": 3.5294487476348877,
      "eval_runtime": 64.5425,
      "eval_samples_per_second": 17.229,
      "eval_steps_per_second": 4.307,
      "eval_wer": 1.0,
      "step": 500
    },
    {
      "epoch": 11.54,
      "learning_rate": 4.696938775510204e-07,
      "loss": 3.4699,
      "step": 600
    },
    {
      "epoch": 13.46,
      "learning_rate": 4.5948979591836735e-07,
      "loss": 3.3973,
      "step": 700
    },
    {
      "epoch": 15.38,
      "learning_rate": 4.4928571428571426e-07,
      "loss": 3.2876,
      "step": 800
    },
    {
      "epoch": 17.31,
      "learning_rate": 4.390816326530612e-07,
      "loss": 3.0902,
      "step": 900
    },
    {
      "epoch": 19.23,
      "learning_rate": 4.288775510204081e-07,
      "loss": 2.8596,
      "step": 1000
    },
    {
      "epoch": 19.23,
      "eval_loss": 3.5708465576171875,
      "eval_runtime": 66.2433,
      "eval_samples_per_second": 16.787,
      "eval_steps_per_second": 4.197,
      "eval_wer": 1.0,
      "step": 1000
    },
    {
      "epoch": 21.15,
      "learning_rate": 4.1867346938775513e-07,
      "loss": 2.6397,
      "step": 1100
    },
    {
      "epoch": 23.08,
      "learning_rate": 4.0846938775510203e-07,
      "loss": 2.4302,
      "step": 1200
    },
    {
      "epoch": 25.0,
      "learning_rate": 3.98265306122449e-07,
      "loss": 2.2341,
      "step": 1300
    },
    {
      "epoch": 26.92,
      "learning_rate": 3.880612244897959e-07,
      "loss": 2.071,
      "step": 1400
    },
    {
      "epoch": 28.84,
      "learning_rate": 3.778571428571428e-07,
      "loss": 1.9055,
      "step": 1500
    },
    {
      "epoch": 28.84,
      "eval_loss": 3.6432793140411377,
      "eval_runtime": 65.1639,
      "eval_samples_per_second": 17.065,
      "eval_steps_per_second": 4.266,
      "eval_wer": 1.0007174887892376,
      "step": 1500
    },
    {
      "epoch": 30.77,
      "learning_rate": 3.676530612244898e-07,
      "loss": 1.7787,
      "step": 1600
    },
    {
      "epoch": 32.69,
      "learning_rate": 3.574489795918367e-07,
      "loss": 1.6655,
      "step": 1700
    },
    {
      "epoch": 34.61,
      "learning_rate": 3.4724489795918366e-07,
      "loss": 1.5696,
      "step": 1800
    },
    {
      "epoch": 36.54,
      "learning_rate": 3.3704081632653057e-07,
      "loss": 1.4902,
      "step": 1900
    },
    {
      "epoch": 38.46,
      "learning_rate": 3.268367346938775e-07,
      "loss": 1.4239,
      "step": 2000
    },
    {
      "epoch": 38.46,
      "eval_loss": 3.6568963527679443,
      "eval_runtime": 66.0028,
      "eval_samples_per_second": 16.848,
      "eval_steps_per_second": 4.212,
      "eval_wer": 0.9994618834080717,
      "step": 2000
    },
    {
      "epoch": 40.38,
      "learning_rate": 3.166326530612245e-07,
      "loss": 1.3735,
      "step": 2100
    },
    {
      "epoch": 42.31,
      "learning_rate": 3.0642857142857144e-07,
      "loss": 1.3228,
      "step": 2200
    },
    {
      "epoch": 44.23,
      "learning_rate": 2.9622448979591834e-07,
      "loss": 1.2834,
      "step": 2300
    },
    {
      "epoch": 46.15,
      "learning_rate": 2.860204081632653e-07,
      "loss": 1.2438,
      "step": 2400
    },
    {
      "epoch": 48.08,
      "learning_rate": 2.758163265306122e-07,
      "loss": 1.2168,
      "step": 2500
    },
    {
      "epoch": 48.08,
      "eval_loss": 3.6079351902008057,
      "eval_runtime": 65.3166,
      "eval_samples_per_second": 17.025,
      "eval_steps_per_second": 4.256,
      "eval_wer": 0.995695067264574,
      "step": 2500
    },
    {
      "epoch": 50.0,
      "learning_rate": 2.656122448979592e-07,
      "loss": 1.1792,
      "step": 2600
    },
    {
      "epoch": 51.92,
      "learning_rate": 2.554081632653061e-07,
      "loss": 1.1706,
      "step": 2700
    },
    {
      "epoch": 53.84,
      "learning_rate": 2.4520408163265307e-07,
      "loss": 1.1429,
      "step": 2800
    },
    {
      "epoch": 55.77,
      "learning_rate": 2.3499999999999997e-07,
      "loss": 1.1318,
      "step": 2900
    },
    {
      "epoch": 57.69,
      "learning_rate": 2.2479591836734693e-07,
      "loss": 1.1063,
      "step": 3000
    },
    {
      "epoch": 57.69,
      "eval_loss": 3.5737504959106445,
      "eval_runtime": 64.9336,
      "eval_samples_per_second": 17.125,
      "eval_steps_per_second": 4.281,
      "eval_wer": 0.9924663677130044,
      "step": 3000
    },
    {
      "epoch": 59.61,
      "learning_rate": 2.1459183673469386e-07,
      "loss": 1.0893,
      "step": 3100
    },
    {
      "epoch": 61.54,
      "learning_rate": 2.0438775510204082e-07,
      "loss": 1.0744,
      "step": 3200
    },
    {
      "epoch": 63.46,
      "learning_rate": 1.9418367346938775e-07,
      "loss": 1.0591,
      "step": 3300
    },
    {
      "epoch": 65.38,
      "learning_rate": 1.839795918367347e-07,
      "loss": 1.05,
      "step": 3400
    },
    {
      "epoch": 67.31,
      "learning_rate": 1.7377551020408163e-07,
      "loss": 1.0404,
      "step": 3500
    },
    {
      "epoch": 67.31,
      "eval_loss": 3.4857470989227295,
      "eval_runtime": 64.9602,
      "eval_samples_per_second": 17.118,
      "eval_steps_per_second": 4.28,
      "eval_wer": 0.9888789237668162,
      "step": 3500
    },
    {
      "epoch": 69.23,
      "learning_rate": 1.6357142857142856e-07,
      "loss": 1.0325,
      "step": 3600
    },
    {
      "epoch": 71.15,
      "learning_rate": 1.5336734693877552e-07,
      "loss": 1.0266,
      "step": 3700
    },
    {
      "epoch": 73.08,
      "learning_rate": 1.4316326530612245e-07,
      "loss": 1.0164,
      "step": 3800
    },
    {
      "epoch": 75.0,
      "learning_rate": 1.329591836734694e-07,
      "loss": 1.0002,
      "step": 3900
    },
    {
      "epoch": 76.92,
      "learning_rate": 1.227551020408163e-07,
      "loss": 1.001,
      "step": 4000
    },
    {
      "epoch": 76.92,
      "eval_loss": 3.4881510734558105,
      "eval_runtime": 64.7811,
      "eval_samples_per_second": 17.165,
      "eval_steps_per_second": 4.291,
      "eval_wer": 0.9858295964125561,
      "step": 4000
    },
    {
      "epoch": 78.84,
      "learning_rate": 1.1255102040816327e-07,
      "loss": 0.997,
      "step": 4100
    },
    {
      "epoch": 80.77,
      "learning_rate": 1.0234693877551021e-07,
      "loss": 0.9896,
      "step": 4200
    },
    {
      "epoch": 82.69,
      "learning_rate": 9.214285714285714e-08,
      "loss": 0.986,
      "step": 4300
    },
    {
      "epoch": 84.61,
      "learning_rate": 8.193877551020407e-08,
      "loss": 0.9823,
      "step": 4400
    },
    {
      "epoch": 86.54,
      "learning_rate": 7.173469387755101e-08,
      "loss": 0.982,
      "step": 4500
    },
    {
      "epoch": 86.54,
      "eval_loss": 3.3850555419921875,
      "eval_runtime": 65.2436,
      "eval_samples_per_second": 17.044,
      "eval_steps_per_second": 4.261,
      "eval_wer": 0.987085201793722,
      "step": 4500
    },
    {
      "epoch": 88.46,
      "learning_rate": 6.153061224489796e-08,
      "loss": 0.9675,
      "step": 4600
    },
    {
      "epoch": 90.38,
      "learning_rate": 5.132653061224489e-08,
      "loss": 0.9774,
      "step": 4700
    },
    {
      "epoch": 92.31,
      "learning_rate": 4.1122448979591836e-08,
      "loss": 0.9687,
      "step": 4800
    },
    {
      "epoch": 94.23,
      "learning_rate": 3.091836734693877e-08,
      "loss": 0.9771,
      "step": 4900
    },
    {
      "epoch": 96.15,
      "learning_rate": 2.0714285714285713e-08,
      "loss": 0.9612,
      "step": 5000
    },
    {
      "epoch": 96.15,
      "eval_loss": 3.386908531188965,
      "eval_runtime": 65.5128,
      "eval_samples_per_second": 16.974,
      "eval_steps_per_second": 4.243,
      "eval_wer": 0.9872645739910314,
      "step": 5000
    },
    {
      "epoch": 98.08,
      "learning_rate": 1.0510204081632651e-08,
      "loss": 0.9667,
      "step": 5100
    },
    {
      "epoch": 100.0,
      "learning_rate": 3.061224489795918e-10,
      "loss": 0.9664,
      "step": 5200
    },
    {
      "epoch": 100.0,
      "step": 5200,
      "total_flos": 1.4653197993726655e+20,
      "train_loss": 2.424026096050556,
      "train_runtime": 30676.4279,
      "train_samples_per_second": 10.875,
      "train_steps_per_second": 0.17
    }
  ],
  "max_steps": 5200,
  "num_train_epochs": 100,
  "total_flos": 1.4653197993726655e+20,
  "trial_name": null,
  "trial_params": null
}