{
  "best_metric": 14.535443909153475,
  "best_model_checkpoint": "whisper-small-nl/checkpoint-10000",
  "epoch": 14.0372,
  "global_step": 10000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.24,
      "learning_rate": 9.980000000000001e-06,
      "loss": 0.7181,
      "step": 500
    },
    {
      "epoch": 2.49,
      "learning_rate": 8.891111111111111e-06,
      "loss": 0.2045,
      "step": 1000
    },
    {
      "epoch": 2.49,
      "eval_loss": 0.31939101219177246,
      "eval_runtime": 852.4844,
      "eval_samples_per_second": 5.865,
      "eval_steps_per_second": 0.733,
      "eval_wer": 16.16284610090529,
      "step": 1000
    },
    {
      "epoch": 3.73,
      "learning_rate": 7.78e-06,
      "loss": 0.1182,
      "step": 1500
    },
    {
      "epoch": 4.97,
      "learning_rate": 6.668888888888889e-06,
      "loss": 0.0652,
      "step": 2000
    },
    {
      "epoch": 4.97,
      "eval_loss": 0.3425401449203491,
      "eval_runtime": 853.9079,
      "eval_samples_per_second": 5.855,
      "eval_steps_per_second": 0.732,
      "eval_wer": 16.367197840012707,
      "step": 2000
    },
    {
      "epoch": 6.22,
      "learning_rate": 5.557777777777778e-06,
      "loss": 0.0318,
      "step": 2500
    },
    {
      "epoch": 7.46,
      "learning_rate": 4.446666666666667e-06,
      "loss": 0.0167,
      "step": 3000
    },
    {
      "epoch": 7.46,
      "eval_loss": 0.3914715647697449,
      "eval_runtime": 851.1992,
      "eval_samples_per_second": 5.874,
      "eval_steps_per_second": 0.734,
      "eval_wer": 15.818730478056011,
      "step": 3000
    },
    {
      "epoch": 8.71,
      "learning_rate": 3.335555555555556e-06,
      "loss": 0.0096,
      "step": 3500
    },
    {
      "epoch": 9.95,
      "learning_rate": 2.2244444444444447e-06,
      "loss": 0.0064,
      "step": 4000
    },
    {
      "epoch": 9.95,
      "eval_loss": 0.41895967721939087,
      "eval_runtime": 850.8744,
      "eval_samples_per_second": 5.876,
      "eval_steps_per_second": 0.735,
      "eval_wer": 15.729789824765735,
      "step": 4000
    },
    {
      "epoch": 1.01,
      "learning_rate": 5.790526315789474e-06,
      "loss": 0.1798,
      "step": 4500
    },
    {
      "epoch": 2.02,
      "learning_rate": 5.26421052631579e-06,
      "loss": 0.1966,
      "step": 5000
    },
    {
      "epoch": 2.02,
      "eval_loss": 0.32975926995277405,
      "eval_runtime": 1015.8008,
      "eval_samples_per_second": 4.922,
      "eval_steps_per_second": 0.615,
      "eval_wer": 15.088146540314467,
      "step": 5000
    },
    {
      "epoch": 3.03,
      "learning_rate": 4.737894736842106e-06,
      "loss": 0.1605,
      "step": 5500
    },
    {
      "epoch": 4.04,
      "learning_rate": 4.211578947368422e-06,
      "loss": 0.1912,
      "step": 6000
    },
    {
      "epoch": 4.04,
      "eval_loss": 0.3265514671802521,
      "eval_runtime": 1016.6509,
      "eval_samples_per_second": 4.918,
      "eval_steps_per_second": 0.615,
      "eval_wer": 14.876383080099528,
      "step": 6000
    },
    {
      "epoch": 6.01,
      "learning_rate": 3.685263157894737e-06,
      "loss": 0.1355,
      "step": 6500
    },
    {
      "epoch": 7.02,
      "learning_rate": 3.1600000000000002e-06,
      "loss": 0.1008,
      "step": 7000
    },
    {
      "epoch": 7.02,
      "eval_loss": 0.3261025547981262,
      "eval_runtime": 1005.7569,
      "eval_samples_per_second": 4.971,
      "eval_steps_per_second": 0.621,
      "eval_wer": 14.808618772830748,
      "step": 7000
    },
    {
      "epoch": 8.03,
      "learning_rate": 2.633684210526316e-06,
      "loss": 0.1092,
      "step": 7500
    },
    {
      "epoch": 9.04,
      "learning_rate": 2.1073684210526317e-06,
      "loss": 0.0899,
      "step": 8000
    },
    {
      "epoch": 9.04,
      "eval_loss": 0.31958696246147156,
      "eval_runtime": 1016.386,
      "eval_samples_per_second": 4.919,
      "eval_steps_per_second": 0.615,
      "eval_wer": 14.648737360368468,
      "step": 8000
    },
    {
      "epoch": 11.01,
      "learning_rate": 1.5810526315789477e-06,
      "loss": 0.0683,
      "step": 8500
    },
    {
      "epoch": 12.02,
      "learning_rate": 1.0547368421052632e-06,
      "loss": 0.1126,
      "step": 9000
    },
    {
      "epoch": 12.02,
      "eval_loss": 0.3282873332500458,
      "eval_runtime": 1020.4433,
      "eval_samples_per_second": 4.9,
      "eval_steps_per_second": 0.612,
      "eval_wer": 14.589443591508285,
      "step": 9000
    },
    {
      "epoch": 13.03,
      "learning_rate": 5.284210526315789e-07,
      "loss": 0.0853,
      "step": 9500
    },
    {
      "epoch": 14.04,
      "learning_rate": 2.105263157894737e-09,
      "loss": 0.1071,
      "step": 10000
    },
    {
      "epoch": 14.04,
      "eval_loss": 0.30335742235183716,
      "eval_runtime": 1026.6502,
      "eval_samples_per_second": 4.87,
      "eval_steps_per_second": 0.609,
      "eval_wer": 14.535443909153475,
      "step": 10000
    },
    {
      "epoch": 14.04,
      "step": 10000,
      "total_flos": 3.699734092598477e+20,
      "train_loss": 0.0768378963470459,
      "train_runtime": 74057.892,
      "train_samples_per_second": 17.284,
      "train_steps_per_second": 0.135
    }
  ],
  "max_steps": 10000,
  "num_train_epochs": 9223372036854775807,
  "total_flos": 3.699734092598477e+20,
  "trial_name": null,
  "trial_params": null
}