{
  "best_metric": 8.153910529519171,
  "best_model_checkpoint": "../Whisper-squeezeformer-v6\\checkpoint-30000",
  "epoch": 12.0,
  "eval_steps": 2500,
  "global_step": 30000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 12.83377456665039,
      "learning_rate": 9.972e-06,
      "loss": 4.7944,
      "step": 2500
    },
    {
      "epoch": 1.0,
      "eval_loss": 3.786200523376465,
      "eval_runtime": 867.735,
      "eval_samples_per_second": 3.019,
      "eval_steps_per_second": 0.378,
      "eval_wer": 136.5585057821059,
      "step": 2500
    },
    {
      "epoch": 2.0,
      "grad_norm": 14.687960624694824,
      "learning_rate": 8.575428571428573e-06,
      "loss": 3.8012,
      "step": 5000
    },
    {
      "epoch": 2.0,
      "eval_loss": 2.992347478866577,
      "eval_runtime": 676.1432,
      "eval_samples_per_second": 3.875,
      "eval_steps_per_second": 0.485,
      "eval_wer": 107.02982349360926,
      "step": 5000
    },
    {
      "epoch": 3.0,
      "grad_norm": 8.66879653930664,
      "learning_rate": 7.146857142857143e-06,
      "loss": 0.7205,
      "step": 7500
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.2924005389213562,
      "eval_runtime": 675.8874,
      "eval_samples_per_second": 3.876,
      "eval_steps_per_second": 0.485,
      "eval_wer": 17.247413268411442,
      "step": 7500
    },
    {
      "epoch": 4.0,
      "grad_norm": 5.629953861236572,
      "learning_rate": 5.718285714285715e-06,
      "loss": 0.1473,
      "step": 10000
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.23985154926776886,
      "eval_runtime": 594.3096,
      "eval_samples_per_second": 4.408,
      "eval_steps_per_second": 0.552,
      "eval_wer": 13.827601947656726,
      "step": 10000
    },
    {
      "epoch": 5.0,
      "grad_norm": 5.151638031005859,
      "learning_rate": 4.289714285714286e-06,
      "loss": 0.0847,
      "step": 12500
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.22282332181930542,
      "eval_runtime": 585.3758,
      "eval_samples_per_second": 4.476,
      "eval_steps_per_second": 0.56,
      "eval_wer": 11.767726719415702,
      "step": 12500
    },
    {
      "epoch": 6.0,
      "grad_norm": 3.3535683155059814,
      "learning_rate": 2.861142857142857e-06,
      "loss": 0.0505,
      "step": 15000
    },
    {
      "epoch": 6.0,
      "eval_loss": 0.2200464904308319,
      "eval_runtime": 645.3171,
      "eval_samples_per_second": 4.06,
      "eval_steps_per_second": 0.508,
      "eval_wer": 12.20709068776628,
      "step": 15000
    },
    {
      "epoch": 7.0,
      "grad_norm": 6.204869747161865,
      "learning_rate": 4.548727272727273e-06,
      "loss": 0.1802,
      "step": 17500
    },
    {
      "epoch": 7.0,
      "eval_loss": 0.17816144227981567,
      "eval_runtime": 603.0644,
      "eval_samples_per_second": 4.344,
      "eval_steps_per_second": 0.544,
      "eval_wer": 9.755401704199635,
      "step": 17500
    },
    {
      "epoch": 8.0,
      "grad_norm": 4.365288734436035,
      "learning_rate": 3.6400000000000003e-06,
      "loss": 0.107,
      "step": 20000
    },
    {
      "epoch": 8.0,
      "eval_loss": 0.1680830866098404,
      "eval_runtime": 599.7782,
      "eval_samples_per_second": 4.368,
      "eval_steps_per_second": 0.547,
      "eval_wer": 9.196211199026171,
      "step": 20000
    },
    {
      "epoch": 9.0,
      "grad_norm": 3.9741170406341553,
      "learning_rate": 2.7309090909090914e-06,
      "loss": 0.0693,
      "step": 22500
    },
    {
      "epoch": 9.0,
      "eval_loss": 0.1669154316186905,
      "eval_runtime": 646.0215,
      "eval_samples_per_second": 4.056,
      "eval_steps_per_second": 0.508,
      "eval_wer": 8.220480827754107,
      "step": 22500
    },
    {
      "epoch": 10.0,
      "grad_norm": 3.504108428955078,
      "learning_rate": 1.8221818181818182e-06,
      "loss": 0.0462,
      "step": 25000
    },
    {
      "epoch": 10.0,
      "eval_loss": 0.16997282207012177,
      "eval_runtime": 581.1384,
      "eval_samples_per_second": 4.508,
      "eval_steps_per_second": 0.564,
      "eval_wer": 8.9603621424224,
      "step": 25000
    },
    {
      "epoch": 11.0,
      "grad_norm": 3.334559679031372,
      "learning_rate": 9.130909090909091e-07,
      "loss": 0.032,
      "step": 27500
    },
    {
      "epoch": 11.0,
      "eval_loss": 0.17454640567302704,
      "eval_runtime": 579.2297,
      "eval_samples_per_second": 4.523,
      "eval_steps_per_second": 0.566,
      "eval_wer": 8.595176506390748,
      "step": 27500
    },
    {
      "epoch": 12.0,
      "grad_norm": 2.283846616744995,
      "learning_rate": 4.363636363636364e-09,
      "loss": 0.024,
      "step": 30000
    },
    {
      "epoch": 12.0,
      "eval_loss": 0.17751750349998474,
      "eval_runtime": 574.699,
      "eval_samples_per_second": 4.559,
      "eval_steps_per_second": 0.571,
      "eval_wer": 8.153910529519171,
      "step": 30000
    },
    {
      "epoch": 12.0,
      "step": 30000,
      "total_flos": 2.07995682816e+20,
      "train_loss": 0.03824057401021322,
      "train_runtime": 72024.4779,
      "train_samples_per_second": 8.331,
      "train_steps_per_second": 0.417
    }
  ],
  "logging_steps": 2500,
  "max_steps": 30000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 12,
  "save_steps": 2500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.07995682816e+20,
  "train_batch_size": 20,
  "trial_name": null,
  "trial_params": null
}