{
  "best_metric": 108.23912626940026,
  "best_model_checkpoint": "./whisper-small-ha-v2/checkpoint-1000",
  "epoch": 12.738853503184714,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1592356687898089,
      "grad_norm": 19.899324417114258,
      "learning_rate": 0.00021,
      "loss": 4.2616,
      "step": 25
    },
    {
      "epoch": 0.3184713375796178,
      "grad_norm": 14.790164947509766,
      "learning_rate": 0.00046,
      "loss": 2.8345,
      "step": 50
    },
    {
      "epoch": 0.47770700636942676,
      "grad_norm": 56.03984451293945,
      "learning_rate": 0.0005,
      "loss": 3.4138,
      "step": 75
    },
    {
      "epoch": 0.6369426751592356,
      "grad_norm": 10.28797721862793,
      "learning_rate": 0.0005,
      "loss": 3.5843,
      "step": 100
    },
    {
      "epoch": 0.7961783439490446,
      "grad_norm": 8.962028503417969,
      "learning_rate": 0.0005,
      "loss": 3.5061,
      "step": 125
    },
    {
      "epoch": 0.9554140127388535,
      "grad_norm": 7.9880499839782715,
      "learning_rate": 0.0005,
      "loss": 3.4731,
      "step": 150
    },
    {
      "epoch": 1.1146496815286624,
      "grad_norm": 8.54383659362793,
      "learning_rate": 0.0005,
      "loss": 2.8313,
      "step": 175
    },
    {
      "epoch": 1.2738853503184713,
      "grad_norm": 8.352940559387207,
      "learning_rate": 0.0005,
      "loss": 2.7241,
      "step": 200
    },
    {
      "epoch": 1.4331210191082802,
      "grad_norm": 10.003473281860352,
      "learning_rate": 0.0005,
      "loss": 2.8259,
      "step": 225
    },
    {
      "epoch": 1.5923566878980893,
      "grad_norm": 7.971159934997559,
      "learning_rate": 0.0005,
      "loss": 2.865,
      "step": 250
    },
    {
      "epoch": 1.7515923566878981,
      "grad_norm": 7.780789375305176,
      "learning_rate": 0.0005,
      "loss": 2.813,
      "step": 275
    },
    {
      "epoch": 1.910828025477707,
      "grad_norm": 7.645856857299805,
      "learning_rate": 0.0005,
      "loss": 2.911,
      "step": 300
    },
    {
      "epoch": 2.070063694267516,
      "grad_norm": 7.892724514007568,
      "learning_rate": 0.0005,
      "loss": 2.4582,
      "step": 325
    },
    {
      "epoch": 2.229299363057325,
      "grad_norm": 7.8490681648254395,
      "learning_rate": 0.0005,
      "loss": 1.9971,
      "step": 350
    },
    {
      "epoch": 2.388535031847134,
      "grad_norm": 8.876250267028809,
      "learning_rate": 0.0005,
      "loss": 2.1415,
      "step": 375
    },
    {
      "epoch": 2.5477707006369426,
      "grad_norm": 9.139019966125488,
      "learning_rate": 0.0005,
      "loss": 2.2764,
      "step": 400
    },
    {
      "epoch": 2.7070063694267517,
      "grad_norm": 9.341145515441895,
      "learning_rate": 0.0005,
      "loss": 2.3182,
      "step": 425
    },
    {
      "epoch": 2.8662420382165603,
      "grad_norm": 7.993480682373047,
      "learning_rate": 0.0005,
      "loss": 2.3467,
      "step": 450
    },
    {
      "epoch": 3.0254777070063694,
      "grad_norm": 7.322677135467529,
      "learning_rate": 0.0005,
      "loss": 2.1176,
      "step": 475
    },
    {
      "epoch": 3.1847133757961785,
      "grad_norm": 7.923095226287842,
      "learning_rate": 0.0005,
      "loss": 1.5509,
      "step": 500
    },
    {
      "epoch": 3.1847133757961785,
      "eval_loss": 3.9122631549835205,
      "eval_runtime": 241.8174,
      "eval_samples_per_second": 2.729,
      "eval_steps_per_second": 0.174,
      "eval_wer": 134.7576164016095,
      "eval_wer_ortho": 136.11328125,
      "step": 500
    },
    {
      "epoch": 3.343949044585987,
      "grad_norm": 7.589674472808838,
      "learning_rate": 0.0005,
      "loss": 1.6755,
      "step": 525
    },
    {
      "epoch": 3.5031847133757963,
      "grad_norm": 7.347910404205322,
      "learning_rate": 0.0005,
      "loss": 1.7794,
      "step": 550
    },
    {
      "epoch": 3.662420382165605,
      "grad_norm": 7.744343280792236,
      "learning_rate": 0.0005,
      "loss": 1.8251,
      "step": 575
    },
    {
      "epoch": 3.821656050955414,
      "grad_norm": 7.262452125549316,
      "learning_rate": 0.0005,
      "loss": 1.8631,
      "step": 600
    },
    {
      "epoch": 3.980891719745223,
      "grad_norm": 7.604316711425781,
      "learning_rate": 0.0005,
      "loss": 1.9072,
      "step": 625
    },
    {
      "epoch": 4.140127388535032,
      "grad_norm": 6.618544578552246,
      "learning_rate": 0.0005,
      "loss": 1.3487,
      "step": 650
    },
    {
      "epoch": 4.2993630573248405,
      "grad_norm": 7.4200239181518555,
      "learning_rate": 0.0005,
      "loss": 1.347,
      "step": 675
    },
    {
      "epoch": 4.45859872611465,
      "grad_norm": 7.097377777099609,
      "learning_rate": 0.0005,
      "loss": 1.4564,
      "step": 700
    },
    {
      "epoch": 4.617834394904459,
      "grad_norm": 7.271367073059082,
      "learning_rate": 0.0005,
      "loss": 1.5359,
      "step": 725
    },
    {
      "epoch": 4.777070063694268,
      "grad_norm": 7.738811492919922,
      "learning_rate": 0.0005,
      "loss": 1.564,
      "step": 750
    },
    {
      "epoch": 4.936305732484076,
      "grad_norm": 7.1541361808776855,
      "learning_rate": 0.0005,
      "loss": 1.6548,
      "step": 775
    },
    {
      "epoch": 5.095541401273885,
      "grad_norm": 6.448052883148193,
      "learning_rate": 0.0005,
      "loss": 1.3242,
      "step": 800
    },
    {
      "epoch": 5.254777070063694,
      "grad_norm": 6.672939300537109,
      "learning_rate": 0.0005,
      "loss": 1.2112,
      "step": 825
    },
    {
      "epoch": 5.414012738853503,
      "grad_norm": 6.929521083831787,
      "learning_rate": 0.0005,
      "loss": 1.246,
      "step": 850
    },
    {
      "epoch": 5.573248407643312,
      "grad_norm": 7.8832197189331055,
      "learning_rate": 0.0005,
      "loss": 1.3477,
      "step": 875
    },
    {
      "epoch": 5.732484076433121,
      "grad_norm": 6.741547107696533,
      "learning_rate": 0.0005,
      "loss": 1.3434,
      "step": 900
    },
    {
      "epoch": 5.89171974522293,
      "grad_norm": 6.631527423858643,
      "learning_rate": 0.0005,
      "loss": 1.4936,
      "step": 925
    },
    {
      "epoch": 6.050955414012739,
      "grad_norm": 6.012191295623779,
      "learning_rate": 0.0005,
      "loss": 1.3185,
      "step": 950
    },
    {
      "epoch": 6.210191082802548,
      "grad_norm": 6.420320987701416,
      "learning_rate": 0.0005,
      "loss": 1.0981,
      "step": 975
    },
    {
      "epoch": 6.369426751592357,
      "grad_norm": 6.100344181060791,
      "learning_rate": 0.0005,
      "loss": 1.1254,
      "step": 1000
    },
    {
      "epoch": 6.369426751592357,
      "eval_loss": 4.613819599151611,
      "eval_runtime": 232.7229,
      "eval_samples_per_second": 2.836,
      "eval_steps_per_second": 0.18,
      "eval_wer": 108.23912626940026,
      "eval_wer_ortho": 104.8828125,
      "step": 1000
    },
    {
      "epoch": 6.528662420382165,
      "grad_norm": 7.62544584274292,
      "learning_rate": 0.0005,
      "loss": 1.2221,
      "step": 1025
    },
    {
      "epoch": 6.687898089171974,
      "grad_norm": 6.837728977203369,
      "learning_rate": 0.0005,
      "loss": 1.2167,
      "step": 1050
    },
    {
      "epoch": 6.8471337579617835,
      "grad_norm": 7.663355827331543,
      "learning_rate": 0.0005,
      "loss": 1.2218,
      "step": 1075
    },
    {
      "epoch": 7.006369426751593,
      "grad_norm": 6.275888919830322,
      "learning_rate": 0.0005,
      "loss": 1.2511,
      "step": 1100
    },
    {
      "epoch": 7.165605095541402,
      "grad_norm": 5.376118183135986,
      "learning_rate": 0.0005,
      "loss": 1.0101,
      "step": 1125
    },
    {
      "epoch": 7.32484076433121,
      "grad_norm": 12.314349174499512,
      "learning_rate": 0.0005,
      "loss": 1.0479,
      "step": 1150
    },
    {
      "epoch": 7.484076433121019,
      "grad_norm": 6.386078834533691,
      "learning_rate": 0.0005,
      "loss": 1.075,
      "step": 1175
    },
    {
      "epoch": 7.643312101910828,
      "grad_norm": 6.5437912940979,
      "learning_rate": 0.0005,
      "loss": 1.1115,
      "step": 1200
    },
    {
      "epoch": 7.802547770700637,
      "grad_norm": 6.102975368499756,
      "learning_rate": 0.0005,
      "loss": 1.1557,
      "step": 1225
    },
    {
      "epoch": 7.961783439490446,
      "grad_norm": 5.722137451171875,
      "learning_rate": 0.0005,
      "loss": 1.1659,
      "step": 1250
    },
    {
      "epoch": 8.121019108280255,
      "grad_norm": 6.168821811676025,
      "learning_rate": 0.0005,
      "loss": 1.0219,
      "step": 1275
    },
    {
      "epoch": 8.280254777070065,
      "grad_norm": 5.716577053070068,
      "learning_rate": 0.0005,
      "loss": 0.9961,
      "step": 1300
    },
    {
      "epoch": 8.439490445859873,
      "grad_norm": 5.789382457733154,
      "learning_rate": 0.0005,
      "loss": 1.0159,
      "step": 1325
    },
    {
      "epoch": 8.598726114649681,
      "grad_norm": 6.052881717681885,
      "learning_rate": 0.0005,
      "loss": 1.0636,
      "step": 1350
    },
    {
      "epoch": 8.757961783439491,
      "grad_norm": 5.608780384063721,
      "learning_rate": 0.0005,
      "loss": 1.0547,
      "step": 1375
    },
    {
      "epoch": 8.9171974522293,
      "grad_norm": 6.0210747718811035,
      "learning_rate": 0.0005,
      "loss": 1.0525,
      "step": 1400
    },
    {
      "epoch": 9.07643312101911,
      "grad_norm": 5.303377151489258,
      "learning_rate": 0.0005,
      "loss": 0.9966,
      "step": 1425
    },
    {
      "epoch": 9.235668789808917,
      "grad_norm": 5.898992538452148,
      "learning_rate": 0.0005,
      "loss": 0.913,
      "step": 1450
    },
    {
      "epoch": 9.394904458598726,
      "grad_norm": 5.273637771606445,
      "learning_rate": 0.0005,
      "loss": 0.9564,
      "step": 1475
    },
    {
      "epoch": 9.554140127388536,
      "grad_norm": 5.149616718292236,
      "learning_rate": 0.0005,
      "loss": 0.9898,
      "step": 1500
    },
    {
      "epoch": 9.554140127388536,
      "eval_loss": 5.00140905380249,
      "eval_runtime": 243.3495,
      "eval_samples_per_second": 2.712,
      "eval_steps_per_second": 0.173,
      "eval_wer": 123.87430542249473,
      "eval_wer_ortho": 125.99609375,
      "step": 1500
    },
    {
      "epoch": 9.713375796178344,
      "grad_norm": 5.37247371673584,
      "learning_rate": 0.0005,
      "loss": 1.043,
      "step": 1525
    },
    {
      "epoch": 9.872611464968152,
      "grad_norm": 5.714049816131592,
      "learning_rate": 0.0005,
      "loss": 1.0139,
      "step": 1550
    },
    {
      "epoch": 10.031847133757962,
      "grad_norm": 5.447414398193359,
      "learning_rate": 0.0005,
      "loss": 0.9852,
      "step": 1575
    },
    {
      "epoch": 10.19108280254777,
      "grad_norm": 4.800140380859375,
      "learning_rate": 0.0005,
      "loss": 0.8618,
      "step": 1600
    },
    {
      "epoch": 10.35031847133758,
      "grad_norm": 4.940687656402588,
      "learning_rate": 0.0005,
      "loss": 0.9099,
      "step": 1625
    },
    {
      "epoch": 10.509554140127388,
      "grad_norm": 5.976550579071045,
      "learning_rate": 0.0005,
      "loss": 0.9144,
      "step": 1650
    },
    {
      "epoch": 10.668789808917197,
      "grad_norm": 5.915425777435303,
      "learning_rate": 0.0005,
      "loss": 0.9963,
      "step": 1675
    },
    {
      "epoch": 10.828025477707007,
      "grad_norm": 6.186098575592041,
      "learning_rate": 0.0005,
      "loss": 0.9809,
      "step": 1700
    },
    {
      "epoch": 10.987261146496815,
      "grad_norm": 6.102351665496826,
      "learning_rate": 0.0005,
      "loss": 0.9992,
      "step": 1725
    },
    {
      "epoch": 11.146496815286625,
      "grad_norm": 5.012561798095703,
      "learning_rate": 0.0005,
      "loss": 0.8408,
      "step": 1750
    },
    {
      "epoch": 11.305732484076433,
      "grad_norm": 4.676059246063232,
      "learning_rate": 0.0005,
      "loss": 0.838,
      "step": 1775
    },
    {
      "epoch": 11.464968152866241,
      "grad_norm": 4.5027689933776855,
      "learning_rate": 0.0005,
      "loss": 0.8774,
      "step": 1800
    },
    {
      "epoch": 11.624203821656051,
      "grad_norm": 5.583326816558838,
      "learning_rate": 0.0005,
      "loss": 0.9229,
      "step": 1825
    },
    {
      "epoch": 11.78343949044586,
      "grad_norm": 5.260093688964844,
      "learning_rate": 0.0005,
      "loss": 0.9436,
      "step": 1850
    },
    {
      "epoch": 11.94267515923567,
      "grad_norm": 4.777396202087402,
      "learning_rate": 0.0005,
      "loss": 0.949,
      "step": 1875
    },
    {
      "epoch": 12.101910828025478,
      "grad_norm": 4.361011028289795,
      "learning_rate": 0.0005,
      "loss": 0.8302,
      "step": 1900
    },
    {
      "epoch": 12.261146496815286,
      "grad_norm": 4.748295783996582,
      "learning_rate": 0.0005,
      "loss": 0.843,
      "step": 1925
    },
    {
      "epoch": 12.420382165605096,
      "grad_norm": 5.090404510498047,
      "learning_rate": 0.0005,
      "loss": 0.8528,
      "step": 1950
    },
    {
      "epoch": 12.579617834394904,
      "grad_norm": 5.081445217132568,
      "learning_rate": 0.0005,
      "loss": 0.8623,
      "step": 1975
    },
    {
      "epoch": 12.738853503184714,
      "grad_norm": 5.345047950744629,
      "learning_rate": 0.0005,
      "loss": 0.892,
      "step": 2000
    },
    {
      "epoch": 12.738853503184714,
      "eval_loss": 4.75683069229126,
      "eval_runtime": 241.3599,
      "eval_samples_per_second": 2.735,
      "eval_steps_per_second": 0.174,
      "eval_wer": 114.79210576738839,
      "eval_wer_ortho": 118.00781250000001,
      "step": 2000
    }
  ],
  "logging_steps": 25,
  "max_steps": 2355,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 15,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 9.22088071102464e+18,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}