{
  "best_metric": 29.205723913714138,
  "best_model_checkpoint": "./hamsa-pretrained/checkpoint-35000",
  "epoch": 0.17544536787336215,
  "eval_steps": 1000,
  "global_step": 35001,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 1.0500000000000001e-05, |
|
"loss": 4.6001, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 2.3e-05, |
|
"loss": 2.9857, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 3.5499999999999996e-05, |
|
"loss": 2.8432, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 4.7000000000000004e-05, |
|
"loss": 3.0861, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 5.9499999999999996e-05, |
|
"loss": 1.3867, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 7.2e-05, |
|
"loss": 1.307, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 8.450000000000001e-05, |
|
"loss": 1.3456, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 9.7e-05, |
|
"loss": 1.3625, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.0001095, |
|
"loss": 1.4828, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.000122, |
|
"loss": 1.653, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.00013450000000000002, |
|
"loss": 1.527, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.000147, |
|
"loss": 1.6017, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.0001595, |
|
"loss": 1.7962, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.00017199999999999998, |
|
"loss": 1.7863, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.0001845, |
|
"loss": 1.751, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.00019700000000000002, |
|
"loss": 1.767, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.0002095, |
|
"loss": 1.8878, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.000222, |
|
"loss": 2.1027, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.00023449999999999998, |
|
"loss": 2.6898, |
|
"step": 475 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.000247, |
|
"loss": 2.2361, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.0002498623188405797, |
|
"loss": 2.1121, |
|
"step": 525 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.0002496811594202899, |
|
"loss": 1.9875, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.0002495, |
|
"loss": 2.0148, |
|
"step": 575 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.00024931884057971017, |
|
"loss": 1.8997, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.0002491376811594203, |
|
"loss": 1.9364, |
|
"step": 625 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.00024895652173913046, |
|
"loss": 1.9569, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.0002487753623188406, |
|
"loss": 1.9624, |
|
"step": 675 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.0002485942028985507, |
|
"loss": 1.9502, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.0002484130434782609, |
|
"loss": 1.9584, |
|
"step": 725 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.000248231884057971, |
|
"loss": 2.0121, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.00024805072463768117, |
|
"loss": 1.8849, |
|
"step": 775 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.00024786956521739134, |
|
"loss": 1.9201, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.00024768840579710147, |
|
"loss": 1.9334, |
|
"step": 825 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.0002475072463768116, |
|
"loss": 1.9636, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.00024732608695652176, |
|
"loss": 1.8136, |
|
"step": 875 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.0002471449275362319, |
|
"loss": 1.8495, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.00024696376811594205, |
|
"loss": 1.8316, |
|
"step": 925 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.0002467826086956522, |
|
"loss": 1.7647, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.00024660144927536235, |
|
"loss": 1.7411, |
|
"step": 975 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00024642028985507247, |
|
"loss": 1.895, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"eval_loss": 1.876536250114441, |
|
"eval_runtime": 5942.4913, |
|
"eval_samples_per_second": 0.842, |
|
"eval_steps_per_second": 0.211, |
|
"eval_wer": 86.70115569899617, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0002462391304347826, |
|
"loss": 1.7266, |
|
"step": 1025 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00024605797101449276, |
|
"loss": 1.7269, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00024587681159420294, |
|
"loss": 1.8598, |
|
"step": 1075 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00024569565217391306, |
|
"loss": 1.6687, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0002455144927536232, |
|
"loss": 1.8505, |
|
"step": 1125 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00024533333333333335, |
|
"loss": 1.7166, |
|
"step": 1150 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00024515217391304347, |
|
"loss": 1.6826, |
|
"step": 1175 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00024497101449275365, |
|
"loss": 1.8421, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00024478985507246377, |
|
"loss": 1.7725, |
|
"step": 1225 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00024460869565217394, |
|
"loss": 1.7464, |
|
"step": 1250 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00024442753623188406, |
|
"loss": 1.7435, |
|
"step": 1275 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0002442463768115942, |
|
"loss": 1.7296, |
|
"step": 1300 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00024406521739130435, |
|
"loss": 1.8299, |
|
"step": 1325 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0002438840579710145, |
|
"loss": 1.7485, |
|
"step": 1350 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00024370289855072465, |
|
"loss": 1.7177, |
|
"step": 1375 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0002435217391304348, |
|
"loss": 1.7324, |
|
"step": 1400 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00024334057971014492, |
|
"loss": 1.7653, |
|
"step": 1425 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00024315942028985506, |
|
"loss": 1.7578, |
|
"step": 1450 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00024297826086956524, |
|
"loss": 1.7259, |
|
"step": 1475 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00024279710144927536, |
|
"loss": 1.7141, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0002426159420289855, |
|
"loss": 1.7828, |
|
"step": 1525 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00024243478260869568, |
|
"loss": 1.6833, |
|
"step": 1550 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0002422536231884058, |
|
"loss": 1.7581, |
|
"step": 1575 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00024207246376811595, |
|
"loss": 1.6707, |
|
"step": 1600 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00024189130434782607, |
|
"loss": 1.6099, |
|
"step": 1625 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00024171014492753624, |
|
"loss": 1.6632, |
|
"step": 1650 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0002415289855072464, |
|
"loss": 1.7326, |
|
"step": 1675 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0002413478260869565, |
|
"loss": 1.7251, |
|
"step": 1700 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00024116666666666668, |
|
"loss": 1.655, |
|
"step": 1725 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00024098550724637683, |
|
"loss": 1.7368, |
|
"step": 1750 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00024080434782608695, |
|
"loss": 1.7527, |
|
"step": 1775 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0002406231884057971, |
|
"loss": 1.6496, |
|
"step": 1800 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00024044202898550724, |
|
"loss": 1.7439, |
|
"step": 1825 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0002402608695652174, |
|
"loss": 1.6974, |
|
"step": 1850 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00024007971014492754, |
|
"loss": 1.5964, |
|
"step": 1875 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00023989855072463769, |
|
"loss": 1.5731, |
|
"step": 1900 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00023971739130434783, |
|
"loss": 1.5764, |
|
"step": 1925 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00023953623188405798, |
|
"loss": 1.7154, |
|
"step": 1950 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0002393550724637681, |
|
"loss": 1.7839, |
|
"step": 1975 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00023917391304347828, |
|
"loss": 1.6569, |
|
"step": 2000 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"eval_loss": 1.5809087753295898, |
|
"eval_runtime": 6451.2844, |
|
"eval_samples_per_second": 0.776, |
|
"eval_steps_per_second": 0.194, |
|
"eval_wer": 84.0907472887349, |
|
"step": 2000 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0002389927536231884, |
|
"loss": 1.6677, |
|
"step": 2025 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00023881159420289854, |
|
"loss": 1.6182, |
|
"step": 2050 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00023863043478260872, |
|
"loss": 1.6538, |
|
"step": 2075 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00023844927536231884, |
|
"loss": 1.6372, |
|
"step": 2100 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00023826811594202898, |
|
"loss": 1.5046, |
|
"step": 2125 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00023808695652173916, |
|
"loss": 1.5537, |
|
"step": 2150 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00023790579710144928, |
|
"loss": 1.6118, |
|
"step": 2175 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00023772463768115943, |
|
"loss": 1.5437, |
|
"step": 2200 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00023754347826086957, |
|
"loss": 1.6297, |
|
"step": 2225 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00023736231884057972, |
|
"loss": 1.5116, |
|
"step": 2250 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00023718115942028987, |
|
"loss": 1.5196, |
|
"step": 2275 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.000237, |
|
"loss": 1.5372, |
|
"step": 2300 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00023681884057971016, |
|
"loss": 1.5344, |
|
"step": 2325 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0002366376811594203, |
|
"loss": 1.5114, |
|
"step": 2350 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00023645652173913043, |
|
"loss": 1.5755, |
|
"step": 2375 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00023627536231884058, |
|
"loss": 1.4815, |
|
"step": 2400 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00023609420289855075, |
|
"loss": 1.4123, |
|
"step": 2425 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00023591304347826087, |
|
"loss": 1.5506, |
|
"step": 2450 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00023573188405797102, |
|
"loss": 1.5217, |
|
"step": 2475 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00023555072463768116, |
|
"loss": 1.5644, |
|
"step": 2500 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0002353695652173913, |
|
"loss": 1.5734, |
|
"step": 2525 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00023518840579710146, |
|
"loss": 1.5853, |
|
"step": 2550 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00023500724637681158, |
|
"loss": 1.4974, |
|
"step": 2575 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00023482608695652175, |
|
"loss": 1.4737, |
|
"step": 2600 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0002346449275362319, |
|
"loss": 1.4675, |
|
"step": 2625 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00023446376811594202, |
|
"loss": 1.5251, |
|
"step": 2650 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0002342826086956522, |
|
"loss": 1.5386, |
|
"step": 2675 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00023410144927536232, |
|
"loss": 1.5002, |
|
"step": 2700 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00023392028985507246, |
|
"loss": 1.375, |
|
"step": 2725 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0002337391304347826, |
|
"loss": 1.519, |
|
"step": 2750 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00023355797101449276, |
|
"loss": 1.4296, |
|
"step": 2775 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0002333768115942029, |
|
"loss": 1.4506, |
|
"step": 2800 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00023319565217391305, |
|
"loss": 1.4481, |
|
"step": 2825 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0002330144927536232, |
|
"loss": 1.4097, |
|
"step": 2850 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00023283333333333335, |
|
"loss": 1.5138, |
|
"step": 2875 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0002326521739130435, |
|
"loss": 1.5278, |
|
"step": 2900 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0002324710144927536, |
|
"loss": 1.3968, |
|
"step": 2925 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0002322898550724638, |
|
"loss": 1.3648, |
|
"step": 2950 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0002321086956521739, |
|
"loss": 1.4226, |
|
"step": 2975 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00023192753623188405, |
|
"loss": 1.3312, |
|
"step": 3000 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"eval_loss": 1.3458073139190674, |
|
"eval_runtime": 6200.517, |
|
"eval_samples_per_second": 0.807, |
|
"eval_steps_per_second": 0.202, |
|
"eval_wer": 75.70896319324142, |
|
"step": 3000 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00023174637681159423, |
|
"loss": 1.3902, |
|
"step": 3025 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00023156521739130435, |
|
"loss": 1.4262, |
|
"step": 3050 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0002313840579710145, |
|
"loss": 1.4074, |
|
"step": 3075 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00023120289855072464, |
|
"loss": 1.4074, |
|
"step": 3100 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0002310217391304348, |
|
"loss": 1.3735, |
|
"step": 3125 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00023084057971014494, |
|
"loss": 1.4296, |
|
"step": 3150 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00023065942028985506, |
|
"loss": 1.3597, |
|
"step": 3175 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00023047826086956523, |
|
"loss": 1.4012, |
|
"step": 3200 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00023029710144927538, |
|
"loss": 1.3194, |
|
"step": 3225 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0002301159420289855, |
|
"loss": 1.3787, |
|
"step": 3250 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022993478260869565, |
|
"loss": 1.334, |
|
"step": 3275 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022975362318840582, |
|
"loss": 1.3462, |
|
"step": 3300 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022957246376811594, |
|
"loss": 1.3108, |
|
"step": 3325 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0002293913043478261, |
|
"loss": 1.3052, |
|
"step": 3350 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022921014492753624, |
|
"loss": 1.3761, |
|
"step": 3375 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022902898550724638, |
|
"loss": 1.2893, |
|
"step": 3400 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022884782608695653, |
|
"loss": 1.345, |
|
"step": 3425 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022866666666666665, |
|
"loss": 1.2041, |
|
"step": 3450 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022848550724637682, |
|
"loss": 1.3416, |
|
"step": 3475 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022830434782608697, |
|
"loss": 1.2589, |
|
"step": 3500 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0002281231884057971, |
|
"loss": 1.4042, |
|
"step": 3525 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022794202898550727, |
|
"loss": 1.3195, |
|
"step": 3550 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022776086956521739, |
|
"loss": 1.2601, |
|
"step": 3575 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022757971014492753, |
|
"loss": 1.3014, |
|
"step": 3600 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022739855072463768, |
|
"loss": 1.2728, |
|
"step": 3625 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022721739130434783, |
|
"loss": 1.2994, |
|
"step": 3650 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022703623188405798, |
|
"loss": 1.2771, |
|
"step": 3675 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022685507246376812, |
|
"loss": 1.2256, |
|
"step": 3700 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022667391304347827, |
|
"loss": 1.4345, |
|
"step": 3725 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022649275362318842, |
|
"loss": 1.293, |
|
"step": 3750 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022631159420289856, |
|
"loss": 1.2223, |
|
"step": 3775 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022613043478260868, |
|
"loss": 1.2575, |
|
"step": 3800 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022594927536231886, |
|
"loss": 1.3047, |
|
"step": 3825 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022576811594202898, |
|
"loss": 1.2402, |
|
"step": 3850 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022558695652173913, |
|
"loss": 1.203, |
|
"step": 3875 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0002254057971014493, |
|
"loss": 1.1681, |
|
"step": 3900 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022522463768115942, |
|
"loss": 1.1496, |
|
"step": 3925 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022504347826086957, |
|
"loss": 1.258, |
|
"step": 3950 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022486231884057971, |
|
"loss": 1.1696, |
|
"step": 3975 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022468115942028986, |
|
"loss": 1.2369, |
|
"step": 4000 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"eval_loss": 1.2389202117919922, |
|
"eval_runtime": 6171.9451, |
|
"eval_samples_per_second": 0.811, |
|
"eval_steps_per_second": 0.203, |
|
"eval_wer": 73.13652435985667, |
|
"step": 4000 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0002245, |
|
"loss": 1.2727, |
|
"step": 4025 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022431884057971013, |
|
"loss": 1.2076, |
|
"step": 4050 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0002241376811594203, |
|
"loss": 1.3128, |
|
"step": 4075 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022395652173913045, |
|
"loss": 1.2745, |
|
"step": 4100 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022377536231884057, |
|
"loss": 1.2999, |
|
"step": 4125 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022359420289855072, |
|
"loss": 1.3311, |
|
"step": 4150 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0002234130434782609, |
|
"loss": 1.252, |
|
"step": 4175 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.000223231884057971, |
|
"loss": 1.2794, |
|
"step": 4200 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022305072463768116, |
|
"loss": 1.3587, |
|
"step": 4225 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0002228695652173913, |
|
"loss": 1.2422, |
|
"step": 4250 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022268840579710145, |
|
"loss": 1.2734, |
|
"step": 4275 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0002225072463768116, |
|
"loss": 1.1935, |
|
"step": 4300 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022232608695652172, |
|
"loss": 1.1646, |
|
"step": 4325 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0002221449275362319, |
|
"loss": 1.2976, |
|
"step": 4350 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022196376811594204, |
|
"loss": 1.2087, |
|
"step": 4375 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022178260869565216, |
|
"loss": 1.2437, |
|
"step": 4400 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022160144927536234, |
|
"loss": 1.2036, |
|
"step": 4425 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022142028985507248, |
|
"loss": 1.2467, |
|
"step": 4450 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0002212391304347826, |
|
"loss": 1.2779, |
|
"step": 4475 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022105797101449275, |
|
"loss": 1.2326, |
|
"step": 4500 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0002208768115942029, |
|
"loss": 1.1391, |
|
"step": 4525 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022069565217391305, |
|
"loss": 1.1322, |
|
"step": 4550 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0002205144927536232, |
|
"loss": 1.257, |
|
"step": 4575 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00022033333333333334, |
|
"loss": 1.1633, |
|
"step": 4600 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0002201521739130435, |
|
"loss": 1.174, |
|
"step": 4625 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00021997101449275363, |
|
"loss": 1.0957, |
|
"step": 4650 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00021978985507246375, |
|
"loss": 1.1833, |
|
"step": 4675 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00021960869565217393, |
|
"loss": 1.199, |
|
"step": 4700 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00021942753623188405, |
|
"loss": 1.2279, |
|
"step": 4725 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0002192463768115942, |
|
"loss": 1.1736, |
|
"step": 4750 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00021906521739130437, |
|
"loss": 1.1281, |
|
"step": 4775 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0002188840579710145, |
|
"loss": 1.1309, |
|
"step": 4800 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00021870289855072464, |
|
"loss": 1.2036, |
|
"step": 4825 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0002185217391304348, |
|
"loss": 1.1075, |
|
"step": 4850 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00021834057971014493, |
|
"loss": 1.099, |
|
"step": 4875 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00021815942028985508, |
|
"loss": 1.1406, |
|
"step": 4900 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0002179782608695652, |
|
"loss": 1.1683, |
|
"step": 4925 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00021779710144927537, |
|
"loss": 1.1565, |
|
"step": 4950 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00021761594202898552, |
|
"loss": 1.1336, |
|
"step": 4975 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00021743478260869564, |
|
"loss": 1.1518, |
|
"step": 5000 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"eval_loss": 1.1097463369369507, |
|
"eval_runtime": 6186.9062, |
|
"eval_samples_per_second": 0.809, |
|
"eval_steps_per_second": 0.202, |
|
"eval_wer": 66.81696290846959, |
|
"step": 5000 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00021725362318840582, |
|
"loss": 1.0662, |
|
"step": 5025 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00021707246376811596, |
|
"loss": 1.0916, |
|
"step": 5050 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00021689130434782608, |
|
"loss": 1.1352, |
|
"step": 5075 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00021671014492753623, |
|
"loss": 1.2055, |
|
"step": 5100 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00021652898550724638, |
|
"loss": 1.1505, |
|
"step": 5125 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00021634782608695652, |
|
"loss": 1.1587, |
|
"step": 5150 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00021616666666666667, |
|
"loss": 1.2408, |
|
"step": 5175 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00021598550724637682, |
|
"loss": 1.134, |
|
"step": 5200 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00021580434782608697, |
|
"loss": 1.0902, |
|
"step": 5225 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0002156231884057971, |
|
"loss": 1.1215, |
|
"step": 5250 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00021544202898550723, |
|
"loss": 1.0944, |
|
"step": 5275 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0002152608695652174, |
|
"loss": 1.0846, |
|
"step": 5300 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00021507971014492755, |
|
"loss": 1.1319, |
|
"step": 5325 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00021489855072463767, |
|
"loss": 1.0893, |
|
"step": 5350 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00021471739130434785, |
|
"loss": 1.1424, |
|
"step": 5375 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00021453623188405797, |
|
"loss": 1.0745, |
|
"step": 5400 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00021435507246376812, |
|
"loss": 1.1504, |
|
"step": 5425 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00021417391304347826, |
|
"loss": 1.0584, |
|
"step": 5450 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0002139927536231884, |
|
"loss": 1.1529, |
|
"step": 5475 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00021381159420289856, |
|
"loss": 1.1315, |
|
"step": 5500 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0002136304347826087, |
|
"loss": 1.1588, |
|
"step": 5525 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00021344927536231885, |
|
"loss": 1.0836, |
|
"step": 5550 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.000213268115942029, |
|
"loss": 1.1677, |
|
"step": 5575 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00021308695652173912, |
|
"loss": 1.0709, |
|
"step": 5600 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00021290579710144927, |
|
"loss": 1.1992, |
|
"step": 5625 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00021272463768115944, |
|
"loss": 1.129, |
|
"step": 5650 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00021254347826086956, |
|
"loss": 1.0316, |
|
"step": 5675 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0002123623188405797, |
|
"loss": 1.1119, |
|
"step": 5700 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00021218115942028988, |
|
"loss": 1.0713, |
|
"step": 5725 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.000212, |
|
"loss": 1.0471, |
|
"step": 5750 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00021181884057971015, |
|
"loss": 1.03, |
|
"step": 5775 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00021163768115942027, |
|
"loss": 0.9324, |
|
"step": 5800 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00021145652173913044, |
|
"loss": 1.1022, |
|
"step": 5825 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0002112753623188406, |
|
"loss": 1.0703, |
|
"step": 5850 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0002110942028985507, |
|
"loss": 0.9873, |
|
"step": 5875 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00021091304347826089, |
|
"loss": 1.1181, |
|
"step": 5900 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00021073188405797103, |
|
"loss": 1.0425, |
|
"step": 5925 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00021055072463768115, |
|
"loss": 1.1457, |
|
"step": 5950 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0002103695652173913, |
|
"loss": 1.1561, |
|
"step": 5975 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00021018840579710148, |
|
"loss": 1.0135, |
|
"step": 6000 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"eval_loss": 1.0616226196289062, |
|
"eval_runtime": 6268.4143, |
|
"eval_samples_per_second": 0.798, |
|
"eval_steps_per_second": 0.2, |
|
"eval_wer": 65.18427110277891, |
|
"step": 6000 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0002100072463768116, |
|
"loss": 0.9881, |
|
"step": 6025 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00020982608695652174, |
|
"loss": 1.0689, |
|
"step": 6050 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0002096449275362319, |
|
"loss": 1.0569, |
|
"step": 6075 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00020946376811594204, |
|
"loss": 1.1212, |
|
"step": 6100 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00020928260869565218, |
|
"loss": 1.0133, |
|
"step": 6125 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0002091014492753623, |
|
"loss": 1.0957, |
|
"step": 6150 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00020892028985507248, |
|
"loss": 1.0328, |
|
"step": 6175 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00020873913043478263, |
|
"loss": 1.0581, |
|
"step": 6200 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00020855797101449275, |
|
"loss": 0.9603, |
|
"step": 6225 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00020837681159420292, |
|
"loss": 1.0873, |
|
"step": 6250 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00020819565217391304, |
|
"loss": 0.9497, |
|
"step": 6275 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0002080144927536232, |
|
"loss": 0.997, |
|
"step": 6300 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00020783333333333333, |
|
"loss": 1.0641, |
|
"step": 6325 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00020765217391304348, |
|
"loss": 1.0911, |
|
"step": 6350 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00020747101449275363, |
|
"loss": 1.1041, |
|
"step": 6375 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00020728985507246378, |
|
"loss": 1.0997, |
|
"step": 6400 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00020710869565217392, |
|
"loss": 1.0995, |
|
"step": 6425 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00020692753623188407, |
|
"loss": 1.0015, |
|
"step": 6450 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0002067463768115942, |
|
"loss": 1.0448, |
|
"step": 6475 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00020656521739130434, |
|
"loss": 1.0744, |
|
"step": 6500 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0002063840579710145, |
|
"loss": 0.9513, |
|
"step": 6525 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00020620289855072463, |
|
"loss": 1.0539, |
|
"step": 6550 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00020602173913043478, |
|
"loss": 1.058, |
|
"step": 6575 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00020584057971014495, |
|
"loss": 0.9622, |
|
"step": 6600 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00020565942028985507, |
|
"loss": 1.0784, |
|
"step": 6625 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00020547826086956522, |
|
"loss": 1.0293, |
|
"step": 6650 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00020529710144927534, |
|
"loss": 1.1025, |
|
"step": 6675 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00020511594202898552, |
|
"loss": 1.0524, |
|
"step": 6700 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00020493478260869566, |
|
"loss": 1.0089, |
|
"step": 6725 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00020475362318840578, |
|
"loss": 1.0969, |
|
"step": 6750 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00020457246376811596, |
|
"loss": 0.949, |
|
"step": 6775 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0002043913043478261, |
|
"loss": 1.0159, |
|
"step": 6800 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00020421014492753622, |
|
"loss": 0.9893, |
|
"step": 6825 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00020402898550724637, |
|
"loss": 1.0183, |
|
"step": 6850 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0002038550724637681, |
|
"loss": 1.1104, |
|
"step": 6875 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00020367391304347825, |
|
"loss": 0.9909, |
|
"step": 6900 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00020349275362318843, |
|
"loss": 0.9662, |
|
"step": 6925 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00020331159420289855, |
|
"loss": 0.9838, |
|
"step": 6950 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0002031304347826087, |
|
"loss": 1.018, |
|
"step": 6975 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00020294927536231887, |
|
"loss": 1.0965, |
|
"step": 7000 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"eval_loss": 1.008375883102417, |
|
"eval_runtime": 6473.3745, |
|
"eval_samples_per_second": 0.773, |
|
"eval_steps_per_second": 0.193, |
|
"eval_wer": 65.85823109233726, |
|
"step": 7000 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.000202768115942029, |
|
"loss": 1.0087, |
|
"step": 7025 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00020258695652173914, |
|
"loss": 1.0846, |
|
"step": 7050 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00020240579710144926, |
|
"loss": 1.0778, |
|
"step": 7075 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00020222463768115943, |
|
"loss": 0.9426, |
|
"step": 7100 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00020204347826086958, |
|
"loss": 0.9758, |
|
"step": 7125 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0002018623188405797, |
|
"loss": 1.0578, |
|
"step": 7150 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00020168115942028987, |
|
"loss": 0.9529, |
|
"step": 7175 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00020150000000000002, |
|
"loss": 1.0048, |
|
"step": 7200 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00020131884057971014, |
|
"loss": 0.9114, |
|
"step": 7225 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00020113768115942029, |
|
"loss": 1.0504, |
|
"step": 7250 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00020095652173913043, |
|
"loss": 0.9835, |
|
"step": 7275 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00020077536231884058, |
|
"loss": 0.9254, |
|
"step": 7300 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00020059420289855073, |
|
"loss": 0.9491, |
|
"step": 7325 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00020041304347826088, |
|
"loss": 0.9581, |
|
"step": 7350 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00020023188405797102, |
|
"loss": 0.9441, |
|
"step": 7375 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00020005072463768117, |
|
"loss": 0.9157, |
|
"step": 7400 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0001998695652173913, |
|
"loss": 0.8822, |
|
"step": 7425 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019968840579710146, |
|
"loss": 1.0002, |
|
"step": 7450 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019950724637681158, |
|
"loss": 0.96, |
|
"step": 7475 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019932608695652173, |
|
"loss": 0.9303, |
|
"step": 7500 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0001991449275362319, |
|
"loss": 0.9066, |
|
"step": 7525 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019896376811594203, |
|
"loss": 0.9793, |
|
"step": 7550 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019878260869565217, |
|
"loss": 1.0105, |
|
"step": 7575 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019860144927536235, |
|
"loss": 0.9897, |
|
"step": 7600 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019842028985507247, |
|
"loss": 0.9949, |
|
"step": 7625 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019823913043478261, |
|
"loss": 0.9493, |
|
"step": 7650 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019805797101449276, |
|
"loss": 0.8889, |
|
"step": 7675 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0001978768115942029, |
|
"loss": 0.9088, |
|
"step": 7700 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019769565217391306, |
|
"loss": 0.9772, |
|
"step": 7725 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019751449275362318, |
|
"loss": 0.9503, |
|
"step": 7750 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019733333333333335, |
|
"loss": 0.8893, |
|
"step": 7775 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0001971521739130435, |
|
"loss": 1.0032, |
|
"step": 7800 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019697101449275362, |
|
"loss": 0.9792, |
|
"step": 7825 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019678985507246376, |
|
"loss": 0.965, |
|
"step": 7850 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019660869565217394, |
|
"loss": 0.9428, |
|
"step": 7875 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019642753623188406, |
|
"loss": 1.0196, |
|
"step": 7900 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0001962463768115942, |
|
"loss": 0.9396, |
|
"step": 7925 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019606521739130435, |
|
"loss": 0.9787, |
|
"step": 7950 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0001958840579710145, |
|
"loss": 0.9023, |
|
"step": 7975 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019570289855072465, |
|
"loss": 0.867, |
|
"step": 8000 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"eval_loss": 0.9304548501968384, |
|
"eval_runtime": 6180.6024, |
|
"eval_samples_per_second": 0.81, |
|
"eval_steps_per_second": 0.203, |
|
"eval_wer": 57.609340515911626, |
|
"step": 8000 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019552173913043477, |
|
"loss": 0.9631, |
|
"step": 8025 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019534057971014494, |
|
"loss": 0.9408, |
|
"step": 8050 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0001951594202898551, |
|
"loss": 1.0169, |
|
"step": 8075 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0001949782608695652, |
|
"loss": 1.0044, |
|
"step": 8100 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019479710144927538, |
|
"loss": 1.0462, |
|
"step": 8125 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0001946159420289855, |
|
"loss": 0.9778, |
|
"step": 8150 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019443478260869565, |
|
"loss": 0.9329, |
|
"step": 8175 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0001942536231884058, |
|
"loss": 0.9757, |
|
"step": 8200 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019407246376811595, |
|
"loss": 0.9153, |
|
"step": 8225 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0001938913043478261, |
|
"loss": 0.9201, |
|
"step": 8250 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019371014492753624, |
|
"loss": 0.8937, |
|
"step": 8275 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0001935289855072464, |
|
"loss": 0.91, |
|
"step": 8300 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019334782608695653, |
|
"loss": 0.884, |
|
"step": 8325 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019316666666666665, |
|
"loss": 0.9819, |
|
"step": 8350 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0001929855072463768, |
|
"loss": 0.9816, |
|
"step": 8375 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019280434782608698, |
|
"loss": 0.9698, |
|
"step": 8400 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0001926231884057971, |
|
"loss": 0.9072, |
|
"step": 8425 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019244202898550724, |
|
"loss": 1.0583, |
|
"step": 8450 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019226086956521742, |
|
"loss": 0.8779, |
|
"step": 8475 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019207971014492754, |
|
"loss": 0.8537, |
|
"step": 8500 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019189855072463769, |
|
"loss": 0.9183, |
|
"step": 8525 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019171739130434783, |
|
"loss": 0.829, |
|
"step": 8550 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019153623188405798, |
|
"loss": 0.9313, |
|
"step": 8575 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019135507246376813, |
|
"loss": 0.8861, |
|
"step": 8600 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019117391304347825, |
|
"loss": 0.9286, |
|
"step": 8625 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019099275362318842, |
|
"loss": 0.9069, |
|
"step": 8650 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019081159420289857, |
|
"loss": 0.9949, |
|
"step": 8675 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0001906304347826087, |
|
"loss": 0.9647, |
|
"step": 8700 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019044927536231884, |
|
"loss": 0.9581, |
|
"step": 8725 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.000190268115942029, |
|
"loss": 0.9482, |
|
"step": 8750 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019008695652173913, |
|
"loss": 0.8733, |
|
"step": 8775 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00018990579710144928, |
|
"loss": 0.8457, |
|
"step": 8800 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00018972463768115942, |
|
"loss": 0.8804, |
|
"step": 8825 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00018954347826086957, |
|
"loss": 0.9482, |
|
"step": 8850 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00018936231884057972, |
|
"loss": 0.922, |
|
"step": 8875 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00018918115942028984, |
|
"loss": 0.8846, |
|
"step": 8900 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.000189, |
|
"loss": 0.9022, |
|
"step": 8925 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00018881884057971016, |
|
"loss": 0.8839, |
|
"step": 8950 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00018863768115942028, |
|
"loss": 0.9456, |
|
"step": 8975 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00018845652173913045, |
|
"loss": 0.9425, |
|
"step": 9000 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"eval_loss": 0.8907226920127869, |
|
"eval_runtime": 6145.942, |
|
"eval_samples_per_second": 0.814, |
|
"eval_steps_per_second": 0.204, |
|
"eval_wer": 55.48541730938086, |
|
"step": 9000 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00018827536231884057, |
|
"loss": 0.8422, |
|
"step": 9025 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00018809420289855072, |
|
"loss": 0.8493, |
|
"step": 9050 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00018791304347826087, |
|
"loss": 0.844, |
|
"step": 9075 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00018773188405797102, |
|
"loss": 0.9008, |
|
"step": 9100 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00018755072463768116, |
|
"loss": 0.8489, |
|
"step": 9125 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0001873695652173913, |
|
"loss": 0.9133, |
|
"step": 9150 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00018718840579710146, |
|
"loss": 0.8765, |
|
"step": 9175 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0001870072463768116, |
|
"loss": 0.8892, |
|
"step": 9200 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00018682608695652173, |
|
"loss": 0.9426, |
|
"step": 9225 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00018664492753623187, |
|
"loss": 0.9292, |
|
"step": 9250 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00018646376811594205, |
|
"loss": 0.9203, |
|
"step": 9275 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00018628260869565217, |
|
"loss": 0.881, |
|
"step": 9300 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00018610144927536231, |
|
"loss": 0.9301, |
|
"step": 9325 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0001859202898550725, |
|
"loss": 0.8434, |
|
"step": 9350 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0001857391304347826, |
|
"loss": 0.9381, |
|
"step": 9375 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00018555797101449276, |
|
"loss": 0.891, |
|
"step": 9400 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0001853768115942029, |
|
"loss": 0.8266, |
|
"step": 9425 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00018519565217391305, |
|
"loss": 0.9595, |
|
"step": 9450 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0001850144927536232, |
|
"loss": 0.8791, |
|
"step": 9475 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00018483333333333332, |
|
"loss": 0.8108, |
|
"step": 9500 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0001846521739130435, |
|
"loss": 0.845, |
|
"step": 9525 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00018447101449275364, |
|
"loss": 0.8476, |
|
"step": 9550 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00018428985507246376, |
|
"loss": 0.8943, |
|
"step": 9575 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0001841086956521739, |
|
"loss": 0.8937, |
|
"step": 9600 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00018392753623188408, |
|
"loss": 0.8666, |
|
"step": 9625 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0001837463768115942, |
|
"loss": 0.8304, |
|
"step": 9650 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00018356521739130435, |
|
"loss": 0.8912, |
|
"step": 9675 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0001833840579710145, |
|
"loss": 0.9049, |
|
"step": 9700 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00018320289855072464, |
|
"loss": 0.8729, |
|
"step": 9725 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0001830217391304348, |
|
"loss": 0.8693, |
|
"step": 9750 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0001828405797101449, |
|
"loss": 0.8315, |
|
"step": 9775 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00018265942028985508, |
|
"loss": 0.8364, |
|
"step": 9800 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00018247826086956523, |
|
"loss": 0.8924, |
|
"step": 9825 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00018229710144927535, |
|
"loss": 0.8813, |
|
"step": 9850 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00018211594202898553, |
|
"loss": 0.8711, |
|
"step": 9875 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00018193478260869565, |
|
"loss": 0.8155, |
|
"step": 9900 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0001817536231884058, |
|
"loss": 0.8729, |
|
"step": 9925 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00018157246376811594, |
|
"loss": 0.9039, |
|
"step": 9950 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0001813913043478261, |
|
"loss": 0.7705, |
|
"step": 9975 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00018121014492753623, |
|
"loss": 0.9501, |
|
"step": 10000 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"eval_loss": 0.8392999172210693, |
|
"eval_runtime": 6230.858, |
|
"eval_samples_per_second": 0.803, |
|
"eval_steps_per_second": 0.201, |
|
"eval_wer": 54.02121550107976, |
|
"step": 10000 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00018102898550724638, |
|
"loss": 0.8114, |
|
"step": 10025 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00018084782608695653, |
|
"loss": 0.8974, |
|
"step": 10050 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00018066666666666668, |
|
"loss": 0.7821, |
|
"step": 10075 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00018048550724637682, |
|
"loss": 0.8489, |
|
"step": 10100 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00018030434782608694, |
|
"loss": 0.8337, |
|
"step": 10125 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00018012318840579712, |
|
"loss": 0.9795, |
|
"step": 10150 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00017994202898550724, |
|
"loss": 0.9552, |
|
"step": 10175 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00017976086956521739, |
|
"loss": 0.8544, |
|
"step": 10200 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00017957971014492756, |
|
"loss": 0.8426, |
|
"step": 10225 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00017939855072463768, |
|
"loss": 0.8073, |
|
"step": 10250 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00017921739130434783, |
|
"loss": 0.8112, |
|
"step": 10275 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00017903623188405797, |
|
"loss": 0.8495, |
|
"step": 10300 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00017885507246376812, |
|
"loss": 0.9735, |
|
"step": 10325 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00017867391304347827, |
|
"loss": 0.8394, |
|
"step": 10350 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0001784927536231884, |
|
"loss": 0.7613, |
|
"step": 10375 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00017831159420289856, |
|
"loss": 0.8697, |
|
"step": 10400 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0001781304347826087, |
|
"loss": 0.8087, |
|
"step": 10425 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00017794927536231883, |
|
"loss": 0.8902, |
|
"step": 10450 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00017776811594202898, |
|
"loss": 0.787, |
|
"step": 10475 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00017758695652173915, |
|
"loss": 0.8174, |
|
"step": 10500 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00017740579710144927, |
|
"loss": 0.7975, |
|
"step": 10525 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00017722463768115942, |
|
"loss": 0.7892, |
|
"step": 10550 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00017704347826086957, |
|
"loss": 0.7133, |
|
"step": 10575 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0001768623188405797, |
|
"loss": 0.8419, |
|
"step": 10600 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00017668115942028986, |
|
"loss": 0.7968, |
|
"step": 10625 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00017649999999999998, |
|
"loss": 0.8893, |
|
"step": 10650 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00017631884057971015, |
|
"loss": 0.8464, |
|
"step": 10675 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0001761376811594203, |
|
"loss": 0.8745, |
|
"step": 10700 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00017595652173913042, |
|
"loss": 0.853, |
|
"step": 10725 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0001757753623188406, |
|
"loss": 0.8488, |
|
"step": 10750 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00017559420289855072, |
|
"loss": 0.8435, |
|
"step": 10775 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00017541304347826086, |
|
"loss": 0.7949, |
|
"step": 10800 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00017523188405797104, |
|
"loss": 0.7916, |
|
"step": 10825 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00017505072463768116, |
|
"loss": 0.7734, |
|
"step": 10850 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0001748695652173913, |
|
"loss": 0.8258, |
|
"step": 10875 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00017468840579710145, |
|
"loss": 0.8101, |
|
"step": 10900 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0001745072463768116, |
|
"loss": 0.8221, |
|
"step": 10925 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00017432608695652175, |
|
"loss": 0.8009, |
|
"step": 10950 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0001741449275362319, |
|
"loss": 0.7875, |
|
"step": 10975 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00017396376811594204, |
|
"loss": 0.8602, |
|
"step": 11000 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"eval_loss": 0.8095631003379822, |
|
"eval_runtime": 6209.5684, |
|
"eval_samples_per_second": 0.806, |
|
"eval_steps_per_second": 0.202, |
|
"eval_wer": 53.49676072047272, |
|
"step": 11000 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0001737826086956522, |
|
"loss": 0.8403, |
|
"step": 11025 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0001736014492753623, |
|
"loss": 0.756, |
|
"step": 11050 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00017342028985507246, |
|
"loss": 0.8086, |
|
"step": 11075 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00017323913043478263, |
|
"loss": 0.7628, |
|
"step": 11100 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00017305797101449275, |
|
"loss": 0.8002, |
|
"step": 11125 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0001728768115942029, |
|
"loss": 0.8258, |
|
"step": 11150 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00017269565217391307, |
|
"loss": 0.7933, |
|
"step": 11175 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0001725144927536232, |
|
"loss": 0.7734, |
|
"step": 11200 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00017233333333333334, |
|
"loss": 0.8077, |
|
"step": 11225 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00017215217391304346, |
|
"loss": 0.8157, |
|
"step": 11250 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00017197101449275363, |
|
"loss": 0.822, |
|
"step": 11275 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00017178985507246378, |
|
"loss": 0.7343, |
|
"step": 11300 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0001716086956521739, |
|
"loss": 0.8093, |
|
"step": 11325 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00017142753623188408, |
|
"loss": 0.7928, |
|
"step": 11350 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00017124637681159422, |
|
"loss": 0.8318, |
|
"step": 11375 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00017106521739130434, |
|
"loss": 0.8163, |
|
"step": 11400 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0001708840579710145, |
|
"loss": 0.765, |
|
"step": 11425 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00017070289855072464, |
|
"loss": 0.8056, |
|
"step": 11450 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00017052173913043478, |
|
"loss": 0.7481, |
|
"step": 11475 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00017034057971014493, |
|
"loss": 0.7827, |
|
"step": 11500 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00017015942028985508, |
|
"loss": 0.7476, |
|
"step": 11525 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016997826086956523, |
|
"loss": 0.7672, |
|
"step": 11550 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016979710144927537, |
|
"loss": 0.9035, |
|
"step": 11575 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0001696159420289855, |
|
"loss": 0.8095, |
|
"step": 11600 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016943478260869567, |
|
"loss": 0.8071, |
|
"step": 11625 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016925362318840581, |
|
"loss": 0.8259, |
|
"step": 11650 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016907246376811593, |
|
"loss": 0.7824, |
|
"step": 11675 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0001688913043478261, |
|
"loss": 0.6768, |
|
"step": 11700 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016871014492753623, |
|
"loss": 0.7784, |
|
"step": 11725 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016852898550724638, |
|
"loss": 0.7946, |
|
"step": 11750 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016834782608695652, |
|
"loss": 0.7938, |
|
"step": 11775 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016816666666666667, |
|
"loss": 0.7757, |
|
"step": 11800 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016798550724637682, |
|
"loss": 0.784, |
|
"step": 11825 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016780434782608696, |
|
"loss": 0.8067, |
|
"step": 11850 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0001676231884057971, |
|
"loss": 0.8073, |
|
"step": 11875 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016744202898550726, |
|
"loss": 0.7267, |
|
"step": 11900 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016726086956521738, |
|
"loss": 0.8355, |
|
"step": 11925 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016707971014492753, |
|
"loss": 0.8044, |
|
"step": 11950 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0001668985507246377, |
|
"loss": 0.8151, |
|
"step": 11975 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016671739130434782, |
|
"loss": 0.7596, |
|
"step": 12000 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"eval_loss": 0.7761228680610657, |
|
"eval_runtime": 6256.6305, |
|
"eval_samples_per_second": 0.8, |
|
"eval_steps_per_second": 0.2, |
|
"eval_wer": 51.930515674315956, |
|
"step": 12000 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016653623188405797, |
|
"loss": 0.7608, |
|
"step": 12025 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016635507246376814, |
|
"loss": 0.6833, |
|
"step": 12050 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016617391304347826, |
|
"loss": 0.8248, |
|
"step": 12075 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0001659927536231884, |
|
"loss": 0.869, |
|
"step": 12100 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016581159420289853, |
|
"loss": 0.7692, |
|
"step": 12125 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0001656304347826087, |
|
"loss": 0.8126, |
|
"step": 12150 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016544927536231885, |
|
"loss": 0.7638, |
|
"step": 12175 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016526811594202897, |
|
"loss": 0.755, |
|
"step": 12200 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016508695652173915, |
|
"loss": 0.798, |
|
"step": 12225 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0001649057971014493, |
|
"loss": 0.8398, |
|
"step": 12250 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0001647246376811594, |
|
"loss": 0.7998, |
|
"step": 12275 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016454347826086956, |
|
"loss": 0.7801, |
|
"step": 12300 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0001643623188405797, |
|
"loss": 0.7226, |
|
"step": 12325 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016418115942028985, |
|
"loss": 0.8209, |
|
"step": 12350 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.000164, |
|
"loss": 0.7869, |
|
"step": 12375 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016381884057971015, |
|
"loss": 0.7806, |
|
"step": 12400 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0001636376811594203, |
|
"loss": 0.6899, |
|
"step": 12425 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016345652173913044, |
|
"loss": 0.7796, |
|
"step": 12450 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016327536231884056, |
|
"loss": 0.7448, |
|
"step": 12475 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016309420289855074, |
|
"loss": 0.702, |
|
"step": 12500 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016291304347826089, |
|
"loss": 0.8131, |
|
"step": 12525 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.000162731884057971, |
|
"loss": 0.8154, |
|
"step": 12550 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016255072463768118, |
|
"loss": 0.7873, |
|
"step": 12575 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0001623695652173913, |
|
"loss": 0.8329, |
|
"step": 12600 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016218840579710145, |
|
"loss": 0.7596, |
|
"step": 12625 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0001620072463768116, |
|
"loss": 0.6257, |
|
"step": 12650 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016182608695652174, |
|
"loss": 0.766, |
|
"step": 12675 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0001616449275362319, |
|
"loss": 0.7594, |
|
"step": 12700 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016146376811594204, |
|
"loss": 0.776, |
|
"step": 12725 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016128260869565218, |
|
"loss": 0.8994, |
|
"step": 12750 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016110144927536233, |
|
"loss": 0.7237, |
|
"step": 12775 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016092028985507245, |
|
"loss": 0.7658, |
|
"step": 12800 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0001607391304347826, |
|
"loss": 0.7995, |
|
"step": 12825 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016055797101449277, |
|
"loss": 0.8313, |
|
"step": 12850 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0001603768115942029, |
|
"loss": 0.7675, |
|
"step": 12875 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00016019565217391304, |
|
"loss": 0.7119, |
|
"step": 12900 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0001600144927536232, |
|
"loss": 0.7035, |
|
"step": 12925 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00015983333333333333, |
|
"loss": 0.737, |
|
"step": 12950 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015965217391304348, |
|
"loss": 0.694, |
|
"step": 12975 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0001594710144927536, |
|
"loss": 0.7334, |
|
"step": 13000 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"eval_loss": 0.7694315314292908, |
|
"eval_runtime": 6125.7667, |
|
"eval_samples_per_second": 0.817, |
|
"eval_steps_per_second": 0.204, |
|
"eval_wer": 49.44113529034861, |
|
"step": 13000 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015928985507246377, |
|
"loss": 0.6969, |
|
"step": 13025 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015910869565217392, |
|
"loss": 0.69, |
|
"step": 13050 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015892753623188404, |
|
"loss": 0.8317, |
|
"step": 13075 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015874637681159422, |
|
"loss": 0.8752, |
|
"step": 13100 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015856521739130436, |
|
"loss": 0.7777, |
|
"step": 13125 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015838405797101448, |
|
"loss": 0.7003, |
|
"step": 13150 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015820289855072463, |
|
"loss": 0.745, |
|
"step": 13175 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0001580217391304348, |
|
"loss": 0.6816, |
|
"step": 13200 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015784057971014493, |
|
"loss": 0.7321, |
|
"step": 13225 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015765942028985507, |
|
"loss": 0.8165, |
|
"step": 13250 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015747826086956522, |
|
"loss": 0.8318, |
|
"step": 13275 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015729710144927537, |
|
"loss": 0.7568, |
|
"step": 13300 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015711594202898551, |
|
"loss": 0.7191, |
|
"step": 13325 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015693478260869563, |
|
"loss": 0.7375, |
|
"step": 13350 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0001567536231884058, |
|
"loss": 0.6437, |
|
"step": 13375 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015657246376811596, |
|
"loss": 0.8342, |
|
"step": 13400 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015639130434782608, |
|
"loss": 0.7003, |
|
"step": 13425 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015621014492753625, |
|
"loss": 0.6729, |
|
"step": 13450 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015603623188405796, |
|
"loss": 0.7319, |
|
"step": 13475 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015585507246376813, |
|
"loss": 0.6888, |
|
"step": 13500 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015567391304347828, |
|
"loss": 0.7646, |
|
"step": 13525 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0001554927536231884, |
|
"loss": 0.7568, |
|
"step": 13550 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015531159420289857, |
|
"loss": 0.7149, |
|
"step": 13575 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0001551304347826087, |
|
"loss": 0.776, |
|
"step": 13600 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015494927536231884, |
|
"loss": 0.735, |
|
"step": 13625 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.000154768115942029, |
|
"loss": 0.7454, |
|
"step": 13650 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015458695652173913, |
|
"loss": 0.7072, |
|
"step": 13675 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015440579710144928, |
|
"loss": 0.7504, |
|
"step": 13700 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015422463768115943, |
|
"loss": 0.6737, |
|
"step": 13725 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015404347826086958, |
|
"loss": 0.7757, |
|
"step": 13750 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015386231884057972, |
|
"loss": 0.7659, |
|
"step": 13775 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015368115942028984, |
|
"loss": 0.6894, |
|
"step": 13800 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0001535, |
|
"loss": 0.6959, |
|
"step": 13825 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015331884057971016, |
|
"loss": 0.7326, |
|
"step": 13850 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015313768115942029, |
|
"loss": 0.7797, |
|
"step": 13875 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015295652173913043, |
|
"loss": 0.6998, |
|
"step": 13900 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0001527753623188406, |
|
"loss": 0.7806, |
|
"step": 13925 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015259420289855073, |
|
"loss": 0.7847, |
|
"step": 13950 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015241304347826087, |
|
"loss": 0.7633, |
|
"step": 13975 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.000152231884057971, |
|
"loss": 0.708, |
|
"step": 14000 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"eval_loss": 0.7335543632507324, |
|
"eval_runtime": 6161.4493, |
|
"eval_samples_per_second": 0.812, |
|
"eval_steps_per_second": 0.203, |
|
"eval_wer": 47.00396307458649, |
|
"step": 14000 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015205072463768117, |
|
"loss": 0.7598, |
|
"step": 14025 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015186956521739132, |
|
"loss": 0.7354, |
|
"step": 14050 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015168840579710144, |
|
"loss": 0.7034, |
|
"step": 14075 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0001515072463768116, |
|
"loss": 0.6852, |
|
"step": 14100 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015132608695652176, |
|
"loss": 0.7925, |
|
"step": 14125 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015114492753623188, |
|
"loss": 0.7146, |
|
"step": 14150 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015096376811594202, |
|
"loss": 0.714, |
|
"step": 14175 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0001507826086956522, |
|
"loss": 0.7124, |
|
"step": 14200 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015060144927536232, |
|
"loss": 0.7223, |
|
"step": 14225 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015042028985507247, |
|
"loss": 0.8055, |
|
"step": 14250 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0001502391304347826, |
|
"loss": 0.651, |
|
"step": 14275 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00015005797101449276, |
|
"loss": 0.6731, |
|
"step": 14300 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0001498768115942029, |
|
"loss": 0.7672, |
|
"step": 14325 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00014969565217391303, |
|
"loss": 0.706, |
|
"step": 14350 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0001495144927536232, |
|
"loss": 0.6925, |
|
"step": 14375 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00014933333333333335, |
|
"loss": 0.8309, |
|
"step": 14400 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00014915217391304347, |
|
"loss": 0.7526, |
|
"step": 14425 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00014897101449275364, |
|
"loss": 0.7555, |
|
"step": 14450 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00014878985507246376, |
|
"loss": 0.7689, |
|
"step": 14475 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0001486086956521739, |
|
"loss": 0.6583, |
|
"step": 14500 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00014842753623188406, |
|
"loss": 0.7107, |
|
"step": 14525 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0001482463768115942, |
|
"loss": 0.7068, |
|
"step": 14550 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00014806521739130435, |
|
"loss": 0.795, |
|
"step": 14575 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0001478840579710145, |
|
"loss": 0.679, |
|
"step": 14600 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00014770289855072465, |
|
"loss": 0.7713, |
|
"step": 14625 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0001475217391304348, |
|
"loss": 0.6923, |
|
"step": 14650 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00014734057971014491, |
|
"loss": 0.7371, |
|
"step": 14675 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00014715942028985506, |
|
"loss": 0.7118, |
|
"step": 14700 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00014697826086956524, |
|
"loss": 0.724, |
|
"step": 14725 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00014679710144927536, |
|
"loss": 0.7121, |
|
"step": 14750 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0001466159420289855, |
|
"loss": 0.6898, |
|
"step": 14775 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00014643478260869568, |
|
"loss": 0.6558, |
|
"step": 14800 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0001462536231884058, |
|
"loss": 0.7129, |
|
"step": 14825 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00014607246376811594, |
|
"loss": 0.6833, |
|
"step": 14850 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0001458913043478261, |
|
"loss": 0.6877, |
|
"step": 14875 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00014571014492753624, |
|
"loss": 0.6646, |
|
"step": 14900 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00014552898550724639, |
|
"loss": 0.6781, |
|
"step": 14925 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0001453478260869565, |
|
"loss": 0.7139, |
|
"step": 14950 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00014516666666666668, |
|
"loss": 0.7152, |
|
"step": 14975 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00014498550724637683, |
|
"loss": 0.7112, |
|
"step": 15000 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"eval_loss": 0.7149045467376709, |
|
"eval_runtime": 6211.0734, |
|
"eval_samples_per_second": 0.806, |
|
"eval_steps_per_second": 0.202, |
|
"eval_wer": 47.578252924843966, |
|
"step": 15000 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00014480434782608695, |
|
"loss": 0.7359, |
|
"step": 15025 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0001446231884057971, |
|
"loss": 0.7161, |
|
"step": 15050 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00014444202898550727, |
|
"loss": 0.7225, |
|
"step": 15075 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0001442608695652174, |
|
"loss": 0.6912, |
|
"step": 15100 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00014407971014492754, |
|
"loss": 0.7643, |
|
"step": 15125 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00014389855072463768, |
|
"loss": 0.7172, |
|
"step": 15150 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00014371739130434783, |
|
"loss": 0.6586, |
|
"step": 15175 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00014353623188405798, |
|
"loss": 0.7689, |
|
"step": 15200 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0001433550724637681, |
|
"loss": 0.6689, |
|
"step": 15225 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00014317391304347827, |
|
"loss": 0.6828, |
|
"step": 15250 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00014299275362318842, |
|
"loss": 0.626, |
|
"step": 15275 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00014281159420289854, |
|
"loss": 0.6763, |
|
"step": 15300 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00014263043478260871, |
|
"loss": 0.733, |
|
"step": 15325 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00014244927536231883, |
|
"loss": 0.712, |
|
"step": 15350 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00014226811594202898, |
|
"loss": 0.7525, |
|
"step": 15375 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00014208695652173913, |
|
"loss": 0.6006, |
|
"step": 15400 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00014190579710144928, |
|
"loss": 0.66, |
|
"step": 15425 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00014172463768115942, |
|
"loss": 0.6597, |
|
"step": 15450 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00014154347826086957, |
|
"loss": 0.7359, |
|
"step": 15475 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00014136231884057972, |
|
"loss": 0.7218, |
|
"step": 15500 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00014118115942028986, |
|
"loss": 0.6768, |
|
"step": 15525 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00014099999999999998, |
|
"loss": 0.7398, |
|
"step": 15550 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00014081884057971013, |
|
"loss": 0.6622, |
|
"step": 15575 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0001406376811594203, |
|
"loss": 0.6799, |
|
"step": 15600 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00014045652173913043, |
|
"loss": 0.6402, |
|
"step": 15625 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00014027536231884057, |
|
"loss": 0.6506, |
|
"step": 15650 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00014009420289855075, |
|
"loss": 0.6803, |
|
"step": 15675 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013991304347826087, |
|
"loss": 0.6568, |
|
"step": 15700 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013973188405797102, |
|
"loss": 0.6378, |
|
"step": 15725 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013955072463768116, |
|
"loss": 0.6953, |
|
"step": 15750 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0001393695652173913, |
|
"loss": 0.6846, |
|
"step": 15775 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013918840579710146, |
|
"loss": 0.6657, |
|
"step": 15800 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013900724637681158, |
|
"loss": 0.639, |
|
"step": 15825 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013883333333333334, |
|
"loss": 0.6946, |
|
"step": 15850 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013865217391304349, |
|
"loss": 0.7005, |
|
"step": 15875 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013847101449275363, |
|
"loss": 0.7449, |
|
"step": 15900 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013828985507246378, |
|
"loss": 0.6588, |
|
"step": 15925 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0001381086956521739, |
|
"loss": 0.6473, |
|
"step": 15950 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013792753623188405, |
|
"loss": 0.6854, |
|
"step": 15975 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013774637681159422, |
|
"loss": 0.6989, |
|
"step": 16000 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"eval_loss": 0.6713331937789917, |
|
"eval_runtime": 6174.3958, |
|
"eval_samples_per_second": 0.811, |
|
"eval_steps_per_second": 0.203, |
|
"eval_wer": 44.29863072213389, |
|
"step": 16000 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013756521739130434, |
|
"loss": 0.6563, |
|
"step": 16025 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0001373840579710145, |
|
"loss": 0.6982, |
|
"step": 16050 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013720289855072466, |
|
"loss": 0.6678, |
|
"step": 16075 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013702173913043478, |
|
"loss": 0.6593, |
|
"step": 16100 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013684057971014493, |
|
"loss": 0.6349, |
|
"step": 16125 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013665942028985505, |
|
"loss": 0.7161, |
|
"step": 16150 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013647826086956522, |
|
"loss": 0.6789, |
|
"step": 16175 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013629710144927537, |
|
"loss": 0.6891, |
|
"step": 16200 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0001361159420289855, |
|
"loss": 0.752, |
|
"step": 16225 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013593478260869567, |
|
"loss": 0.6384, |
|
"step": 16250 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0001357536231884058, |
|
"loss": 0.6149, |
|
"step": 16275 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013557246376811593, |
|
"loss": 0.7397, |
|
"step": 16300 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0001353913043478261, |
|
"loss": 0.6256, |
|
"step": 16325 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013521014492753623, |
|
"loss": 0.7775, |
|
"step": 16350 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013502898550724637, |
|
"loss": 0.6211, |
|
"step": 16375 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013484782608695652, |
|
"loss": 0.6559, |
|
"step": 16400 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013466666666666667, |
|
"loss": 0.6802, |
|
"step": 16425 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013448550724637682, |
|
"loss": 0.7037, |
|
"step": 16450 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013430434782608696, |
|
"loss": 0.6405, |
|
"step": 16475 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0001341231884057971, |
|
"loss": 0.6445, |
|
"step": 16500 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013394202898550726, |
|
"loss": 0.6343, |
|
"step": 16525 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0001337608695652174, |
|
"loss": 0.6078, |
|
"step": 16550 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013357971014492753, |
|
"loss": 0.6994, |
|
"step": 16575 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0001333985507246377, |
|
"loss": 0.6863, |
|
"step": 16600 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013321739130434782, |
|
"loss": 0.7175, |
|
"step": 16625 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013303623188405797, |
|
"loss": 0.654, |
|
"step": 16650 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013285507246376814, |
|
"loss": 0.7037, |
|
"step": 16675 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013267391304347826, |
|
"loss": 0.6302, |
|
"step": 16700 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0001324927536231884, |
|
"loss": 0.6877, |
|
"step": 16725 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013231159420289856, |
|
"loss": 0.698, |
|
"step": 16750 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0001321304347826087, |
|
"loss": 0.6587, |
|
"step": 16775 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013194927536231885, |
|
"loss": 0.6436, |
|
"step": 16800 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013176811594202897, |
|
"loss": 0.6632, |
|
"step": 16825 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013158695652173914, |
|
"loss": 0.7216, |
|
"step": 16850 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0001314057971014493, |
|
"loss": 0.7142, |
|
"step": 16875 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0001312246376811594, |
|
"loss": 0.6373, |
|
"step": 16900 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013104347826086956, |
|
"loss": 0.7663, |
|
"step": 16925 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013086231884057973, |
|
"loss": 0.6699, |
|
"step": 16950 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00013068115942028985, |
|
"loss": 0.6204, |
|
"step": 16975 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0001305, |
|
"loss": 0.7025, |
|
"step": 17000 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"eval_loss": 0.6639269590377808, |
|
"eval_runtime": 6204.7668, |
|
"eval_samples_per_second": 0.807, |
|
"eval_steps_per_second": 0.202, |
|
"eval_wer": 43.748071857424236, |
|
"step": 17000 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00013031884057971015, |
|
"loss": 0.6292, |
|
"step": 17025 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0001301376811594203, |
|
"loss": 0.6168, |
|
"step": 17050 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012995652173913044, |
|
"loss": 0.6671, |
|
"step": 17075 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012977536231884056, |
|
"loss": 0.6828, |
|
"step": 17100 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012959420289855074, |
|
"loss": 0.6362, |
|
"step": 17125 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012941304347826088, |
|
"loss": 0.6586, |
|
"step": 17150 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.000129231884057971, |
|
"loss": 0.6173, |
|
"step": 17175 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012905072463768118, |
|
"loss": 0.6647, |
|
"step": 17200 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0001288695652173913, |
|
"loss": 0.6498, |
|
"step": 17225 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012868840579710145, |
|
"loss": 0.6818, |
|
"step": 17250 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0001285072463768116, |
|
"loss": 0.6441, |
|
"step": 17275 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012832608695652174, |
|
"loss": 0.6493, |
|
"step": 17300 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0001281449275362319, |
|
"loss": 0.5915, |
|
"step": 17325 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012796376811594203, |
|
"loss": 0.6864, |
|
"step": 17350 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012778260869565218, |
|
"loss": 0.6784, |
|
"step": 17375 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012760144927536233, |
|
"loss": 0.6209, |
|
"step": 17400 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012742028985507248, |
|
"loss": 0.681, |
|
"step": 17425 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0001272391304347826, |
|
"loss": 0.6087, |
|
"step": 17450 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012705797101449277, |
|
"loss": 0.6084, |
|
"step": 17475 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0001268768115942029, |
|
"loss": 0.7131, |
|
"step": 17500 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012669565217391304, |
|
"loss": 0.6433, |
|
"step": 17525 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0001265144927536232, |
|
"loss": 0.7002, |
|
"step": 17550 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012633333333333333, |
|
"loss": 0.6467, |
|
"step": 17575 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012615217391304348, |
|
"loss": 0.7296, |
|
"step": 17600 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012597101449275363, |
|
"loss": 0.6358, |
|
"step": 17625 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012578985507246377, |
|
"loss": 0.6227, |
|
"step": 17650 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012560869565217392, |
|
"loss": 0.6872, |
|
"step": 17675 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012542753623188404, |
|
"loss": 0.661, |
|
"step": 17700 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012524637681159422, |
|
"loss": 0.661, |
|
"step": 17725 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012506521739130436, |
|
"loss": 0.6795, |
|
"step": 17750 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0001248840579710145, |
|
"loss": 0.6197, |
|
"step": 17775 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012470289855072463, |
|
"loss": 0.6697, |
|
"step": 17800 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012452173913043478, |
|
"loss": 0.6658, |
|
"step": 17825 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012434057971014492, |
|
"loss": 0.5313, |
|
"step": 17850 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012415942028985507, |
|
"loss": 0.7019, |
|
"step": 17875 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012397826086956522, |
|
"loss": 0.6018, |
|
"step": 17900 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012379710144927537, |
|
"loss": 0.5935, |
|
"step": 17925 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0001236159420289855, |
|
"loss": 0.6657, |
|
"step": 17950 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012343478260869566, |
|
"loss": 0.6591, |
|
"step": 17975 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0001232536231884058, |
|
"loss": 0.6127, |
|
"step": 18000 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"eval_loss": 0.6477032899856567, |
|
"eval_runtime": 6184.9416, |
|
"eval_samples_per_second": 0.809, |
|
"eval_steps_per_second": 0.202, |
|
"eval_wer": 42.91274116614063, |
|
"step": 18000 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012307246376811595, |
|
"loss": 0.6471, |
|
"step": 18025 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012289130434782607, |
|
"loss": 0.5746, |
|
"step": 18050 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012271014492753625, |
|
"loss": 0.6262, |
|
"step": 18075 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012252898550724637, |
|
"loss": 0.646, |
|
"step": 18100 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012234782608695652, |
|
"loss": 0.5969, |
|
"step": 18125 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012216666666666666, |
|
"loss": 0.6934, |
|
"step": 18150 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012198550724637681, |
|
"loss": 0.6236, |
|
"step": 18175 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012180434782608696, |
|
"loss": 0.6376, |
|
"step": 18200 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0001216231884057971, |
|
"loss": 0.6312, |
|
"step": 18225 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012144202898550725, |
|
"loss": 0.5801, |
|
"step": 18250 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0001212608695652174, |
|
"loss": 0.6155, |
|
"step": 18275 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012107971014492753, |
|
"loss": 0.6514, |
|
"step": 18300 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012089855072463768, |
|
"loss": 0.627, |
|
"step": 18325 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012071739130434783, |
|
"loss": 0.6617, |
|
"step": 18350 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012054347826086957, |
|
"loss": 0.6565, |
|
"step": 18375 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012036231884057971, |
|
"loss": 0.6107, |
|
"step": 18400 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012018115942028986, |
|
"loss": 0.6027, |
|
"step": 18425 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012, |
|
"loss": 0.6002, |
|
"step": 18450 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00011981884057971015, |
|
"loss": 0.5878, |
|
"step": 18475 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0001196376811594203, |
|
"loss": 0.6664, |
|
"step": 18500 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00011945652173913043, |
|
"loss": 0.6488, |
|
"step": 18525 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00011927536231884058, |
|
"loss": 0.5983, |
|
"step": 18550 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00011909420289855074, |
|
"loss": 0.6504, |
|
"step": 18575 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00011891304347826087, |
|
"loss": 0.6248, |
|
"step": 18600 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00011873188405797102, |
|
"loss": 0.5826, |
|
"step": 18625 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00011855072463768115, |
|
"loss": 0.6357, |
|
"step": 18650 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00011836956521739131, |
|
"loss": 0.6931, |
|
"step": 18675 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00011818840579710145, |
|
"loss": 0.5672, |
|
"step": 18700 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0001180072463768116, |
|
"loss": 0.6437, |
|
"step": 18725 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00011782608695652174, |
|
"loss": 0.6527, |
|
"step": 18750 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00011764492753623189, |
|
"loss": 0.6001, |
|
"step": 18775 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00011746376811594204, |
|
"loss": 0.6255, |
|
"step": 18800 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00011728260869565217, |
|
"loss": 0.6615, |
|
"step": 18825 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00011710144927536232, |
|
"loss": 0.6139, |
|
"step": 18850 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00011692028985507246, |
|
"loss": 0.6925, |
|
"step": 18875 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00011673913043478261, |
|
"loss": 0.6406, |
|
"step": 18900 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00011655797101449276, |
|
"loss": 0.6745, |
|
"step": 18925 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00011637681159420289, |
|
"loss": 0.6063, |
|
"step": 18950 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00011619565217391305, |
|
"loss": 0.617, |
|
"step": 18975 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00011601449275362319, |
|
"loss": 0.6342, |
|
"step": 19000 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"eval_loss": 0.6297908425331116, |
|
"eval_runtime": 6186.457, |
|
"eval_samples_per_second": 0.809, |
|
"eval_steps_per_second": 0.202, |
|
"eval_wer": 42.68255060632668, |
|
"step": 19000 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00011583333333333333, |
|
"loss": 0.6395, |
|
"step": 19025 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00011565217391304348, |
|
"loss": 0.6232, |
|
"step": 19050 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00011547101449275363, |
|
"loss": 0.6556, |
|
"step": 19075 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00011528985507246378, |
|
"loss": 0.5557, |
|
"step": 19100 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00011510869565217391, |
|
"loss": 0.6073, |
|
"step": 19125 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00011492753623188407, |
|
"loss": 0.6471, |
|
"step": 19150 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0001147463768115942, |
|
"loss": 0.5657, |
|
"step": 19175 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00011456521739130435, |
|
"loss": 0.6348, |
|
"step": 19200 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00011438405797101448, |
|
"loss": 0.6581, |
|
"step": 19225 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00011420289855072465, |
|
"loss": 0.5888, |
|
"step": 19250 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00011402173913043479, |
|
"loss": 0.648, |
|
"step": 19275 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00011384057971014493, |
|
"loss": 0.6632, |
|
"step": 19300 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00011365942028985507, |
|
"loss": 0.627, |
|
"step": 19325 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00011347826086956522, |
|
"loss": 0.6466, |
|
"step": 19350 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00011329710144927537, |
|
"loss": 0.6318, |
|
"step": 19375 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0001131159420289855, |
|
"loss": 0.6421, |
|
"step": 19400 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00011293478260869565, |
|
"loss": 0.6173, |
|
"step": 19425 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00011275362318840581, |
|
"loss": 0.6033, |
|
"step": 19450 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00011257246376811594, |
|
"loss": 0.5807, |
|
"step": 19475 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00011239130434782609, |
|
"loss": 0.5878, |
|
"step": 19500 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00011221014492753622, |
|
"loss": 0.5989, |
|
"step": 19525 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00011202898550724639, |
|
"loss": 0.5648, |
|
"step": 19550 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00011184782608695652, |
|
"loss": 0.6106, |
|
"step": 19575 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00011166666666666667, |
|
"loss": 0.6043, |
|
"step": 19600 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00011148550724637681, |
|
"loss": 0.6108, |
|
"step": 19625 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00011130434782608696, |
|
"loss": 0.6729, |
|
"step": 19650 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00011112318840579711, |
|
"loss": 0.6472, |
|
"step": 19675 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00011094202898550724, |
|
"loss": 0.6716, |
|
"step": 19700 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00011076086956521739, |
|
"loss": 0.6657, |
|
"step": 19725 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00011057971014492754, |
|
"loss": 0.6297, |
|
"step": 19750 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00011039855072463768, |
|
"loss": 0.6371, |
|
"step": 19775 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00011021739130434783, |
|
"loss": 0.6213, |
|
"step": 19800 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00011003623188405798, |
|
"loss": 0.6303, |
|
"step": 19825 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010985507246376812, |
|
"loss": 0.665, |
|
"step": 19850 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010967391304347826, |
|
"loss": 0.6263, |
|
"step": 19875 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0001094927536231884, |
|
"loss": 0.6424, |
|
"step": 19900 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010931159420289857, |
|
"loss": 0.6549, |
|
"step": 19925 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0001091304347826087, |
|
"loss": 0.6327, |
|
"step": 19950 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010894927536231885, |
|
"loss": 0.5384, |
|
"step": 19975 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010876811594202898, |
|
"loss": 0.6174, |
|
"step": 20000 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"eval_loss": 0.6079863905906677, |
|
"eval_runtime": 6150.9443, |
|
"eval_samples_per_second": 0.814, |
|
"eval_steps_per_second": 0.204, |
|
"eval_wer": 40.11723106860628, |
|
"step": 20000 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010858695652173914, |
|
"loss": 0.6349, |
|
"step": 20025 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010840579710144927, |
|
"loss": 0.5833, |
|
"step": 20050 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010822463768115942, |
|
"loss": 0.5931, |
|
"step": 20075 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010804347826086957, |
|
"loss": 0.5622, |
|
"step": 20100 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010786231884057972, |
|
"loss": 0.5825, |
|
"step": 20125 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010768115942028986, |
|
"loss": 0.5919, |
|
"step": 20150 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0001075, |
|
"loss": 0.5773, |
|
"step": 20175 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010731884057971014, |
|
"loss": 0.6487, |
|
"step": 20200 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010713768115942029, |
|
"loss": 0.6037, |
|
"step": 20225 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010695652173913044, |
|
"loss": 0.4728, |
|
"step": 20250 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010677536231884059, |
|
"loss": 0.594, |
|
"step": 20275 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010659420289855072, |
|
"loss": 0.5955, |
|
"step": 20300 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010641304347826088, |
|
"loss": 0.607, |
|
"step": 20325 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010623188405797101, |
|
"loss": 0.5924, |
|
"step": 20350 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010605072463768116, |
|
"loss": 0.5852, |
|
"step": 20375 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0001058695652173913, |
|
"loss": 0.5136, |
|
"step": 20400 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010568840579710146, |
|
"loss": 0.537, |
|
"step": 20425 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0001055072463768116, |
|
"loss": 0.6373, |
|
"step": 20450 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010532608695652174, |
|
"loss": 0.5526, |
|
"step": 20475 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010514492753623188, |
|
"loss": 0.5994, |
|
"step": 20500 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010496376811594203, |
|
"loss": 0.583, |
|
"step": 20525 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010478260869565218, |
|
"loss": 0.6394, |
|
"step": 20550 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010460144927536231, |
|
"loss": 0.55, |
|
"step": 20575 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010442028985507247, |
|
"loss": 0.5821, |
|
"step": 20600 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010423913043478262, |
|
"loss": 0.6771, |
|
"step": 20625 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010405797101449275, |
|
"loss": 0.6192, |
|
"step": 20650 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0001038768115942029, |
|
"loss": 0.6443, |
|
"step": 20675 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010369565217391305, |
|
"loss": 0.6412, |
|
"step": 20700 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0001035144927536232, |
|
"loss": 0.6015, |
|
"step": 20725 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010333333333333333, |
|
"loss": 0.6236, |
|
"step": 20750 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010315217391304348, |
|
"loss": 0.5623, |
|
"step": 20775 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010297101449275364, |
|
"loss": 0.615, |
|
"step": 20800 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010278985507246377, |
|
"loss": 0.5963, |
|
"step": 20825 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010260869565217392, |
|
"loss": 0.5786, |
|
"step": 20850 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010242753623188405, |
|
"loss": 0.5758, |
|
"step": 20875 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0001022536231884058, |
|
"loss": 0.6087, |
|
"step": 20900 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00010207246376811595, |
|
"loss": 0.5862, |
|
"step": 20925 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00010189130434782609, |
|
"loss": 0.6518, |
|
"step": 20950 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00010171014492753623, |
|
"loss": 0.602, |
|
"step": 20975 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00010152898550724637, |
|
"loss": 0.5551, |
|
"step": 21000 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"eval_loss": 0.5896329283714294, |
|
"eval_runtime": 6188.8975, |
|
"eval_samples_per_second": 0.809, |
|
"eval_steps_per_second": 0.202, |
|
"eval_wer": 39.03984432473481, |
|
"step": 21000 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00010134782608695653, |
|
"loss": 0.565, |
|
"step": 21025 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00010116666666666667, |
|
"loss": 0.6315, |
|
"step": 21050 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00010098550724637682, |
|
"loss": 0.5814, |
|
"step": 21075 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00010080434782608695, |
|
"loss": 0.6221, |
|
"step": 21100 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00010062318840579711, |
|
"loss": 0.5563, |
|
"step": 21125 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00010044202898550724, |
|
"loss": 0.648, |
|
"step": 21150 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00010026086956521739, |
|
"loss": 0.6128, |
|
"step": 21175 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00010007971014492754, |
|
"loss": 0.533, |
|
"step": 21200 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.989855072463769e-05, |
|
"loss": 0.6296, |
|
"step": 21225 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.971739130434783e-05, |
|
"loss": 0.6338, |
|
"step": 21250 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.953623188405797e-05, |
|
"loss": 0.579, |
|
"step": 21275 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.935507246376811e-05, |
|
"loss": 0.5706, |
|
"step": 21300 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.917391304347827e-05, |
|
"loss": 0.576, |
|
"step": 21325 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.899275362318841e-05, |
|
"loss": 0.6029, |
|
"step": 21350 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.881159420289855e-05, |
|
"loss": 0.585, |
|
"step": 21375 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.863043478260869e-05, |
|
"loss": 0.5899, |
|
"step": 21400 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.844927536231885e-05, |
|
"loss": 0.563, |
|
"step": 21425 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.826811594202898e-05, |
|
"loss": 0.5222, |
|
"step": 21450 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.808695652173913e-05, |
|
"loss": 0.611, |
|
"step": 21475 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.790579710144929e-05, |
|
"loss": 0.5361, |
|
"step": 21500 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.772463768115942e-05, |
|
"loss": 0.5106, |
|
"step": 21525 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.754347826086957e-05, |
|
"loss": 0.5718, |
|
"step": 21550 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.73623188405797e-05, |
|
"loss": 0.4927, |
|
"step": 21575 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.718115942028987e-05, |
|
"loss": 0.6652, |
|
"step": 21600 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.7e-05, |
|
"loss": 0.5688, |
|
"step": 21625 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.681884057971015e-05, |
|
"loss": 0.5386, |
|
"step": 21650 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.66376811594203e-05, |
|
"loss": 0.5522, |
|
"step": 21675 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.645652173913044e-05, |
|
"loss": 0.5484, |
|
"step": 21700 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.627536231884059e-05, |
|
"loss": 0.6428, |
|
"step": 21725 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.609420289855072e-05, |
|
"loss": 0.6201, |
|
"step": 21750 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.591304347826087e-05, |
|
"loss": 0.5935, |
|
"step": 21775 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.573188405797102e-05, |
|
"loss": 0.5543, |
|
"step": 21800 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.555072463768116e-05, |
|
"loss": 0.6147, |
|
"step": 21825 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.536956521739131e-05, |
|
"loss": 0.6359, |
|
"step": 21850 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.518840579710144e-05, |
|
"loss": 0.607, |
|
"step": 21875 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.50072463768116e-05, |
|
"loss": 0.5923, |
|
"step": 21900 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.482608695652174e-05, |
|
"loss": 0.5105, |
|
"step": 21925 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.464492753623189e-05, |
|
"loss": 0.604, |
|
"step": 21950 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.446376811594202e-05, |
|
"loss": 0.5682, |
|
"step": 21975 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.428260869565218e-05, |
|
"loss": 0.5353, |
|
"step": 22000 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"eval_loss": 0.5752917528152466, |
|
"eval_runtime": 6209.9534, |
|
"eval_samples_per_second": 0.806, |
|
"eval_steps_per_second": 0.202, |
|
"eval_wer": 39.12527587270699, |
|
"step": 22000 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.410144927536233e-05, |
|
"loss": 0.5695, |
|
"step": 22025 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.392028985507246e-05, |
|
"loss": 0.6238, |
|
"step": 22050 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.373913043478261e-05, |
|
"loss": 0.5802, |
|
"step": 22075 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.355797101449276e-05, |
|
"loss": 0.5519, |
|
"step": 22100 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.33768115942029e-05, |
|
"loss": 0.5285, |
|
"step": 22125 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.319565217391304e-05, |
|
"loss": 0.6007, |
|
"step": 22150 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.301449275362318e-05, |
|
"loss": 0.6266, |
|
"step": 22175 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.283333333333334e-05, |
|
"loss": 0.4953, |
|
"step": 22200 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.265217391304348e-05, |
|
"loss": 0.5415, |
|
"step": 22225 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.247101449275363e-05, |
|
"loss": 0.5891, |
|
"step": 22250 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.228985507246376e-05, |
|
"loss": 0.5733, |
|
"step": 22275 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.210869565217392e-05, |
|
"loss": 0.5113, |
|
"step": 22300 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.192753623188405e-05, |
|
"loss": 0.5733, |
|
"step": 22325 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.17463768115942e-05, |
|
"loss": 0.6071, |
|
"step": 22350 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.156521739130436e-05, |
|
"loss": 0.6016, |
|
"step": 22375 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.13840579710145e-05, |
|
"loss": 0.583, |
|
"step": 22400 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.120289855072464e-05, |
|
"loss": 0.5587, |
|
"step": 22425 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.102173913043478e-05, |
|
"loss": 0.5064, |
|
"step": 22450 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.084057971014494e-05, |
|
"loss": 0.5962, |
|
"step": 22475 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.065942028985507e-05, |
|
"loss": 0.6031, |
|
"step": 22500 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.047826086956522e-05, |
|
"loss": 0.6156, |
|
"step": 22525 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.029710144927536e-05, |
|
"loss": 0.5686, |
|
"step": 22550 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 9.011594202898551e-05, |
|
"loss": 0.5362, |
|
"step": 22575 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 8.993478260869566e-05, |
|
"loss": 0.6052, |
|
"step": 22600 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 8.975362318840579e-05, |
|
"loss": 0.562, |
|
"step": 22625 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 8.957246376811594e-05, |
|
"loss": 0.5782, |
|
"step": 22650 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 8.939130434782609e-05, |
|
"loss": 0.5337, |
|
"step": 22675 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 8.921014492753623e-05, |
|
"loss": 0.5238, |
|
"step": 22700 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 8.902898550724638e-05, |
|
"loss": 0.5321, |
|
"step": 22725 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 8.884782608695652e-05, |
|
"loss": 0.5133, |
|
"step": 22750 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 8.866666666666668e-05, |
|
"loss": 0.6038, |
|
"step": 22775 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 8.848550724637681e-05, |
|
"loss": 0.6108, |
|
"step": 22800 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 8.830434782608696e-05, |
|
"loss": 0.5916, |
|
"step": 22825 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 8.812318840579709e-05, |
|
"loss": 0.5767, |
|
"step": 22850 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 8.794202898550725e-05, |
|
"loss": 0.601, |
|
"step": 22875 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 8.77608695652174e-05, |
|
"loss": 0.5565, |
|
"step": 22900 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 8.757971014492753e-05, |
|
"loss": 0.6307, |
|
"step": 22925 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.739855072463768e-05, |
|
"loss": 0.5092, |
|
"step": 22950 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.721739130434783e-05, |
|
"loss": 0.5775, |
|
"step": 22975 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.703623188405797e-05, |
|
"loss": 0.5528, |
|
"step": 23000 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"eval_loss": 0.558778703212738, |
|
"eval_runtime": 6279.7061, |
|
"eval_samples_per_second": 0.797, |
|
"eval_steps_per_second": 0.199, |
|
"eval_wer": 40.28809416455065, |
|
"step": 23000 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.685507246376812e-05, |
|
"loss": 0.5581, |
|
"step": 23025 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.667391304347825e-05, |
|
"loss": 0.5841, |
|
"step": 23050 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.649275362318842e-05, |
|
"loss": 0.5456, |
|
"step": 23075 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.631159420289855e-05, |
|
"loss": 0.603, |
|
"step": 23100 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.61304347826087e-05, |
|
"loss": 0.6094, |
|
"step": 23125 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.594927536231884e-05, |
|
"loss": 0.468, |
|
"step": 23150 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.576811594202899e-05, |
|
"loss": 0.63, |
|
"step": 23175 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.558695652173914e-05, |
|
"loss": 0.6095, |
|
"step": 23200 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.540579710144927e-05, |
|
"loss": 0.5802, |
|
"step": 23225 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.522463768115943e-05, |
|
"loss": 0.5892, |
|
"step": 23250 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.504347826086957e-05, |
|
"loss": 0.5736, |
|
"step": 23275 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.486231884057971e-05, |
|
"loss": 0.4859, |
|
"step": 23300 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.468115942028985e-05, |
|
"loss": 0.5839, |
|
"step": 23325 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.450000000000001e-05, |
|
"loss": 0.5858, |
|
"step": 23350 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.431884057971015e-05, |
|
"loss": 0.5604, |
|
"step": 23375 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.413768115942029e-05, |
|
"loss": 0.5168, |
|
"step": 23400 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.395652173913044e-05, |
|
"loss": 0.5481, |
|
"step": 23425 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.377536231884058e-05, |
|
"loss": 0.4701, |
|
"step": 23450 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.359420289855073e-05, |
|
"loss": 0.5258, |
|
"step": 23475 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.341304347826086e-05, |
|
"loss": 0.5736, |
|
"step": 23500 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.323188405797101e-05, |
|
"loss": 0.5622, |
|
"step": 23525 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.305072463768117e-05, |
|
"loss": 0.5255, |
|
"step": 23550 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.28695652173913e-05, |
|
"loss": 0.6043, |
|
"step": 23575 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.268840579710145e-05, |
|
"loss": 0.8682, |
|
"step": 23600 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.250724637681159e-05, |
|
"loss": 0.4914, |
|
"step": 23625 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.232608695652175e-05, |
|
"loss": 0.5564, |
|
"step": 23650 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.214492753623188e-05, |
|
"loss": 0.5782, |
|
"step": 23675 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.196376811594203e-05, |
|
"loss": 0.5916, |
|
"step": 23700 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.178260869565217e-05, |
|
"loss": 0.5447, |
|
"step": 23725 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.160144927536232e-05, |
|
"loss": 0.514, |
|
"step": 23750 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.142028985507247e-05, |
|
"loss": 0.5487, |
|
"step": 23775 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.12391304347826e-05, |
|
"loss": 0.5429, |
|
"step": 23800 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.105797101449275e-05, |
|
"loss": 0.4828, |
|
"step": 23825 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.08768115942029e-05, |
|
"loss": 0.5015, |
|
"step": 23850 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.069565217391304e-05, |
|
"loss": 0.517, |
|
"step": 23875 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.051449275362319e-05, |
|
"loss": 0.5219, |
|
"step": 23900 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.033333333333334e-05, |
|
"loss": 0.5493, |
|
"step": 23925 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 8.015217391304349e-05, |
|
"loss": 0.4885, |
|
"step": 23950 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.997101449275362e-05, |
|
"loss": 0.5623, |
|
"step": 23975 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.978985507246377e-05, |
|
"loss": 0.5423, |
|
"step": 24000 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"eval_loss": 0.5444626212120056, |
|
"eval_runtime": 6160.1135, |
|
"eval_samples_per_second": 0.812, |
|
"eval_steps_per_second": 0.203, |
|
"eval_wer": 35.660551982723845, |
|
"step": 24000 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.960869565217391e-05, |
|
"loss": 0.557, |
|
"step": 24025 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.942753623188406e-05, |
|
"loss": 0.5214, |
|
"step": 24050 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.924637681159421e-05, |
|
"loss": 0.5122, |
|
"step": 24075 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.906521739130434e-05, |
|
"loss": 0.5814, |
|
"step": 24100 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.88840579710145e-05, |
|
"loss": 0.5421, |
|
"step": 24125 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.870289855072464e-05, |
|
"loss": 0.526, |
|
"step": 24150 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.852173913043478e-05, |
|
"loss": 0.5424, |
|
"step": 24175 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.834057971014492e-05, |
|
"loss": 0.5408, |
|
"step": 24200 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.815942028985508e-05, |
|
"loss": 0.5618, |
|
"step": 24225 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.797826086956523e-05, |
|
"loss": 0.5007, |
|
"step": 24250 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.779710144927536e-05, |
|
"loss": 0.4739, |
|
"step": 24275 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.76159420289855e-05, |
|
"loss": 0.5238, |
|
"step": 24300 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.743478260869565e-05, |
|
"loss": 0.524, |
|
"step": 24325 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.72536231884058e-05, |
|
"loss": 0.5054, |
|
"step": 24350 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.707246376811595e-05, |
|
"loss": 0.5595, |
|
"step": 24375 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.689130434782608e-05, |
|
"loss": 0.5144, |
|
"step": 24400 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.671014492753624e-05, |
|
"loss": 0.5069, |
|
"step": 24425 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.652898550724638e-05, |
|
"loss": 0.5121, |
|
"step": 24450 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.634782608695652e-05, |
|
"loss": 0.5281, |
|
"step": 24475 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.616666666666666e-05, |
|
"loss": 0.5126, |
|
"step": 24500 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.598550724637682e-05, |
|
"loss": 0.6006, |
|
"step": 24525 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.580434782608696e-05, |
|
"loss": 0.5609, |
|
"step": 24550 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.56231884057971e-05, |
|
"loss": 0.5247, |
|
"step": 24575 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.544202898550725e-05, |
|
"loss": 0.5286, |
|
"step": 24600 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.526086956521739e-05, |
|
"loss": 0.5367, |
|
"step": 24625 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.507971014492754e-05, |
|
"loss": 0.6143, |
|
"step": 24650 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.489855072463767e-05, |
|
"loss": 0.579, |
|
"step": 24675 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.471739130434783e-05, |
|
"loss": 0.5103, |
|
"step": 24700 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.453623188405798e-05, |
|
"loss": 0.5062, |
|
"step": 24725 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.435507246376812e-05, |
|
"loss": 0.5085, |
|
"step": 24750 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.417391304347826e-05, |
|
"loss": 0.6252, |
|
"step": 24775 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.399275362318841e-05, |
|
"loss": 0.5466, |
|
"step": 24800 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.381159420289856e-05, |
|
"loss": 0.5409, |
|
"step": 24825 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.363043478260869e-05, |
|
"loss": 0.5708, |
|
"step": 24850 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.344927536231884e-05, |
|
"loss": 0.4924, |
|
"step": 24875 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.327536231884057e-05, |
|
"loss": 0.5049, |
|
"step": 24900 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 7.309420289855073e-05, |
|
"loss": 0.5233, |
|
"step": 24925 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 7.291304347826088e-05, |
|
"loss": 0.5151, |
|
"step": 24950 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 7.273188405797101e-05, |
|
"loss": 0.4914, |
|
"step": 24975 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 7.255072463768116e-05, |
|
"loss": 0.5069, |
|
"step": 25000 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"eval_loss": 0.5304020643234253, |
|
"eval_runtime": 6200.0725, |
|
"eval_samples_per_second": 0.807, |
|
"eval_steps_per_second": 0.202, |
|
"eval_wer": 35.93583141507867, |
|
"step": 25000 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 7.236956521739131e-05, |
|
"loss": 0.5066, |
|
"step": 25025 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 7.218840579710145e-05, |
|
"loss": 0.5338, |
|
"step": 25050 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 7.200724637681159e-05, |
|
"loss": 0.4938, |
|
"step": 25075 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 7.182608695652174e-05, |
|
"loss": 0.4742, |
|
"step": 25100 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 7.16449275362319e-05, |
|
"loss": 0.439, |
|
"step": 25125 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 7.146376811594203e-05, |
|
"loss": 0.5899, |
|
"step": 25150 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 7.128260869565218e-05, |
|
"loss": 0.4483, |
|
"step": 25175 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 7.110144927536231e-05, |
|
"loss": 0.5661, |
|
"step": 25200 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 7.092028985507247e-05, |
|
"loss": 0.5919, |
|
"step": 25225 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 7.07391304347826e-05, |
|
"loss": 0.4567, |
|
"step": 25250 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 7.055797101449275e-05, |
|
"loss": 0.4964, |
|
"step": 25275 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 7.03768115942029e-05, |
|
"loss": 0.5314, |
|
"step": 25300 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 7.019565217391305e-05, |
|
"loss": 0.5589, |
|
"step": 25325 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 7.00144927536232e-05, |
|
"loss": 0.5106, |
|
"step": 25350 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.983333333333333e-05, |
|
"loss": 0.5249, |
|
"step": 25375 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.965217391304347e-05, |
|
"loss": 0.5737, |
|
"step": 25400 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.947101449275362e-05, |
|
"loss": 0.5539, |
|
"step": 25425 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.928985507246377e-05, |
|
"loss": 0.5074, |
|
"step": 25450 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.910869565217392e-05, |
|
"loss": 0.5577, |
|
"step": 25475 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.892753623188405e-05, |
|
"loss": 0.4824, |
|
"step": 25500 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.874637681159421e-05, |
|
"loss": 0.5336, |
|
"step": 25525 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.856521739130434e-05, |
|
"loss": 0.5886, |
|
"step": 25550 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.838405797101449e-05, |
|
"loss": 0.5536, |
|
"step": 25575 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.820289855072465e-05, |
|
"loss": 0.5087, |
|
"step": 25600 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.802173913043479e-05, |
|
"loss": 0.5165, |
|
"step": 25625 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.784057971014493e-05, |
|
"loss": 0.5782, |
|
"step": 25650 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.765942028985507e-05, |
|
"loss": 0.5582, |
|
"step": 25675 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.747826086956523e-05, |
|
"loss": 0.5829, |
|
"step": 25700 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.729710144927536e-05, |
|
"loss": 0.4834, |
|
"step": 25725 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.711594202898551e-05, |
|
"loss": 0.5246, |
|
"step": 25750 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.693478260869566e-05, |
|
"loss": 0.5443, |
|
"step": 25775 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.67536231884058e-05, |
|
"loss": 0.5, |
|
"step": 25800 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.657246376811595e-05, |
|
"loss": 0.5199, |
|
"step": 25825 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.639130434782608e-05, |
|
"loss": 0.4944, |
|
"step": 25850 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.621014492753623e-05, |
|
"loss": 0.5081, |
|
"step": 25875 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.602898550724638e-05, |
|
"loss": 0.5663, |
|
"step": 25900 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.584782608695653e-05, |
|
"loss": 0.4847, |
|
"step": 25925 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.566666666666667e-05, |
|
"loss": 0.486, |
|
"step": 25950 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.54855072463768e-05, |
|
"loss": 0.5484, |
|
"step": 25975 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.530434782608697e-05, |
|
"loss": 0.4356, |
|
"step": 26000 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"eval_loss": 0.5187134742736816, |
|
"eval_runtime": 6155.9894, |
|
"eval_samples_per_second": 0.813, |
|
"eval_steps_per_second": 0.203, |
|
"eval_wer": 34.49298749377061, |
|
"step": 26000 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.51231884057971e-05, |
|
"loss": 0.4864, |
|
"step": 26025 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.494202898550725e-05, |
|
"loss": 0.5086, |
|
"step": 26050 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.476086956521738e-05, |
|
"loss": 0.5245, |
|
"step": 26075 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.457971014492754e-05, |
|
"loss": 0.4574, |
|
"step": 26100 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.439855072463769e-05, |
|
"loss": 0.5071, |
|
"step": 26125 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.421739130434782e-05, |
|
"loss": 0.5297, |
|
"step": 26150 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.403623188405797e-05, |
|
"loss": 0.5426, |
|
"step": 26175 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.385507246376812e-05, |
|
"loss": 0.4716, |
|
"step": 26200 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.367391304347826e-05, |
|
"loss": 0.4649, |
|
"step": 26225 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.34927536231884e-05, |
|
"loss": 0.4876, |
|
"step": 26250 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.331159420289855e-05, |
|
"loss": 0.4955, |
|
"step": 26275 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.31304347826087e-05, |
|
"loss": 0.4935, |
|
"step": 26300 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.294927536231884e-05, |
|
"loss": 0.5393, |
|
"step": 26325 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.276811594202899e-05, |
|
"loss": 0.5106, |
|
"step": 26350 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.258695652173913e-05, |
|
"loss": 0.4791, |
|
"step": 26375 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.240579710144928e-05, |
|
"loss": 0.4947, |
|
"step": 26400 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.222463768115942e-05, |
|
"loss": 0.5272, |
|
"step": 26425 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.204347826086958e-05, |
|
"loss": 0.5317, |
|
"step": 26450 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.186231884057971e-05, |
|
"loss": 0.541, |
|
"step": 26475 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.168115942028986e-05, |
|
"loss": 0.4706, |
|
"step": 26500 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.15e-05, |
|
"loss": 0.4843, |
|
"step": 26525 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.131884057971015e-05, |
|
"loss": 0.55, |
|
"step": 26550 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.113768115942028e-05, |
|
"loss": 0.5025, |
|
"step": 26575 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.095652173913044e-05, |
|
"loss": 0.5481, |
|
"step": 26600 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.077536231884058e-05, |
|
"loss": 0.5372, |
|
"step": 26625 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.0594202898550727e-05, |
|
"loss": 0.4827, |
|
"step": 26650 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.041304347826087e-05, |
|
"loss": 0.5627, |
|
"step": 26675 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.023913043478261e-05, |
|
"loss": 0.6139, |
|
"step": 26700 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 6.0057971014492755e-05, |
|
"loss": 0.486, |
|
"step": 26725 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 5.9876811594202895e-05, |
|
"loss": 0.4263, |
|
"step": 26750 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 5.969565217391305e-05, |
|
"loss": 0.5085, |
|
"step": 26775 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 5.951449275362319e-05, |
|
"loss": 0.5375, |
|
"step": 26800 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 5.933333333333334e-05, |
|
"loss": 0.46, |
|
"step": 26825 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 5.915217391304348e-05, |
|
"loss": 0.4551, |
|
"step": 26850 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 5.8971014492753624e-05, |
|
"loss": 0.4416, |
|
"step": 26875 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 5.8789855072463765e-05, |
|
"loss": 0.4306, |
|
"step": 26900 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 5.860869565217392e-05, |
|
"loss": 0.4464, |
|
"step": 26925 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.842753623188406e-05, |
|
"loss": 0.5108, |
|
"step": 26950 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.82536231884058e-05, |
|
"loss": 0.5432, |
|
"step": 26975 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.807246376811595e-05, |
|
"loss": 0.5111, |
|
"step": 27000 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"eval_loss": 0.5034857392311096, |
|
"eval_runtime": 6134.0304, |
|
"eval_samples_per_second": 0.816, |
|
"eval_steps_per_second": 0.204, |
|
"eval_wer": 33.422720045563494, |
|
"step": 27000 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.789130434782609e-05, |
|
"loss": 0.4599, |
|
"step": 27025 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.7710144927536235e-05, |
|
"loss": 0.4427, |
|
"step": 27050 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.7528985507246375e-05, |
|
"loss": 0.5067, |
|
"step": 27075 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.734782608695652e-05, |
|
"loss": 0.4341, |
|
"step": 27100 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.716666666666666e-05, |
|
"loss": 0.4668, |
|
"step": 27125 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.6985507246376817e-05, |
|
"loss": 0.4743, |
|
"step": 27150 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.680434782608696e-05, |
|
"loss": 0.488, |
|
"step": 27175 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.6623188405797104e-05, |
|
"loss": 0.4982, |
|
"step": 27200 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.6442028985507245e-05, |
|
"loss": 0.5173, |
|
"step": 27225 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.626086956521739e-05, |
|
"loss": 0.5259, |
|
"step": 27250 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.607971014492753e-05, |
|
"loss": 0.4459, |
|
"step": 27275 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.589855072463768e-05, |
|
"loss": 0.4874, |
|
"step": 27300 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.571739130434783e-05, |
|
"loss": 0.4779, |
|
"step": 27325 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.5536231884057974e-05, |
|
"loss": 0.4924, |
|
"step": 27350 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.535507246376812e-05, |
|
"loss": 0.4998, |
|
"step": 27375 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.517391304347826e-05, |
|
"loss": 0.4812, |
|
"step": 27400 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.499275362318841e-05, |
|
"loss": 0.5353, |
|
"step": 27425 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.481159420289855e-05, |
|
"loss": 0.4757, |
|
"step": 27450 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.46304347826087e-05, |
|
"loss": 0.455, |
|
"step": 27475 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.4449275362318843e-05, |
|
"loss": 0.4996, |
|
"step": 27500 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.426811594202899e-05, |
|
"loss": 0.4718, |
|
"step": 27525 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.408695652173913e-05, |
|
"loss": 0.4887, |
|
"step": 27550 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.390579710144928e-05, |
|
"loss": 0.5006, |
|
"step": 27575 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.372463768115942e-05, |
|
"loss": 0.5329, |
|
"step": 27600 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.3543478260869566e-05, |
|
"loss": 0.4868, |
|
"step": 27625 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.336231884057971e-05, |
|
"loss": 0.487, |
|
"step": 27650 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.318115942028986e-05, |
|
"loss": 0.5835, |
|
"step": 27675 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.3e-05, |
|
"loss": 0.4904, |
|
"step": 27700 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.281884057971015e-05, |
|
"loss": 0.5261, |
|
"step": 27725 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.263768115942029e-05, |
|
"loss": 0.4581, |
|
"step": 27750 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.2456521739130436e-05, |
|
"loss": 0.4993, |
|
"step": 27775 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.2275362318840576e-05, |
|
"loss": 0.5257, |
|
"step": 27800 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.209420289855073e-05, |
|
"loss": 0.4443, |
|
"step": 27825 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.191304347826087e-05, |
|
"loss": 0.5031, |
|
"step": 27850 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.173188405797102e-05, |
|
"loss": 0.475, |
|
"step": 27875 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.155072463768116e-05, |
|
"loss": 0.52, |
|
"step": 27900 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.1369565217391305e-05, |
|
"loss": 0.5038, |
|
"step": 27925 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.1188405797101446e-05, |
|
"loss": 0.4803, |
|
"step": 27950 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.100724637681159e-05, |
|
"loss": 0.493, |
|
"step": 27975 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.082608695652174e-05, |
|
"loss": 0.5613, |
|
"step": 28000 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"eval_loss": 0.4912257194519043, |
|
"eval_runtime": 6183.2879, |
|
"eval_samples_per_second": 0.809, |
|
"eval_steps_per_second": 0.202, |
|
"eval_wer": 33.09523244500344, |
|
"step": 28000 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.064492753623189e-05, |
|
"loss": 0.4801, |
|
"step": 28025 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.046376811594203e-05, |
|
"loss": 0.4915, |
|
"step": 28050 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.0282608695652175e-05, |
|
"loss": 0.4282, |
|
"step": 28075 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 5.0101449275362315e-05, |
|
"loss": 0.4381, |
|
"step": 28100 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.992028985507246e-05, |
|
"loss": 0.4421, |
|
"step": 28125 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.9739130434782617e-05, |
|
"loss": 0.4561, |
|
"step": 28150 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.955797101449276e-05, |
|
"loss": 0.5123, |
|
"step": 28175 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.9376811594202904e-05, |
|
"loss": 0.5428, |
|
"step": 28200 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.9195652173913045e-05, |
|
"loss": 0.4695, |
|
"step": 28225 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.901449275362319e-05, |
|
"loss": 0.4128, |
|
"step": 28250 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.883333333333333e-05, |
|
"loss": 0.4851, |
|
"step": 28275 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.865217391304348e-05, |
|
"loss": 0.4081, |
|
"step": 28300 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.847101449275363e-05, |
|
"loss": 0.4893, |
|
"step": 28325 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.8289855072463774e-05, |
|
"loss": 0.4521, |
|
"step": 28350 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.8108695652173914e-05, |
|
"loss": 0.5489, |
|
"step": 28375 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.792753623188406e-05, |
|
"loss": 0.5304, |
|
"step": 28400 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.77463768115942e-05, |
|
"loss": 0.4512, |
|
"step": 28425 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.756521739130435e-05, |
|
"loss": 0.4725, |
|
"step": 28450 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.738405797101449e-05, |
|
"loss": 0.5036, |
|
"step": 28475 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.7202898550724643e-05, |
|
"loss": 0.4832, |
|
"step": 28500 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.7021739130434784e-05, |
|
"loss": 0.534, |
|
"step": 28525 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.684057971014493e-05, |
|
"loss": 0.4533, |
|
"step": 28550 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.665942028985507e-05, |
|
"loss": 0.5385, |
|
"step": 28575 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.647826086956522e-05, |
|
"loss": 0.4935, |
|
"step": 28600 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.629710144927536e-05, |
|
"loss": 0.443, |
|
"step": 28625 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.6115942028985506e-05, |
|
"loss": 0.4324, |
|
"step": 28650 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.5934782608695654e-05, |
|
"loss": 0.4784, |
|
"step": 28675 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.57536231884058e-05, |
|
"loss": 0.5117, |
|
"step": 28700 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.557246376811594e-05, |
|
"loss": 0.4398, |
|
"step": 28725 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.539130434782609e-05, |
|
"loss": 0.4913, |
|
"step": 28750 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.521014492753623e-05, |
|
"loss": 0.4358, |
|
"step": 28775 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.5028985507246376e-05, |
|
"loss": 0.4604, |
|
"step": 28800 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.4847826086956516e-05, |
|
"loss": 0.4732, |
|
"step": 28825 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.466666666666667e-05, |
|
"loss": 0.4656, |
|
"step": 28850 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.448550724637681e-05, |
|
"loss": 0.4768, |
|
"step": 28875 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.430434782608696e-05, |
|
"loss": 0.4306, |
|
"step": 28900 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.41231884057971e-05, |
|
"loss": 0.4821, |
|
"step": 28925 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 4.3942028985507246e-05, |
|
"loss": 0.538, |
|
"step": 28950 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 4.376086956521739e-05, |
|
"loss": 0.4845, |
|
"step": 28975 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 4.357971014492754e-05, |
|
"loss": 0.4165, |
|
"step": 29000 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"eval_loss": 0.48251873254776, |
|
"eval_runtime": 6175.4519, |
|
"eval_samples_per_second": 0.81, |
|
"eval_steps_per_second": 0.203, |
|
"eval_wer": 32.015472602577184, |
|
"step": 29000 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 4.339855072463769e-05, |
|
"loss": 0.48, |
|
"step": 29025 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 4.321739130434783e-05, |
|
"loss": 0.5295, |
|
"step": 29050 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 4.3036231884057975e-05, |
|
"loss": 0.4944, |
|
"step": 29075 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 4.2855072463768115e-05, |
|
"loss": 0.4394, |
|
"step": 29100 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 4.267391304347826e-05, |
|
"loss": 0.5057, |
|
"step": 29125 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 4.24927536231884e-05, |
|
"loss": 0.5016, |
|
"step": 29150 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 4.231159420289856e-05, |
|
"loss": 0.3946, |
|
"step": 29175 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 4.21304347826087e-05, |
|
"loss": 0.5014, |
|
"step": 29200 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 4.1949275362318845e-05, |
|
"loss": 0.5253, |
|
"step": 29225 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 4.1768115942028985e-05, |
|
"loss": 0.4362, |
|
"step": 29250 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 4.158695652173913e-05, |
|
"loss": 0.5088, |
|
"step": 29275 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 4.140579710144927e-05, |
|
"loss": 0.4703, |
|
"step": 29300 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 4.122463768115942e-05, |
|
"loss": 0.503, |
|
"step": 29325 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 4.104347826086957e-05, |
|
"loss": 0.4592, |
|
"step": 29350 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 4.0862318840579714e-05, |
|
"loss": 0.4553, |
|
"step": 29375 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 4.0681159420289855e-05, |
|
"loss": 0.5688, |
|
"step": 29400 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 4.05e-05, |
|
"loss": 0.4286, |
|
"step": 29425 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 4.031884057971014e-05, |
|
"loss": 0.5024, |
|
"step": 29450 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 4.013768115942029e-05, |
|
"loss": 0.5156, |
|
"step": 29475 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.995652173913043e-05, |
|
"loss": 0.4684, |
|
"step": 29500 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.9775362318840584e-05, |
|
"loss": 0.4606, |
|
"step": 29525 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.9594202898550724e-05, |
|
"loss": 0.4924, |
|
"step": 29550 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.941304347826087e-05, |
|
"loss": 0.4926, |
|
"step": 29575 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.923188405797101e-05, |
|
"loss": 0.4257, |
|
"step": 29600 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.905072463768116e-05, |
|
"loss": 0.5036, |
|
"step": 29625 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.88695652173913e-05, |
|
"loss": 0.5142, |
|
"step": 29650 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.8688405797101454e-05, |
|
"loss": 0.5144, |
|
"step": 29675 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.8507246376811594e-05, |
|
"loss": 0.4234, |
|
"step": 29700 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.832608695652174e-05, |
|
"loss": 0.4776, |
|
"step": 29725 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.814492753623189e-05, |
|
"loss": 0.574, |
|
"step": 29750 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.796376811594203e-05, |
|
"loss": 0.451, |
|
"step": 29775 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.7782608695652176e-05, |
|
"loss": 0.478, |
|
"step": 29800 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.7601449275362316e-05, |
|
"loss": 0.4856, |
|
"step": 29825 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.742028985507247e-05, |
|
"loss": 0.5041, |
|
"step": 29850 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.723913043478261e-05, |
|
"loss": 0.4875, |
|
"step": 29875 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.705797101449276e-05, |
|
"loss": 0.4526, |
|
"step": 29900 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.68768115942029e-05, |
|
"loss": 0.4811, |
|
"step": 29925 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.6695652173913046e-05, |
|
"loss": 0.4831, |
|
"step": 29950 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.6514492753623186e-05, |
|
"loss": 0.4735, |
|
"step": 29975 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.633333333333333e-05, |
|
"loss": 0.4736, |
|
"step": 30000 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"eval_loss": 0.47158339619636536, |
|
"eval_runtime": 6176.7504, |
|
"eval_samples_per_second": 0.81, |
|
"eval_steps_per_second": 0.203, |
|
"eval_wer": 32.09141175633024, |
|
"step": 30000 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.615217391304348e-05, |
|
"loss": 0.5359, |
|
"step": 30025 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.597101449275363e-05, |
|
"loss": 0.4393, |
|
"step": 30050 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.578985507246377e-05, |
|
"loss": 0.527, |
|
"step": 30075 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.5608695652173915e-05, |
|
"loss": 0.4477, |
|
"step": 30100 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.5427536231884056e-05, |
|
"loss": 0.4318, |
|
"step": 30125 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.52463768115942e-05, |
|
"loss": 0.4479, |
|
"step": 30150 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.5065217391304343e-05, |
|
"loss": 0.543, |
|
"step": 30175 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.48840579710145e-05, |
|
"loss": 0.421, |
|
"step": 30200 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.470289855072464e-05, |
|
"loss": 0.4694, |
|
"step": 30225 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.4521739130434785e-05, |
|
"loss": 0.4164, |
|
"step": 30250 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.4340579710144925e-05, |
|
"loss": 0.459, |
|
"step": 30275 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.415942028985507e-05, |
|
"loss": 0.454, |
|
"step": 30300 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.397826086956521e-05, |
|
"loss": 0.5195, |
|
"step": 30325 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.379710144927537e-05, |
|
"loss": 0.445, |
|
"step": 30350 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.361594202898551e-05, |
|
"loss": 0.5167, |
|
"step": 30375 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.3434782608695655e-05, |
|
"loss": 0.5422, |
|
"step": 30400 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.3253623188405795e-05, |
|
"loss": 0.5054, |
|
"step": 30425 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.307246376811594e-05, |
|
"loss": 0.4505, |
|
"step": 30450 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.289130434782608e-05, |
|
"loss": 0.3841, |
|
"step": 30475 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.271014492753623e-05, |
|
"loss": 0.4823, |
|
"step": 30500 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.2528985507246384e-05, |
|
"loss": 0.4393, |
|
"step": 30525 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.2347826086956524e-05, |
|
"loss": 0.3793, |
|
"step": 30550 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.216666666666667e-05, |
|
"loss": 0.4372, |
|
"step": 30575 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.198550724637681e-05, |
|
"loss": 0.4921, |
|
"step": 30600 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.180434782608696e-05, |
|
"loss": 0.4891, |
|
"step": 30625 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.16231884057971e-05, |
|
"loss": 0.4375, |
|
"step": 30650 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.144202898550725e-05, |
|
"loss": 0.4652, |
|
"step": 30675 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.1260869565217394e-05, |
|
"loss": 0.4635, |
|
"step": 30700 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.1079710144927534e-05, |
|
"loss": 0.4937, |
|
"step": 30725 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.089855072463768e-05, |
|
"loss": 0.4578, |
|
"step": 30750 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.071739130434783e-05, |
|
"loss": 0.4976, |
|
"step": 30775 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.053623188405797e-05, |
|
"loss": 0.4287, |
|
"step": 30800 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.0355072463768116e-05, |
|
"loss": 0.4332, |
|
"step": 30825 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.017391304347826e-05, |
|
"loss": 0.4527, |
|
"step": 30850 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 2.9992753623188404e-05, |
|
"loss": 0.5016, |
|
"step": 30875 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 2.9811594202898555e-05, |
|
"loss": 0.425, |
|
"step": 30900 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.96304347826087e-05, |
|
"loss": 0.419, |
|
"step": 30925 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.9449275362318842e-05, |
|
"loss": 0.4652, |
|
"step": 30950 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.9268115942028986e-05, |
|
"loss": 0.4079, |
|
"step": 30975 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.9086956521739133e-05, |
|
"loss": 0.4213, |
|
"step": 31000 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"eval_loss": 0.4618246555328369, |
|
"eval_runtime": 6209.3649, |
|
"eval_samples_per_second": 0.806, |
|
"eval_steps_per_second": 0.202, |
|
"eval_wer": 31.602553454044944, |
|
"step": 31000 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.8905797101449277e-05, |
|
"loss": 0.4598, |
|
"step": 31025 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.872463768115942e-05, |
|
"loss": 0.4848, |
|
"step": 31050 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.8543478260869568e-05, |
|
"loss": 0.4737, |
|
"step": 31075 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.8362318840579712e-05, |
|
"loss": 0.41, |
|
"step": 31100 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.8181159420289856e-05, |
|
"loss": 0.5066, |
|
"step": 31125 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.8e-05, |
|
"loss": 0.4915, |
|
"step": 31150 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.7818840579710147e-05, |
|
"loss": 0.4951, |
|
"step": 31175 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.763768115942029e-05, |
|
"loss": 0.4934, |
|
"step": 31200 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.7456521739130434e-05, |
|
"loss": 0.4841, |
|
"step": 31225 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.727536231884058e-05, |
|
"loss": 0.4512, |
|
"step": 31250 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.7094202898550725e-05, |
|
"loss": 0.4406, |
|
"step": 31275 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.691304347826087e-05, |
|
"loss": 0.4719, |
|
"step": 31300 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.6731884057971016e-05, |
|
"loss": 0.4947, |
|
"step": 31325 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.655072463768116e-05, |
|
"loss": 0.4436, |
|
"step": 31350 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.6369565217391304e-05, |
|
"loss": 0.4927, |
|
"step": 31375 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.6188405797101448e-05, |
|
"loss": 0.44, |
|
"step": 31400 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.6007246376811595e-05, |
|
"loss": 0.4541, |
|
"step": 31425 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.582608695652174e-05, |
|
"loss": 0.4889, |
|
"step": 31450 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.5644927536231883e-05, |
|
"loss": 0.4327, |
|
"step": 31475 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.546376811594203e-05, |
|
"loss": 0.4534, |
|
"step": 31500 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.5282608695652174e-05, |
|
"loss": 0.4646, |
|
"step": 31525 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.5101449275362318e-05, |
|
"loss": 0.4756, |
|
"step": 31550 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.492028985507246e-05, |
|
"loss": 0.4588, |
|
"step": 31575 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.473913043478261e-05, |
|
"loss": 0.4061, |
|
"step": 31600 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.4557971014492752e-05, |
|
"loss": 0.4451, |
|
"step": 31625 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.4376811594202896e-05, |
|
"loss": 0.4569, |
|
"step": 31650 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.4195652173913047e-05, |
|
"loss": 0.47, |
|
"step": 31675 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.401449275362319e-05, |
|
"loss": 0.4827, |
|
"step": 31700 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.3833333333333334e-05, |
|
"loss": 0.4325, |
|
"step": 31725 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.365217391304348e-05, |
|
"loss": 0.5046, |
|
"step": 31750 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.3471014492753625e-05, |
|
"loss": 0.4215, |
|
"step": 31775 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.328985507246377e-05, |
|
"loss": 0.4677, |
|
"step": 31800 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.3108695652173913e-05, |
|
"loss": 0.5024, |
|
"step": 31825 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.292753623188406e-05, |
|
"loss": 0.4391, |
|
"step": 31850 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.2746376811594204e-05, |
|
"loss": 0.4453, |
|
"step": 31875 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.2565217391304348e-05, |
|
"loss": 0.4534, |
|
"step": 31900 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.2384057971014495e-05, |
|
"loss": 0.4199, |
|
"step": 31925 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.220289855072464e-05, |
|
"loss": 0.49, |
|
"step": 31950 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.2021739130434783e-05, |
|
"loss": 0.4466, |
|
"step": 31975 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.184057971014493e-05, |
|
"loss": 0.4242, |
|
"step": 32000 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"eval_loss": 0.45143240690231323, |
|
"eval_runtime": 6170.6869, |
|
"eval_samples_per_second": 0.811, |
|
"eval_steps_per_second": 0.203, |
|
"eval_wer": 30.375661501222144, |
|
"step": 32000 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.1659420289855074e-05, |
|
"loss": 0.5117, |
|
"step": 32025 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.1478260869565218e-05, |
|
"loss": 0.4279, |
|
"step": 32050 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.129710144927536e-05, |
|
"loss": 0.3904, |
|
"step": 32075 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.111594202898551e-05, |
|
"loss": 0.4152, |
|
"step": 32100 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.0934782608695652e-05, |
|
"loss": 0.4275, |
|
"step": 32125 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.0753623188405796e-05, |
|
"loss": 0.4093, |
|
"step": 32150 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.0572463768115943e-05, |
|
"loss": 0.4287, |
|
"step": 32175 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.0391304347826087e-05, |
|
"loss": 0.4427, |
|
"step": 32200 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.021014492753623e-05, |
|
"loss": 0.4304, |
|
"step": 32225 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 2.0028985507246375e-05, |
|
"loss": 0.4881, |
|
"step": 32250 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.9847826086956522e-05, |
|
"loss": 0.4714, |
|
"step": 32275 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.9666666666666666e-05, |
|
"loss": 0.4183, |
|
"step": 32300 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.948550724637681e-05, |
|
"loss": 0.4294, |
|
"step": 32325 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.9304347826086957e-05, |
|
"loss": 0.4769, |
|
"step": 32350 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.91231884057971e-05, |
|
"loss": 0.451, |
|
"step": 32375 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.8942028985507244e-05, |
|
"loss": 0.3997, |
|
"step": 32400 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.876086956521739e-05, |
|
"loss": 0.3907, |
|
"step": 32425 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.8579710144927536e-05, |
|
"loss": 0.469, |
|
"step": 32450 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.8398550724637683e-05, |
|
"loss": 0.4591, |
|
"step": 32475 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.8217391304347827e-05, |
|
"loss": 0.4348, |
|
"step": 32500 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.8036231884057974e-05, |
|
"loss": 0.4522, |
|
"step": 32525 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.7855072463768118e-05, |
|
"loss": 0.4456, |
|
"step": 32550 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.767391304347826e-05, |
|
"loss": 0.396, |
|
"step": 32575 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.749275362318841e-05, |
|
"loss": 0.3869, |
|
"step": 32600 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.7311594202898552e-05, |
|
"loss": 0.4212, |
|
"step": 32625 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.7130434782608696e-05, |
|
"loss": 0.3793, |
|
"step": 32650 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.6949275362318843e-05, |
|
"loss": 0.4553, |
|
"step": 32675 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.6768115942028987e-05, |
|
"loss": 0.4395, |
|
"step": 32700 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.658695652173913e-05, |
|
"loss": 0.4249, |
|
"step": 32725 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.6405797101449275e-05, |
|
"loss": 0.4313, |
|
"step": 32750 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.6224637681159422e-05, |
|
"loss": 0.4577, |
|
"step": 32775 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.6043478260869566e-05, |
|
"loss": 0.381, |
|
"step": 32800 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.586231884057971e-05, |
|
"loss": 0.458, |
|
"step": 32825 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.5681159420289857e-05, |
|
"loss": 0.4388, |
|
"step": 32850 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.55e-05, |
|
"loss": 0.4619, |
|
"step": 32875 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.5318840579710144e-05, |
|
"loss": 0.4418, |
|
"step": 32900 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.513768115942029e-05, |
|
"loss": 0.388, |
|
"step": 32925 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.4956521739130434e-05, |
|
"loss": 0.4835, |
|
"step": 32950 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.477536231884058e-05, |
|
"loss": 0.4566, |
|
"step": 32975 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.4594202898550725e-05, |
|
"loss": 0.3837, |
|
"step": 33000 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"eval_loss": 0.44476595520973206, |
|
"eval_runtime": 6195.5396, |
|
"eval_samples_per_second": 0.808, |
|
"eval_steps_per_second": 0.202, |
|
"eval_wer": 30.311587840243003, |
|
"step": 33000 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.4420289855072464e-05, |
|
"loss": 0.439, |
|
"step": 33025 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.4239130434782609e-05, |
|
"loss": 0.4526, |
|
"step": 33050 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.4057971014492753e-05, |
|
"loss": 0.3809, |
|
"step": 33075 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.3876811594202898e-05, |
|
"loss": 0.4424, |
|
"step": 33100 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.3695652173913042e-05, |
|
"loss": 0.4592, |
|
"step": 33125 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.3514492753623188e-05, |
|
"loss": 0.4471, |
|
"step": 33150 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.3333333333333335e-05, |
|
"loss": 0.4752, |
|
"step": 33175 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.3152173913043479e-05, |
|
"loss": 0.4864, |
|
"step": 33200 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.2971014492753624e-05, |
|
"loss": 0.415, |
|
"step": 33225 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.2789855072463768e-05, |
|
"loss": 0.5136, |
|
"step": 33250 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.2608695652173914e-05, |
|
"loss": 0.4896, |
|
"step": 33275 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.2427536231884059e-05, |
|
"loss": 0.4255, |
|
"step": 33300 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.2246376811594203e-05, |
|
"loss": 0.4414, |
|
"step": 33325 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.2065217391304348e-05, |
|
"loss": 0.4077, |
|
"step": 33350 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.1884057971014492e-05, |
|
"loss": 0.3616, |
|
"step": 33375 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.1702898550724638e-05, |
|
"loss": 0.406, |
|
"step": 33400 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.1521739130434783e-05, |
|
"loss": 0.4286, |
|
"step": 33425 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.1340579710144927e-05, |
|
"loss": 0.4421, |
|
"step": 33450 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.1159420289855073e-05, |
|
"loss": 0.3742, |
|
"step": 33475 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.0978260869565216e-05, |
|
"loss": 0.3674, |
|
"step": 33500 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.0797101449275362e-05, |
|
"loss": 0.4166, |
|
"step": 33525 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.0615942028985507e-05, |
|
"loss": 0.4147, |
|
"step": 33550 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.0434782608695653e-05, |
|
"loss": 0.4894, |
|
"step": 33575 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.0253623188405798e-05, |
|
"loss": 0.4049, |
|
"step": 33600 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.0072463768115942e-05, |
|
"loss": 0.4257, |
|
"step": 33625 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 9.891304347826088e-06, |
|
"loss": 0.472, |
|
"step": 33650 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 9.710144927536233e-06, |
|
"loss": 0.404, |
|
"step": 33675 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 9.528985507246377e-06, |
|
"loss": 0.4576, |
|
"step": 33700 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 9.347826086956523e-06, |
|
"loss": 0.4128, |
|
"step": 33725 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 9.166666666666666e-06, |
|
"loss": 0.4063, |
|
"step": 33750 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 8.985507246376812e-06, |
|
"loss": 0.4406, |
|
"step": 33775 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 8.804347826086956e-06, |
|
"loss": 0.4676, |
|
"step": 33800 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 8.623188405797101e-06, |
|
"loss": 0.4796, |
|
"step": 33825 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 8.442028985507247e-06, |
|
"loss": 0.4821, |
|
"step": 33850 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 8.26086956521739e-06, |
|
"loss": 0.4106, |
|
"step": 33875 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 8.079710144927536e-06, |
|
"loss": 0.4944, |
|
"step": 33900 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 7.89855072463768e-06, |
|
"loss": 0.4475, |
|
"step": 33925 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 7.717391304347827e-06, |
|
"loss": 0.4984, |
|
"step": 33950 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 7.536231884057971e-06, |
|
"loss": 0.4352, |
|
"step": 33975 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 7.355072463768116e-06, |
|
"loss": 0.4321, |
|
"step": 34000 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"eval_loss": 0.43773025274276733, |
|
"eval_runtime": 6174.6708, |
|
"eval_samples_per_second": 0.811, |
|
"eval_steps_per_second": 0.203, |
|
"eval_wer": 29.469137853295045, |
|
"step": 34000 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 7.173913043478261e-06, |
|
"loss": 0.4081, |
|
"step": 34025 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 6.992753623188406e-06, |
|
"loss": 0.4442, |
|
"step": 34050 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 6.811594202898551e-06, |
|
"loss": 0.4163, |
|
"step": 34075 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 6.630434782608696e-06, |
|
"loss": 0.4479, |
|
"step": 34100 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 6.449275362318841e-06, |
|
"loss": 0.4042, |
|
"step": 34125 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 6.268115942028985e-06, |
|
"loss": 0.4084, |
|
"step": 34150 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 6.086956521739131e-06, |
|
"loss": 0.4172, |
|
"step": 34175 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 5.905797101449275e-06, |
|
"loss": 0.4458, |
|
"step": 34200 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 5.72463768115942e-06, |
|
"loss": 0.414, |
|
"step": 34225 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 5.543478260869565e-06, |
|
"loss": 0.4291, |
|
"step": 34250 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 5.36231884057971e-06, |
|
"loss": 0.4316, |
|
"step": 34275 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 5.181159420289856e-06, |
|
"loss": 0.4382, |
|
"step": 34300 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 5e-06, |
|
"loss": 0.4041, |
|
"step": 34325 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 4.818840579710145e-06, |
|
"loss": 0.4503, |
|
"step": 34350 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 4.63768115942029e-06, |
|
"loss": 0.4374, |
|
"step": 34375 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 4.456521739130434e-06, |
|
"loss": 0.4563, |
|
"step": 34400 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 4.275362318840579e-06, |
|
"loss": 0.4047, |
|
"step": 34425 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 4.094202898550725e-06, |
|
"loss": 0.3997, |
|
"step": 34450 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 3.91304347826087e-06, |
|
"loss": 0.4044, |
|
"step": 34475 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 3.7318840579710147e-06, |
|
"loss": 0.3637, |
|
"step": 34500 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 3.5507246376811594e-06, |
|
"loss": 0.4967, |
|
"step": 34525 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 3.3695652173913045e-06, |
|
"loss": 0.4352, |
|
"step": 34550 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 3.188405797101449e-06, |
|
"loss": 0.4373, |
|
"step": 34575 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 3.0072463768115946e-06, |
|
"loss": 0.4713, |
|
"step": 34600 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 2.8260869565217393e-06, |
|
"loss": 0.4667, |
|
"step": 34625 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 2.644927536231884e-06, |
|
"loss": 0.4344, |
|
"step": 34650 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 2.463768115942029e-06, |
|
"loss": 0.4458, |
|
"step": 34675 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 2.282608695652174e-06, |
|
"loss": 0.3944, |
|
"step": 34700 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 2.101449275362319e-06, |
|
"loss": 0.4295, |
|
"step": 34725 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.920289855072464e-06, |
|
"loss": 0.4773, |
|
"step": 34750 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.7391304347826088e-06, |
|
"loss": 0.4438, |
|
"step": 34775 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.5579710144927536e-06, |
|
"loss": 0.3903, |
|
"step": 34800 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.3768115942028987e-06, |
|
"loss": 0.4185, |
|
"step": 34825 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.1956521739130434e-06, |
|
"loss": 0.4553, |
|
"step": 34850 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.0144927536231885e-06, |
|
"loss": 0.3928, |
|
"step": 34875 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 8.333333333333334e-07, |
|
"loss": 0.4137, |
|
"step": 34900 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 6.521739130434783e-07, |
|
"loss": 0.4452, |
|
"step": 34925 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 4.710144927536232e-07, |
|
"loss": 0.4904, |
|
"step": 34950 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 2.898550724637681e-07, |
|
"loss": 0.4443, |
|
"step": 34975 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 1.0869565217391305e-07, |
|
"loss": 0.4268, |
|
"step": 35000 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"eval_loss": 0.4343973398208618, |
|
"eval_runtime": 6168.1717, |
|
"eval_samples_per_second": 0.811, |
|
"eval_steps_per_second": 0.203, |
|
"eval_wer": 29.205723913714138, |
|
"step": 35000 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"step": 35001, |
|
"total_flos": 4.7538258866601984e+20, |
|
"train_loss": 1.5390448555129346e-05, |
|
"train_runtime": 6.8399, |
|
"train_samples_per_second": 40936.549, |
|
"train_steps_per_second": 5117.069 |
|
} |
|
], |
|
"logging_steps": 25, |
|
"max_steps": 35000, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 1, |
|
"save_steps": 1000, |
|
"total_flos": 4.7538258866601984e+20, |
|
"train_batch_size": 8, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|