{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.833180568285976,
"eval_steps": 1000,
"global_step": 4000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.011457378551787351,
"grad_norm": 3.879394769668579,
"learning_rate": 5.000000000000001e-07,
"loss": 0.5663,
"step": 25
},
{
"epoch": 0.022914757103574702,
"grad_norm": 4.039769649505615,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.5106,
"step": 50
},
{
"epoch": 0.034372135655362054,
"grad_norm": 4.186612129211426,
"learning_rate": 1.5e-06,
"loss": 0.4423,
"step": 75
},
{
"epoch": 0.045829514207149404,
"grad_norm": 2.756096839904785,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.3852,
"step": 100
},
{
"epoch": 0.05728689275893675,
"grad_norm": 2.9499497413635254,
"learning_rate": 2.5e-06,
"loss": 0.3232,
"step": 125
},
{
"epoch": 0.06874427131072411,
"grad_norm": 2.5358545780181885,
"learning_rate": 3e-06,
"loss": 0.3331,
"step": 150
},
{
"epoch": 0.08020164986251145,
"grad_norm": 2.2754788398742676,
"learning_rate": 3.5e-06,
"loss": 0.3308,
"step": 175
},
{
"epoch": 0.09165902841429881,
"grad_norm": 2.565856695175171,
"learning_rate": 4.000000000000001e-06,
"loss": 0.3264,
"step": 200
},
{
"epoch": 0.10311640696608616,
"grad_norm": 2.5726332664489746,
"learning_rate": 4.5e-06,
"loss": 0.3006,
"step": 225
},
{
"epoch": 0.1145737855178735,
"grad_norm": 2.47786021232605,
"learning_rate": 5e-06,
"loss": 0.2915,
"step": 250
},
{
"epoch": 0.12603116406966086,
"grad_norm": 2.316861391067505,
"learning_rate": 5.500000000000001e-06,
"loss": 0.293,
"step": 275
},
{
"epoch": 0.13748854262144822,
"grad_norm": 2.3276755809783936,
"learning_rate": 6e-06,
"loss": 0.2825,
"step": 300
},
{
"epoch": 0.14894592117323557,
"grad_norm": 2.4386003017425537,
"learning_rate": 6.5000000000000004e-06,
"loss": 0.3054,
"step": 325
},
{
"epoch": 0.1604032997250229,
"grad_norm": 2.051398277282715,
"learning_rate": 7e-06,
"loss": 0.2833,
"step": 350
},
{
"epoch": 0.17186067827681026,
"grad_norm": 2.4201934337615967,
"learning_rate": 7.500000000000001e-06,
"loss": 0.2796,
"step": 375
},
{
"epoch": 0.18331805682859761,
"grad_norm": 1.9952783584594727,
"learning_rate": 8.000000000000001e-06,
"loss": 0.275,
"step": 400
},
{
"epoch": 0.19477543538038497,
"grad_norm": 2.1200835704803467,
"learning_rate": 8.5e-06,
"loss": 0.2898,
"step": 425
},
{
"epoch": 0.20623281393217233,
"grad_norm": 2.4650216102600098,
"learning_rate": 9e-06,
"loss": 0.2645,
"step": 450
},
{
"epoch": 0.21769019248395968,
"grad_norm": 1.9929370880126953,
"learning_rate": 9.5e-06,
"loss": 0.278,
"step": 475
},
{
"epoch": 0.229147571035747,
"grad_norm": 2.1805336475372314,
"learning_rate": 1e-05,
"loss": 0.2702,
"step": 500
},
{
"epoch": 0.24060494958753437,
"grad_norm": 2.0869078636169434,
"learning_rate": 9.92857142857143e-06,
"loss": 0.2454,
"step": 525
},
{
"epoch": 0.2520623281393217,
"grad_norm": 1.7062550783157349,
"learning_rate": 9.857142857142859e-06,
"loss": 0.2534,
"step": 550
},
{
"epoch": 0.2635197066911091,
"grad_norm": 1.6921783685684204,
"learning_rate": 9.785714285714286e-06,
"loss": 0.2398,
"step": 575
},
{
"epoch": 0.27497708524289644,
"grad_norm": 2.111154794692993,
"learning_rate": 9.714285714285715e-06,
"loss": 0.2582,
"step": 600
},
{
"epoch": 0.2864344637946838,
"grad_norm": 2.4726927280426025,
"learning_rate": 9.642857142857144e-06,
"loss": 0.2365,
"step": 625
},
{
"epoch": 0.29789184234647115,
"grad_norm": 1.8263903856277466,
"learning_rate": 9.571428571428573e-06,
"loss": 0.2363,
"step": 650
},
{
"epoch": 0.3093492208982585,
"grad_norm": 2.1429872512817383,
"learning_rate": 9.5e-06,
"loss": 0.2355,
"step": 675
},
{
"epoch": 0.3208065994500458,
"grad_norm": 1.7079923152923584,
"learning_rate": 9.42857142857143e-06,
"loss": 0.2291,
"step": 700
},
{
"epoch": 0.33226397800183316,
"grad_norm": 2.1364455223083496,
"learning_rate": 9.357142857142859e-06,
"loss": 0.2284,
"step": 725
},
{
"epoch": 0.3437213565536205,
"grad_norm": 2.03523588180542,
"learning_rate": 9.285714285714288e-06,
"loss": 0.2439,
"step": 750
},
{
"epoch": 0.3551787351054079,
"grad_norm": 1.883085012435913,
"learning_rate": 9.214285714285715e-06,
"loss": 0.2238,
"step": 775
},
{
"epoch": 0.36663611365719523,
"grad_norm": 2.125384569168091,
"learning_rate": 9.142857142857144e-06,
"loss": 0.2335,
"step": 800
},
{
"epoch": 0.3780934922089826,
"grad_norm": 2.0674521923065186,
"learning_rate": 9.071428571428573e-06,
"loss": 0.235,
"step": 825
},
{
"epoch": 0.38955087076076994,
"grad_norm": 1.971111536026001,
"learning_rate": 9e-06,
"loss": 0.2343,
"step": 850
},
{
"epoch": 0.4010082493125573,
"grad_norm": 1.8734443187713623,
"learning_rate": 8.92857142857143e-06,
"loss": 0.2122,
"step": 875
},
{
"epoch": 0.41246562786434465,
"grad_norm": 1.9618253707885742,
"learning_rate": 8.857142857142858e-06,
"loss": 0.2189,
"step": 900
},
{
"epoch": 0.423923006416132,
"grad_norm": 2.4546687602996826,
"learning_rate": 8.785714285714286e-06,
"loss": 0.2137,
"step": 925
},
{
"epoch": 0.43538038496791936,
"grad_norm": 1.8629581928253174,
"learning_rate": 8.714285714285715e-06,
"loss": 0.214,
"step": 950
},
{
"epoch": 0.44683776351970667,
"grad_norm": 1.8550409078598022,
"learning_rate": 8.642857142857144e-06,
"loss": 0.2124,
"step": 975
},
{
"epoch": 0.458295142071494,
"grad_norm": 1.9905967712402344,
"learning_rate": 8.571428571428571e-06,
"loss": 0.23,
"step": 1000
},
{
"epoch": 0.458295142071494,
"eval_loss": 0.2573724389076233,
"eval_runtime": 2144.4724,
"eval_samples_per_second": 2.509,
"eval_steps_per_second": 0.157,
"eval_wer": 0.19923257385979562,
"step": 1000
},
{
"epoch": 0.4697525206232814,
"grad_norm": 1.6646443605422974,
"learning_rate": 8.5e-06,
"loss": 0.2414,
"step": 1025
},
{
"epoch": 0.48120989917506873,
"grad_norm": 2.01544189453125,
"learning_rate": 8.428571428571429e-06,
"loss": 0.2209,
"step": 1050
},
{
"epoch": 0.4926672777268561,
"grad_norm": 2.151685953140259,
"learning_rate": 8.357142857142858e-06,
"loss": 0.2155,
"step": 1075
},
{
"epoch": 0.5041246562786434,
"grad_norm": 2.1384220123291016,
"learning_rate": 8.285714285714287e-06,
"loss": 0.212,
"step": 1100
},
{
"epoch": 0.5155820348304307,
"grad_norm": 1.722456932067871,
"learning_rate": 8.214285714285714e-06,
"loss": 0.1897,
"step": 1125
},
{
"epoch": 0.5270394133822182,
"grad_norm": 1.6334404945373535,
"learning_rate": 8.142857142857143e-06,
"loss": 0.1983,
"step": 1150
},
{
"epoch": 0.5384967919340055,
"grad_norm": 1.665479063987732,
"learning_rate": 8.071428571428572e-06,
"loss": 0.1906,
"step": 1175
},
{
"epoch": 0.5499541704857929,
"grad_norm": 1.7400776147842407,
"learning_rate": 8.000000000000001e-06,
"loss": 0.2024,
"step": 1200
},
{
"epoch": 0.5614115490375802,
"grad_norm": 1.9680901765823364,
"learning_rate": 7.928571428571429e-06,
"loss": 0.2043,
"step": 1225
},
{
"epoch": 0.5728689275893676,
"grad_norm": 1.9159817695617676,
"learning_rate": 7.857142857142858e-06,
"loss": 0.2203,
"step": 1250
},
{
"epoch": 0.5843263061411549,
"grad_norm": 2.0622291564941406,
"learning_rate": 7.785714285714287e-06,
"loss": 0.2139,
"step": 1275
},
{
"epoch": 0.5957836846929423,
"grad_norm": 1.821271538734436,
"learning_rate": 7.714285714285716e-06,
"loss": 0.2035,
"step": 1300
},
{
"epoch": 0.6072410632447296,
"grad_norm": 2.25801944732666,
"learning_rate": 7.642857142857143e-06,
"loss": 0.2112,
"step": 1325
},
{
"epoch": 0.618698441796517,
"grad_norm": 1.8023837804794312,
"learning_rate": 7.571428571428572e-06,
"loss": 0.1935,
"step": 1350
},
{
"epoch": 0.6301558203483043,
"grad_norm": 2.021291971206665,
"learning_rate": 7.500000000000001e-06,
"loss": 0.1928,
"step": 1375
},
{
"epoch": 0.6416131989000916,
"grad_norm": 1.789577841758728,
"learning_rate": 7.428571428571429e-06,
"loss": 0.1967,
"step": 1400
},
{
"epoch": 0.653070577451879,
"grad_norm": 1.4411484003067017,
"learning_rate": 7.357142857142858e-06,
"loss": 0.1806,
"step": 1425
},
{
"epoch": 0.6645279560036663,
"grad_norm": 1.5292807817459106,
"learning_rate": 7.285714285714286e-06,
"loss": 0.2042,
"step": 1450
},
{
"epoch": 0.6759853345554537,
"grad_norm": 1.9637993574142456,
"learning_rate": 7.2142857142857145e-06,
"loss": 0.1944,
"step": 1475
},
{
"epoch": 0.687442713107241,
"grad_norm": 1.4942073822021484,
"learning_rate": 7.1428571428571436e-06,
"loss": 0.1786,
"step": 1500
},
{
"epoch": 0.6989000916590284,
"grad_norm": 2.0111465454101562,
"learning_rate": 7.0714285714285726e-06,
"loss": 0.1955,
"step": 1525
},
{
"epoch": 0.7103574702108157,
"grad_norm": 1.8509060144424438,
"learning_rate": 7e-06,
"loss": 0.1878,
"step": 1550
},
{
"epoch": 0.7218148487626032,
"grad_norm": 1.9552288055419922,
"learning_rate": 6.928571428571429e-06,
"loss": 0.192,
"step": 1575
},
{
"epoch": 0.7332722273143905,
"grad_norm": 1.8190360069274902,
"learning_rate": 6.857142857142858e-06,
"loss": 0.1906,
"step": 1600
},
{
"epoch": 0.7447296058661779,
"grad_norm": 1.8309643268585205,
"learning_rate": 6.785714285714287e-06,
"loss": 0.1727,
"step": 1625
},
{
"epoch": 0.7561869844179652,
"grad_norm": 1.8337007761001587,
"learning_rate": 6.714285714285714e-06,
"loss": 0.1783,
"step": 1650
},
{
"epoch": 0.7676443629697525,
"grad_norm": 1.7660903930664062,
"learning_rate": 6.642857142857143e-06,
"loss": 0.1997,
"step": 1675
},
{
"epoch": 0.7791017415215399,
"grad_norm": 2.344595193862915,
"learning_rate": 6.571428571428572e-06,
"loss": 0.1944,
"step": 1700
},
{
"epoch": 0.7905591200733272,
"grad_norm": 1.631115436553955,
"learning_rate": 6.5000000000000004e-06,
"loss": 0.181,
"step": 1725
},
{
"epoch": 0.8020164986251146,
"grad_norm": 1.8305103778839111,
"learning_rate": 6.4285714285714295e-06,
"loss": 0.1685,
"step": 1750
},
{
"epoch": 0.8134738771769019,
"grad_norm": 1.9758498668670654,
"learning_rate": 6.357142857142858e-06,
"loss": 0.1714,
"step": 1775
},
{
"epoch": 0.8249312557286893,
"grad_norm": 1.4304888248443604,
"learning_rate": 6.285714285714286e-06,
"loss": 0.1621,
"step": 1800
},
{
"epoch": 0.8363886342804766,
"grad_norm": 2.101116418838501,
"learning_rate": 6.214285714285715e-06,
"loss": 0.1754,
"step": 1825
},
{
"epoch": 0.847846012832264,
"grad_norm": 1.6567695140838623,
"learning_rate": 6.142857142857144e-06,
"loss": 0.1659,
"step": 1850
},
{
"epoch": 0.8593033913840513,
"grad_norm": 1.8299853801727295,
"learning_rate": 6.071428571428571e-06,
"loss": 0.1863,
"step": 1875
},
{
"epoch": 0.8707607699358387,
"grad_norm": 1.789523720741272,
"learning_rate": 6e-06,
"loss": 0.1756,
"step": 1900
},
{
"epoch": 0.882218148487626,
"grad_norm": 1.8699467182159424,
"learning_rate": 5.928571428571429e-06,
"loss": 0.1937,
"step": 1925
},
{
"epoch": 0.8936755270394133,
"grad_norm": 1.700272560119629,
"learning_rate": 5.857142857142858e-06,
"loss": 0.186,
"step": 1950
},
{
"epoch": 0.9051329055912007,
"grad_norm": 2.002411127090454,
"learning_rate": 5.785714285714286e-06,
"loss": 0.1635,
"step": 1975
},
{
"epoch": 0.916590284142988,
"grad_norm": 1.8900529146194458,
"learning_rate": 5.7142857142857145e-06,
"loss": 0.1775,
"step": 2000
},
{
"epoch": 0.916590284142988,
"eval_loss": 0.25274595618247986,
"eval_runtime": 2153.2081,
"eval_samples_per_second": 2.499,
"eval_steps_per_second": 0.157,
"eval_wer": 0.20147918406298462,
"step": 2000
},
{
"epoch": 0.9280476626947755,
"grad_norm": 2.0428643226623535,
"learning_rate": 5.6428571428571435e-06,
"loss": 0.1785,
"step": 2025
},
{
"epoch": 0.9395050412465628,
"grad_norm": 1.6772222518920898,
"learning_rate": 5.571428571428572e-06,
"loss": 0.1748,
"step": 2050
},
{
"epoch": 0.9509624197983502,
"grad_norm": 1.6048625707626343,
"learning_rate": 5.500000000000001e-06,
"loss": 0.1819,
"step": 2075
},
{
"epoch": 0.9624197983501375,
"grad_norm": 1.6015398502349854,
"learning_rate": 5.428571428571429e-06,
"loss": 0.1642,
"step": 2100
},
{
"epoch": 0.9738771769019249,
"grad_norm": 1.4918720722198486,
"learning_rate": 5.357142857142857e-06,
"loss": 0.1746,
"step": 2125
},
{
"epoch": 0.9853345554537122,
"grad_norm": 1.2924649715423584,
"learning_rate": 5.285714285714286e-06,
"loss": 0.171,
"step": 2150
},
{
"epoch": 0.9967919340054996,
"grad_norm": 2.293318748474121,
"learning_rate": 5.214285714285715e-06,
"loss": 0.1677,
"step": 2175
},
{
"epoch": 1.008249312557287,
"grad_norm": 1.5060619115829468,
"learning_rate": 5.142857142857142e-06,
"loss": 0.1174,
"step": 2200
},
{
"epoch": 1.0197066911090742,
"grad_norm": 1.4803202152252197,
"learning_rate": 5.071428571428571e-06,
"loss": 0.1145,
"step": 2225
},
{
"epoch": 1.0311640696608615,
"grad_norm": 1.4294520616531372,
"learning_rate": 5e-06,
"loss": 0.1055,
"step": 2250
},
{
"epoch": 1.042621448212649,
"grad_norm": 1.2599146366119385,
"learning_rate": 4.928571428571429e-06,
"loss": 0.0992,
"step": 2275
},
{
"epoch": 1.0540788267644363,
"grad_norm": 1.4009997844696045,
"learning_rate": 4.857142857142858e-06,
"loss": 0.0975,
"step": 2300
},
{
"epoch": 1.0655362053162236,
"grad_norm": 1.5712811946868896,
"learning_rate": 4.785714285714287e-06,
"loss": 0.1084,
"step": 2325
},
{
"epoch": 1.076993583868011,
"grad_norm": 1.3722585439682007,
"learning_rate": 4.714285714285715e-06,
"loss": 0.1118,
"step": 2350
},
{
"epoch": 1.0884509624197984,
"grad_norm": 1.5365544557571411,
"learning_rate": 4.642857142857144e-06,
"loss": 0.1082,
"step": 2375
},
{
"epoch": 1.0999083409715857,
"grad_norm": 1.2618945837020874,
"learning_rate": 4.571428571428572e-06,
"loss": 0.1173,
"step": 2400
},
{
"epoch": 1.111365719523373,
"grad_norm": 1.70241379737854,
"learning_rate": 4.5e-06,
"loss": 0.1015,
"step": 2425
},
{
"epoch": 1.1228230980751603,
"grad_norm": 1.8518885374069214,
"learning_rate": 4.428571428571429e-06,
"loss": 0.1122,
"step": 2450
},
{
"epoch": 1.1342804766269476,
"grad_norm": 1.4387401342391968,
"learning_rate": 4.357142857142857e-06,
"loss": 0.1139,
"step": 2475
},
{
"epoch": 1.1457378551787352,
"grad_norm": 1.3494459390640259,
"learning_rate": 4.2857142857142855e-06,
"loss": 0.106,
"step": 2500
},
{
"epoch": 1.1571952337305225,
"grad_norm": 1.4629216194152832,
"learning_rate": 4.2142857142857145e-06,
"loss": 0.1073,
"step": 2525
},
{
"epoch": 1.1686526122823098,
"grad_norm": 1.5630906820297241,
"learning_rate": 4.1428571428571435e-06,
"loss": 0.0988,
"step": 2550
},
{
"epoch": 1.180109990834097,
"grad_norm": 1.3475199937820435,
"learning_rate": 4.071428571428572e-06,
"loss": 0.1094,
"step": 2575
},
{
"epoch": 1.1915673693858846,
"grad_norm": 1.1417912244796753,
"learning_rate": 4.000000000000001e-06,
"loss": 0.104,
"step": 2600
},
{
"epoch": 1.203024747937672,
"grad_norm": 1.4655263423919678,
"learning_rate": 3.928571428571429e-06,
"loss": 0.113,
"step": 2625
},
{
"epoch": 1.2144821264894592,
"grad_norm": 1.585945963859558,
"learning_rate": 3.857142857142858e-06,
"loss": 0.1079,
"step": 2650
},
{
"epoch": 1.2259395050412465,
"grad_norm": 1.3465677499771118,
"learning_rate": 3.785714285714286e-06,
"loss": 0.1121,
"step": 2675
},
{
"epoch": 1.2373968835930338,
"grad_norm": 1.6796523332595825,
"learning_rate": 3.7142857142857146e-06,
"loss": 0.1181,
"step": 2700
},
{
"epoch": 1.2488542621448213,
"grad_norm": 1.2698748111724854,
"learning_rate": 3.642857142857143e-06,
"loss": 0.1105,
"step": 2725
},
{
"epoch": 1.2603116406966086,
"grad_norm": 1.820233941078186,
"learning_rate": 3.5714285714285718e-06,
"loss": 0.1118,
"step": 2750
},
{
"epoch": 1.271769019248396,
"grad_norm": 1.2463735342025757,
"learning_rate": 3.5e-06,
"loss": 0.1026,
"step": 2775
},
{
"epoch": 1.2832263978001834,
"grad_norm": 2.136754274368286,
"learning_rate": 3.428571428571429e-06,
"loss": 0.1173,
"step": 2800
},
{
"epoch": 1.2946837763519707,
"grad_norm": 1.8479666709899902,
"learning_rate": 3.357142857142857e-06,
"loss": 0.1104,
"step": 2825
},
{
"epoch": 1.306141154903758,
"grad_norm": 1.870710849761963,
"learning_rate": 3.285714285714286e-06,
"loss": 0.1151,
"step": 2850
},
{
"epoch": 1.3175985334555453,
"grad_norm": 1.6915826797485352,
"learning_rate": 3.2142857142857147e-06,
"loss": 0.1092,
"step": 2875
},
{
"epoch": 1.3290559120073326,
"grad_norm": 1.5454246997833252,
"learning_rate": 3.142857142857143e-06,
"loss": 0.1056,
"step": 2900
},
{
"epoch": 1.34051329055912,
"grad_norm": 1.4232581853866577,
"learning_rate": 3.071428571428572e-06,
"loss": 0.0981,
"step": 2925
},
{
"epoch": 1.3519706691109075,
"grad_norm": 1.5464751720428467,
"learning_rate": 3e-06,
"loss": 0.1051,
"step": 2950
},
{
"epoch": 1.3634280476626948,
"grad_norm": 1.617953896522522,
"learning_rate": 2.928571428571429e-06,
"loss": 0.1012,
"step": 2975
},
{
"epoch": 1.374885426214482,
"grad_norm": 1.522476315498352,
"learning_rate": 2.8571428571428573e-06,
"loss": 0.0978,
"step": 3000
},
{
"epoch": 1.374885426214482,
"eval_loss": 0.25593996047973633,
"eval_runtime": 2144.4647,
"eval_samples_per_second": 2.509,
"eval_steps_per_second": 0.157,
"eval_wer": 0.19509722056543002,
"step": 3000
},
{
"epoch": 1.3863428047662696,
"grad_norm": 1.7234809398651123,
"learning_rate": 2.785714285714286e-06,
"loss": 0.1026,
"step": 3025
},
{
"epoch": 1.3978001833180569,
"grad_norm": 1.527714490890503,
"learning_rate": 2.7142857142857144e-06,
"loss": 0.1024,
"step": 3050
},
{
"epoch": 1.4092575618698442,
"grad_norm": 1.8621338605880737,
"learning_rate": 2.642857142857143e-06,
"loss": 0.1087,
"step": 3075
},
{
"epoch": 1.4207149404216315,
"grad_norm": 1.4294075965881348,
"learning_rate": 2.571428571428571e-06,
"loss": 0.0975,
"step": 3100
},
{
"epoch": 1.4321723189734188,
"grad_norm": 1.9539209604263306,
"learning_rate": 2.5e-06,
"loss": 0.0971,
"step": 3125
},
{
"epoch": 1.4436296975252063,
"grad_norm": 1.8456083536148071,
"learning_rate": 2.428571428571429e-06,
"loss": 0.1057,
"step": 3150
},
{
"epoch": 1.4550870760769936,
"grad_norm": 1.2178508043289185,
"learning_rate": 2.3571428571428574e-06,
"loss": 0.0884,
"step": 3175
},
{
"epoch": 1.466544454628781,
"grad_norm": 1.3775935173034668,
"learning_rate": 2.285714285714286e-06,
"loss": 0.1056,
"step": 3200
},
{
"epoch": 1.4780018331805682,
"grad_norm": 1.5138427019119263,
"learning_rate": 2.2142857142857146e-06,
"loss": 0.1042,
"step": 3225
},
{
"epoch": 1.4894592117323557,
"grad_norm": 1.603317141532898,
"learning_rate": 2.1428571428571427e-06,
"loss": 0.0981,
"step": 3250
},
{
"epoch": 1.500916590284143,
"grad_norm": 1.2418969869613647,
"learning_rate": 2.0714285714285717e-06,
"loss": 0.0986,
"step": 3275
},
{
"epoch": 1.5123739688359303,
"grad_norm": 1.4893618822097778,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.1015,
"step": 3300
},
{
"epoch": 1.5238313473877176,
"grad_norm": 1.6855746507644653,
"learning_rate": 1.928571428571429e-06,
"loss": 0.1074,
"step": 3325
},
{
"epoch": 1.535288725939505,
"grad_norm": 1.8536219596862793,
"learning_rate": 1.8571428571428573e-06,
"loss": 0.1139,
"step": 3350
},
{
"epoch": 1.5467461044912922,
"grad_norm": 1.944806456565857,
"learning_rate": 1.7857142857142859e-06,
"loss": 0.1083,
"step": 3375
},
{
"epoch": 1.5582034830430798,
"grad_norm": 1.5871174335479736,
"learning_rate": 1.7142857142857145e-06,
"loss": 0.097,
"step": 3400
},
{
"epoch": 1.569660861594867,
"grad_norm": 1.466895580291748,
"learning_rate": 1.642857142857143e-06,
"loss": 0.0901,
"step": 3425
},
{
"epoch": 1.5811182401466546,
"grad_norm": 1.374851942062378,
"learning_rate": 1.5714285714285714e-06,
"loss": 0.0989,
"step": 3450
},
{
"epoch": 1.5925756186984419,
"grad_norm": 1.267625093460083,
"learning_rate": 1.5e-06,
"loss": 0.1102,
"step": 3475
},
{
"epoch": 1.6040329972502292,
"grad_norm": 1.3605536222457886,
"learning_rate": 1.4285714285714286e-06,
"loss": 0.0922,
"step": 3500
},
{
"epoch": 1.6154903758020165,
"grad_norm": 1.5604383945465088,
"learning_rate": 1.3571428571428572e-06,
"loss": 0.0935,
"step": 3525
},
{
"epoch": 1.6269477543538038,
"grad_norm": 1.552208423614502,
"learning_rate": 1.2857142857142856e-06,
"loss": 0.0976,
"step": 3550
},
{
"epoch": 1.638405132905591,
"grad_norm": 1.723597526550293,
"learning_rate": 1.2142857142857144e-06,
"loss": 0.0903,
"step": 3575
},
{
"epoch": 1.6498625114573784,
"grad_norm": 1.799728512763977,
"learning_rate": 1.142857142857143e-06,
"loss": 0.1024,
"step": 3600
},
{
"epoch": 1.661319890009166,
"grad_norm": 1.9864362478256226,
"learning_rate": 1.0714285714285714e-06,
"loss": 0.1041,
"step": 3625
},
{
"epoch": 1.6727772685609532,
"grad_norm": 1.837119460105896,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.1042,
"step": 3650
},
{
"epoch": 1.6842346471127407,
"grad_norm": 1.6304919719696045,
"learning_rate": 9.285714285714287e-07,
"loss": 0.0969,
"step": 3675
},
{
"epoch": 1.695692025664528,
"grad_norm": 1.7215403318405151,
"learning_rate": 8.571428571428572e-07,
"loss": 0.0958,
"step": 3700
},
{
"epoch": 1.7071494042163153,
"grad_norm": 1.7634841203689575,
"learning_rate": 7.857142857142857e-07,
"loss": 0.1047,
"step": 3725
},
{
"epoch": 1.7186067827681026,
"grad_norm": 1.5024609565734863,
"learning_rate": 7.142857142857143e-07,
"loss": 0.0857,
"step": 3750
},
{
"epoch": 1.73006416131989,
"grad_norm": 1.2845063209533691,
"learning_rate": 6.428571428571428e-07,
"loss": 0.0883,
"step": 3775
},
{
"epoch": 1.7415215398716772,
"grad_norm": 1.503865361213684,
"learning_rate": 5.714285714285715e-07,
"loss": 0.093,
"step": 3800
},
{
"epoch": 1.7529789184234648,
"grad_norm": 1.4570015668869019,
"learning_rate": 5.000000000000001e-07,
"loss": 0.0984,
"step": 3825
},
{
"epoch": 1.764436296975252,
"grad_norm": 1.2372715473175049,
"learning_rate": 4.285714285714286e-07,
"loss": 0.0959,
"step": 3850
},
{
"epoch": 1.7758936755270394,
"grad_norm": 1.4279190301895142,
"learning_rate": 3.5714285714285716e-07,
"loss": 0.0978,
"step": 3875
},
{
"epoch": 1.7873510540788269,
"grad_norm": 2.094740629196167,
"learning_rate": 2.8571428571428575e-07,
"loss": 0.0989,
"step": 3900
},
{
"epoch": 1.7988084326306142,
"grad_norm": 1.5156452655792236,
"learning_rate": 2.142857142857143e-07,
"loss": 0.0983,
"step": 3925
},
{
"epoch": 1.8102658111824015,
"grad_norm": 1.3175643682479858,
"learning_rate": 1.4285714285714287e-07,
"loss": 0.1031,
"step": 3950
},
{
"epoch": 1.8217231897341888,
"grad_norm": 1.2365864515304565,
"learning_rate": 7.142857142857144e-08,
"loss": 0.096,
"step": 3975
},
{
"epoch": 1.833180568285976,
"grad_norm": 1.4035123586654663,
"learning_rate": 0.0,
"loss": 0.0902,
"step": 4000
},
{
"epoch": 1.833180568285976,
"eval_loss": 0.25563082098960876,
"eval_runtime": 2173.621,
"eval_samples_per_second": 2.476,
"eval_steps_per_second": 0.155,
"eval_wer": 0.19340729253648256,
"step": 4000
},
{
"epoch": 1.833180568285976,
"step": 4000,
"total_flos": 4.348390132600013e+20,
"train_loss": 0.1730844228863716,
"train_runtime": 33932.8776,
"train_samples_per_second": 3.772,
"train_steps_per_second": 0.118
}
],
"logging_steps": 25,
"max_steps": 4000,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.348390132600013e+20,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}