whisper-small-mix-it / trainer_state.json
{
"best_metric": 10.587474512857398,
"best_model_checkpoint": "./whisper-small-mix-it/checkpoint-5000",
"epoch": 1.0,
"eval_steps": 1000,
"global_step": 5000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005,
"grad_norm": 7.571747303009033,
"learning_rate": 4.800000000000001e-07,
"loss": 0.8106,
"step": 25
},
{
"epoch": 0.01,
"grad_norm": 4.4452080726623535,
"learning_rate": 9.800000000000001e-07,
"loss": 0.6948,
"step": 50
},
{
"epoch": 0.015,
"grad_norm": 3.8859105110168457,
"learning_rate": 1.48e-06,
"loss": 0.5709,
"step": 75
},
{
"epoch": 0.02,
"grad_norm": 2.8216567039489746,
"learning_rate": 1.98e-06,
"loss": 0.4134,
"step": 100
},
{
"epoch": 0.025,
"grad_norm": 2.741655111312866,
"learning_rate": 2.4800000000000004e-06,
"loss": 0.3693,
"step": 125
},
{
"epoch": 0.03,
"grad_norm": 2.7941854000091553,
"learning_rate": 2.9800000000000003e-06,
"loss": 0.3578,
"step": 150
},
{
"epoch": 0.035,
"grad_norm": 2.9997148513793945,
"learning_rate": 3.48e-06,
"loss": 0.3242,
"step": 175
},
{
"epoch": 0.04,
"grad_norm": 3.136533498764038,
"learning_rate": 3.980000000000001e-06,
"loss": 0.3224,
"step": 200
},
{
"epoch": 0.045,
"grad_norm": 2.4372665882110596,
"learning_rate": 4.48e-06,
"loss": 0.3051,
"step": 225
},
{
"epoch": 0.05,
"grad_norm": 2.798402786254883,
"learning_rate": 4.980000000000001e-06,
"loss": 0.3132,
"step": 250
},
{
"epoch": 0.055,
"grad_norm": 2.634129524230957,
"learning_rate": 5.480000000000001e-06,
"loss": 0.3025,
"step": 275
},
{
"epoch": 0.06,
"grad_norm": 2.955035924911499,
"learning_rate": 5.98e-06,
"loss": 0.3381,
"step": 300
},
{
"epoch": 0.065,
"grad_norm": 2.6961045265197754,
"learning_rate": 6.480000000000001e-06,
"loss": 0.3108,
"step": 325
},
{
"epoch": 0.07,
"grad_norm": 2.5583386421203613,
"learning_rate": 6.98e-06,
"loss": 0.3108,
"step": 350
},
{
"epoch": 0.075,
"grad_norm": 2.4596619606018066,
"learning_rate": 7.48e-06,
"loss": 0.3041,
"step": 375
},
{
"epoch": 0.08,
"grad_norm": 2.315897226333618,
"learning_rate": 7.980000000000002e-06,
"loss": 0.2813,
"step": 400
},
{
"epoch": 0.085,
"grad_norm": 2.350675582885742,
"learning_rate": 8.48e-06,
"loss": 0.2673,
"step": 425
},
{
"epoch": 0.09,
"grad_norm": 2.274202585220337,
"learning_rate": 8.98e-06,
"loss": 0.2589,
"step": 450
},
{
"epoch": 0.095,
"grad_norm": 2.398346424102783,
"learning_rate": 9.48e-06,
"loss": 0.2554,
"step": 475
},
{
"epoch": 0.1,
"grad_norm": 2.4121956825256348,
"learning_rate": 9.980000000000001e-06,
"loss": 0.2365,
"step": 500
},
{
"epoch": 0.105,
"grad_norm": 2.320568323135376,
"learning_rate": 9.946666666666667e-06,
"loss": 0.2527,
"step": 525
},
{
"epoch": 0.11,
"grad_norm": 2.2950375080108643,
"learning_rate": 9.891111111111113e-06,
"loss": 0.2731,
"step": 550
},
{
"epoch": 0.115,
"grad_norm": 2.712926149368286,
"learning_rate": 9.835555555555556e-06,
"loss": 0.3282,
"step": 575
},
{
"epoch": 0.12,
"grad_norm": 2.2590723037719727,
"learning_rate": 9.780000000000001e-06,
"loss": 0.3311,
"step": 600
},
{
"epoch": 0.125,
"grad_norm": 2.4605965614318848,
"learning_rate": 9.724444444444445e-06,
"loss": 0.2867,
"step": 625
},
{
"epoch": 0.13,
"grad_norm": 2.426866292953491,
"learning_rate": 9.66888888888889e-06,
"loss": 0.2805,
"step": 650
},
{
"epoch": 0.135,
"grad_norm": 2.631782293319702,
"learning_rate": 9.613333333333335e-06,
"loss": 0.2511,
"step": 675
},
{
"epoch": 0.14,
"grad_norm": 2.4071285724639893,
"learning_rate": 9.557777777777777e-06,
"loss": 0.2404,
"step": 700
},
{
"epoch": 0.145,
"grad_norm": 2.018366575241089,
"learning_rate": 9.502222222222223e-06,
"loss": 0.2236,
"step": 725
},
{
"epoch": 0.15,
"grad_norm": 2.0572237968444824,
"learning_rate": 9.446666666666667e-06,
"loss": 0.2244,
"step": 750
},
{
"epoch": 0.155,
"grad_norm": 2.3840184211730957,
"learning_rate": 9.391111111111111e-06,
"loss": 0.2346,
"step": 775
},
{
"epoch": 0.16,
"grad_norm": 2.5831480026245117,
"learning_rate": 9.335555555555557e-06,
"loss": 0.325,
"step": 800
},
{
"epoch": 0.165,
"grad_norm": 2.831907272338867,
"learning_rate": 9.280000000000001e-06,
"loss": 0.332,
"step": 825
},
{
"epoch": 0.17,
"grad_norm": 2.4622819423675537,
"learning_rate": 9.224444444444445e-06,
"loss": 0.3338,
"step": 850
},
{
"epoch": 0.175,
"grad_norm": 2.7406504154205322,
"learning_rate": 9.168888888888889e-06,
"loss": 0.2918,
"step": 875
},
{
"epoch": 0.18,
"grad_norm": 2.1826331615448,
"learning_rate": 9.113333333333335e-06,
"loss": 0.2737,
"step": 900
},
{
"epoch": 0.185,
"grad_norm": 2.2491261959075928,
"learning_rate": 9.057777777777779e-06,
"loss": 0.2247,
"step": 925
},
{
"epoch": 0.19,
"grad_norm": 1.9993724822998047,
"learning_rate": 9.002222222222223e-06,
"loss": 0.2251,
"step": 950
},
{
"epoch": 0.195,
"grad_norm": 2.3380165100097656,
"learning_rate": 8.946666666666669e-06,
"loss": 0.2129,
"step": 975
},
{
"epoch": 0.2,
"grad_norm": 2.147649049758911,
"learning_rate": 8.891111111111111e-06,
"loss": 0.2213,
"step": 1000
},
{
"epoch": 0.2,
"eval_loss": 0.24065828323364258,
"eval_runtime": 704.3288,
"eval_samples_per_second": 21.517,
"eval_steps_per_second": 2.691,
"eval_wer": 13.460510600671737,
"step": 1000
},
{
"epoch": 0.205,
"grad_norm": 2.1159000396728516,
"learning_rate": 8.835555555555557e-06,
"loss": 0.2184,
"step": 1025
},
{
"epoch": 0.21,
"grad_norm": 1.7620288133621216,
"learning_rate": 8.78e-06,
"loss": 0.2027,
"step": 1050
},
{
"epoch": 0.215,
"grad_norm": 1.9889628887176514,
"learning_rate": 8.724444444444445e-06,
"loss": 0.2087,
"step": 1075
},
{
"epoch": 0.22,
"grad_norm": 1.8565176725387573,
"learning_rate": 8.66888888888889e-06,
"loss": 0.213,
"step": 1100
},
{
"epoch": 0.225,
"grad_norm": 1.9121789932250977,
"learning_rate": 8.613333333333333e-06,
"loss": 0.1944,
"step": 1125
},
{
"epoch": 0.23,
"grad_norm": 2.0282061100006104,
"learning_rate": 8.557777777777778e-06,
"loss": 0.2029,
"step": 1150
},
{
"epoch": 0.235,
"grad_norm": 2.0103440284729004,
"learning_rate": 8.502222222222223e-06,
"loss": 0.193,
"step": 1175
},
{
"epoch": 0.24,
"grad_norm": 1.8842862844467163,
"learning_rate": 8.446666666666668e-06,
"loss": 0.1881,
"step": 1200
},
{
"epoch": 0.245,
"grad_norm": 2.2236127853393555,
"learning_rate": 8.391111111111112e-06,
"loss": 0.1776,
"step": 1225
},
{
"epoch": 0.25,
"grad_norm": 1.9313597679138184,
"learning_rate": 8.335555555555556e-06,
"loss": 0.1725,
"step": 1250
},
{
"epoch": 0.255,
"grad_norm": 1.8937866687774658,
"learning_rate": 8.28e-06,
"loss": 0.1967,
"step": 1275
},
{
"epoch": 0.26,
"grad_norm": 2.6142194271087646,
"learning_rate": 8.224444444444444e-06,
"loss": 0.2598,
"step": 1300
},
{
"epoch": 0.265,
"grad_norm": 2.1339993476867676,
"learning_rate": 8.16888888888889e-06,
"loss": 0.2464,
"step": 1325
},
{
"epoch": 0.27,
"grad_norm": 1.7635416984558105,
"learning_rate": 8.113333333333334e-06,
"loss": 0.2103,
"step": 1350
},
{
"epoch": 0.275,
"grad_norm": 1.9434435367584229,
"learning_rate": 8.057777777777778e-06,
"loss": 0.2077,
"step": 1375
},
{
"epoch": 0.28,
"grad_norm": 1.7413808107376099,
"learning_rate": 8.002222222222222e-06,
"loss": 0.2123,
"step": 1400
},
{
"epoch": 0.285,
"grad_norm": 1.676006555557251,
"learning_rate": 7.946666666666666e-06,
"loss": 0.1803,
"step": 1425
},
{
"epoch": 0.29,
"grad_norm": 1.9897364377975464,
"learning_rate": 7.891111111111112e-06,
"loss": 0.1647,
"step": 1450
},
{
"epoch": 0.295,
"grad_norm": 2.0311105251312256,
"learning_rate": 7.835555555555556e-06,
"loss": 0.1721,
"step": 1475
},
{
"epoch": 0.3,
"grad_norm": 2.149789333343506,
"learning_rate": 7.78e-06,
"loss": 0.1799,
"step": 1500
},
{
"epoch": 0.305,
"grad_norm": 2.448418617248535,
"learning_rate": 7.724444444444446e-06,
"loss": 0.1805,
"step": 1525
},
{
"epoch": 0.31,
"grad_norm": 2.129755735397339,
"learning_rate": 7.66888888888889e-06,
"loss": 0.2502,
"step": 1550
},
{
"epoch": 0.315,
"grad_norm": 1.7567689418792725,
"learning_rate": 7.613333333333334e-06,
"loss": 0.2249,
"step": 1575
},
{
"epoch": 0.32,
"grad_norm": 1.5997744798660278,
"learning_rate": 7.557777777777779e-06,
"loss": 0.1647,
"step": 1600
},
{
"epoch": 0.325,
"grad_norm": 1.6361225843429565,
"learning_rate": 7.502222222222223e-06,
"loss": 0.1646,
"step": 1625
},
{
"epoch": 0.33,
"grad_norm": 1.6844338178634644,
"learning_rate": 7.446666666666668e-06,
"loss": 0.1532,
"step": 1650
},
{
"epoch": 0.335,
"grad_norm": 1.4503517150878906,
"learning_rate": 7.3911111111111125e-06,
"loss": 0.1494,
"step": 1675
},
{
"epoch": 0.34,
"grad_norm": 1.5753148794174194,
"learning_rate": 7.335555555555556e-06,
"loss": 0.1488,
"step": 1700
},
{
"epoch": 0.345,
"grad_norm": 1.8972795009613037,
"learning_rate": 7.280000000000001e-06,
"loss": 0.1671,
"step": 1725
},
{
"epoch": 0.35,
"grad_norm": 2.3437507152557373,
"learning_rate": 7.224444444444445e-06,
"loss": 0.1862,
"step": 1750
},
{
"epoch": 0.355,
"grad_norm": 1.9804730415344238,
"learning_rate": 7.1688888888888895e-06,
"loss": 0.1904,
"step": 1775
},
{
"epoch": 0.36,
"grad_norm": 2.2449533939361572,
"learning_rate": 7.113333333333334e-06,
"loss": 0.1938,
"step": 1800
},
{
"epoch": 0.365,
"grad_norm": 2.0063376426696777,
"learning_rate": 7.057777777777778e-06,
"loss": 0.1933,
"step": 1825
},
{
"epoch": 0.37,
"grad_norm": 1.804547905921936,
"learning_rate": 7.0022222222222225e-06,
"loss": 0.1592,
"step": 1850
},
{
"epoch": 0.375,
"grad_norm": 1.7678463459014893,
"learning_rate": 6.946666666666667e-06,
"loss": 0.1508,
"step": 1875
},
{
"epoch": 0.38,
"grad_norm": 1.8199127912521362,
"learning_rate": 6.891111111111111e-06,
"loss": 0.1678,
"step": 1900
},
{
"epoch": 0.385,
"grad_norm": 2.3224880695343018,
"learning_rate": 6.835555555555556e-06,
"loss": 0.1972,
"step": 1925
},
{
"epoch": 0.39,
"grad_norm": 2.035727024078369,
"learning_rate": 6.780000000000001e-06,
"loss": 0.2074,
"step": 1950
},
{
"epoch": 0.395,
"grad_norm": 2.022366523742676,
"learning_rate": 6.724444444444444e-06,
"loss": 0.1959,
"step": 1975
},
{
"epoch": 0.4,
"grad_norm": 2.046525716781616,
"learning_rate": 6.668888888888889e-06,
"loss": 0.1582,
"step": 2000
},
{
"epoch": 0.4,
"eval_loss": 0.21434400975704193,
"eval_runtime": 707.9201,
"eval_samples_per_second": 21.408,
"eval_steps_per_second": 2.677,
"eval_wer": 12.264182068929111,
"step": 2000
},
{
"epoch": 0.405,
"grad_norm": 1.9399248361587524,
"learning_rate": 6.613333333333334e-06,
"loss": 0.1593,
"step": 2025
},
{
"epoch": 0.41,
"grad_norm": 1.952101469039917,
"learning_rate": 6.557777777777778e-06,
"loss": 0.1676,
"step": 2050
},
{
"epoch": 0.415,
"grad_norm": 2.004068613052368,
"learning_rate": 6.502222222222223e-06,
"loss": 0.2002,
"step": 2075
},
{
"epoch": 0.42,
"grad_norm": 2.298147439956665,
"learning_rate": 6.446666666666668e-06,
"loss": 0.1977,
"step": 2100
},
{
"epoch": 0.425,
"grad_norm": 2.5287961959838867,
"learning_rate": 6.391111111111111e-06,
"loss": 0.3416,
"step": 2125
},
{
"epoch": 0.43,
"grad_norm": 1.9228953123092651,
"learning_rate": 6.335555555555556e-06,
"loss": 0.3219,
"step": 2150
},
{
"epoch": 0.435,
"grad_norm": 1.8267163038253784,
"learning_rate": 6.280000000000001e-06,
"loss": 0.23,
"step": 2175
},
{
"epoch": 0.44,
"grad_norm": 1.5915461778640747,
"learning_rate": 6.224444444444445e-06,
"loss": 0.1586,
"step": 2200
},
{
"epoch": 0.445,
"grad_norm": 1.4649773836135864,
"learning_rate": 6.16888888888889e-06,
"loss": 0.1314,
"step": 2225
},
{
"epoch": 0.45,
"grad_norm": 1.800898790359497,
"learning_rate": 6.113333333333333e-06,
"loss": 0.1303,
"step": 2250
},
{
"epoch": 0.455,
"grad_norm": 1.6255425214767456,
"learning_rate": 6.057777777777778e-06,
"loss": 0.1482,
"step": 2275
},
{
"epoch": 0.46,
"grad_norm": 1.6611112356185913,
"learning_rate": 6.002222222222223e-06,
"loss": 0.1262,
"step": 2300
},
{
"epoch": 0.465,
"grad_norm": 1.6362543106079102,
"learning_rate": 5.946666666666668e-06,
"loss": 0.1413,
"step": 2325
},
{
"epoch": 0.47,
"grad_norm": 1.7811627388000488,
"learning_rate": 5.891111111111112e-06,
"loss": 0.1587,
"step": 2350
},
{
"epoch": 0.475,
"grad_norm": 1.9331281185150146,
"learning_rate": 5.8355555555555565e-06,
"loss": 0.1623,
"step": 2375
},
{
"epoch": 0.48,
"grad_norm": 1.6756068468093872,
"learning_rate": 5.78e-06,
"loss": 0.1495,
"step": 2400
},
{
"epoch": 0.485,
"grad_norm": 1.714150309562683,
"learning_rate": 5.724444444444445e-06,
"loss": 0.1542,
"step": 2425
},
{
"epoch": 0.49,
"grad_norm": 1.4862266778945923,
"learning_rate": 5.6688888888888895e-06,
"loss": 0.1569,
"step": 2450
},
{
"epoch": 0.495,
"grad_norm": 1.72484290599823,
"learning_rate": 5.613333333333334e-06,
"loss": 0.1449,
"step": 2475
},
{
"epoch": 0.5,
"grad_norm": 1.7387865781784058,
"learning_rate": 5.557777777777778e-06,
"loss": 0.137,
"step": 2500
},
{
"epoch": 0.505,
"grad_norm": 2.0343728065490723,
"learning_rate": 5.5022222222222224e-06,
"loss": 0.1448,
"step": 2525
},
{
"epoch": 0.51,
"grad_norm": 1.5081489086151123,
"learning_rate": 5.4466666666666665e-06,
"loss": 0.1304,
"step": 2550
},
{
"epoch": 0.515,
"grad_norm": 1.6394362449645996,
"learning_rate": 5.391111111111111e-06,
"loss": 0.1362,
"step": 2575
},
{
"epoch": 0.52,
"grad_norm": 1.968766689300537,
"learning_rate": 5.335555555555556e-06,
"loss": 0.157,
"step": 2600
},
{
"epoch": 0.525,
"grad_norm": 2.227083206176758,
"learning_rate": 5.28e-06,
"loss": 0.1589,
"step": 2625
},
{
"epoch": 0.53,
"grad_norm": 1.7511582374572754,
"learning_rate": 5.224444444444445e-06,
"loss": 0.1544,
"step": 2650
},
{
"epoch": 0.535,
"grad_norm": 1.838564157485962,
"learning_rate": 5.168888888888889e-06,
"loss": 0.1408,
"step": 2675
},
{
"epoch": 0.54,
"grad_norm": 1.5015802383422852,
"learning_rate": 5.113333333333333e-06,
"loss": 0.1452,
"step": 2700
},
{
"epoch": 0.545,
"grad_norm": 2.10288667678833,
"learning_rate": 5.057777777777778e-06,
"loss": 0.1664,
"step": 2725
},
{
"epoch": 0.55,
"grad_norm": 2.287795305252075,
"learning_rate": 5.002222222222223e-06,
"loss": 0.1751,
"step": 2750
},
{
"epoch": 0.555,
"grad_norm": 2.386364698410034,
"learning_rate": 4.946666666666667e-06,
"loss": 0.1635,
"step": 2775
},
{
"epoch": 0.56,
"grad_norm": 1.9961427450180054,
"learning_rate": 4.891111111111111e-06,
"loss": 0.1544,
"step": 2800
},
{
"epoch": 0.565,
"grad_norm": 1.952929973602295,
"learning_rate": 4.835555555555556e-06,
"loss": 0.1682,
"step": 2825
},
{
"epoch": 0.57,
"grad_norm": 1.7093037366867065,
"learning_rate": 4.78e-06,
"loss": 0.1738,
"step": 2850
},
{
"epoch": 0.575,
"grad_norm": 1.9177967309951782,
"learning_rate": 4.724444444444445e-06,
"loss": 0.1369,
"step": 2875
},
{
"epoch": 0.58,
"grad_norm": 1.7161290645599365,
"learning_rate": 4.66888888888889e-06,
"loss": 0.1386,
"step": 2900
},
{
"epoch": 0.585,
"grad_norm": 2.1072142124176025,
"learning_rate": 4.613333333333334e-06,
"loss": 0.222,
"step": 2925
},
{
"epoch": 0.59,
"grad_norm": 1.521346092224121,
"learning_rate": 4.557777777777778e-06,
"loss": 0.1801,
"step": 2950
},
{
"epoch": 0.595,
"grad_norm": 1.8204540014266968,
"learning_rate": 4.502222222222223e-06,
"loss": 0.155,
"step": 2975
},
{
"epoch": 0.6,
"grad_norm": 1.8535057306289673,
"learning_rate": 4.446666666666667e-06,
"loss": 0.1913,
"step": 3000
},
{
"epoch": 0.6,
"eval_loss": 0.20219513773918152,
"eval_runtime": 700.7499,
"eval_samples_per_second": 21.627,
"eval_steps_per_second": 2.704,
"eval_wer": 11.232818861475318,
"step": 3000
},
{
"epoch": 0.605,
"grad_norm": 1.7448265552520752,
"learning_rate": 4.391111111111112e-06,
"loss": 0.1911,
"step": 3025
},
{
"epoch": 0.61,
"grad_norm": 1.9879142045974731,
"learning_rate": 4.3355555555555565e-06,
"loss": 0.1887,
"step": 3050
},
{
"epoch": 0.615,
"grad_norm": 1.6797877550125122,
"learning_rate": 4.2800000000000005e-06,
"loss": 0.1838,
"step": 3075
},
{
"epoch": 0.62,
"grad_norm": 2.2724123001098633,
"learning_rate": 4.2244444444444446e-06,
"loss": 0.1914,
"step": 3100
},
{
"epoch": 0.625,
"grad_norm": 2.0982179641723633,
"learning_rate": 4.168888888888889e-06,
"loss": 0.1798,
"step": 3125
},
{
"epoch": 0.63,
"grad_norm": 2.1598305702209473,
"learning_rate": 4.1133333333333335e-06,
"loss": 0.1717,
"step": 3150
},
{
"epoch": 0.635,
"grad_norm": 2.225451707839966,
"learning_rate": 4.057777777777778e-06,
"loss": 0.1788,
"step": 3175
},
{
"epoch": 0.64,
"grad_norm": 1.8868625164031982,
"learning_rate": 4.002222222222222e-06,
"loss": 0.1768,
"step": 3200
},
{
"epoch": 0.645,
"grad_norm": 1.864670991897583,
"learning_rate": 3.946666666666667e-06,
"loss": 0.1661,
"step": 3225
},
{
"epoch": 0.65,
"grad_norm": 2.103564500808716,
"learning_rate": 3.891111111111111e-06,
"loss": 0.1931,
"step": 3250
},
{
"epoch": 0.655,
"grad_norm": 1.8054132461547852,
"learning_rate": 3.835555555555555e-06,
"loss": 0.1883,
"step": 3275
},
{
"epoch": 0.66,
"grad_norm": 1.7523831129074097,
"learning_rate": 3.7800000000000002e-06,
"loss": 0.1575,
"step": 3300
},
{
"epoch": 0.665,
"grad_norm": 1.9287195205688477,
"learning_rate": 3.724444444444445e-06,
"loss": 0.1597,
"step": 3325
},
{
"epoch": 0.67,
"grad_norm": 1.4184482097625732,
"learning_rate": 3.668888888888889e-06,
"loss": 0.1468,
"step": 3350
},
{
"epoch": 0.675,
"grad_norm": 1.6231290102005005,
"learning_rate": 3.6133333333333336e-06,
"loss": 0.139,
"step": 3375
},
{
"epoch": 0.68,
"grad_norm": 1.2594499588012695,
"learning_rate": 3.5577777777777785e-06,
"loss": 0.1551,
"step": 3400
},
{
"epoch": 0.685,
"grad_norm": 1.8359473943710327,
"learning_rate": 3.5022222222222225e-06,
"loss": 0.1454,
"step": 3425
},
{
"epoch": 0.69,
"grad_norm": 1.5489341020584106,
"learning_rate": 3.446666666666667e-06,
"loss": 0.1627,
"step": 3450
},
{
"epoch": 0.695,
"grad_norm": 1.7028127908706665,
"learning_rate": 3.391111111111111e-06,
"loss": 0.1429,
"step": 3475
},
{
"epoch": 0.7,
"grad_norm": 1.9561749696731567,
"learning_rate": 3.335555555555556e-06,
"loss": 0.1368,
"step": 3500
},
{
"epoch": 0.705,
"grad_norm": 2.1024222373962402,
"learning_rate": 3.2800000000000004e-06,
"loss": 0.1661,
"step": 3525
},
{
"epoch": 0.71,
"grad_norm": 2.004310131072998,
"learning_rate": 3.2244444444444444e-06,
"loss": 0.1432,
"step": 3550
},
{
"epoch": 0.715,
"grad_norm": 1.3818813562393188,
"learning_rate": 3.1688888888888893e-06,
"loss": 0.1251,
"step": 3575
},
{
"epoch": 0.72,
"grad_norm": 1.6478841304779053,
"learning_rate": 3.1133333333333337e-06,
"loss": 0.1261,
"step": 3600
},
{
"epoch": 0.725,
"grad_norm": 1.5140513181686401,
"learning_rate": 3.0577777777777778e-06,
"loss": 0.1178,
"step": 3625
},
{
"epoch": 0.73,
"grad_norm": 1.7134217023849487,
"learning_rate": 3.0022222222222227e-06,
"loss": 0.1448,
"step": 3650
},
{
"epoch": 0.735,
"grad_norm": 2.0633442401885986,
"learning_rate": 2.946666666666667e-06,
"loss": 0.1637,
"step": 3675
},
{
"epoch": 0.74,
"grad_norm": 1.6094017028808594,
"learning_rate": 2.891111111111111e-06,
"loss": 0.1342,
"step": 3700
},
{
"epoch": 0.745,
"grad_norm": 1.776197910308838,
"learning_rate": 2.835555555555556e-06,
"loss": 0.1567,
"step": 3725
},
{
"epoch": 0.75,
"grad_norm": 2.1370580196380615,
"learning_rate": 2.7800000000000005e-06,
"loss": 0.1423,
"step": 3750
},
{
"epoch": 0.755,
"grad_norm": 2.156872510910034,
"learning_rate": 2.7244444444444445e-06,
"loss": 0.1311,
"step": 3775
},
{
"epoch": 0.76,
"grad_norm": 1.609500765800476,
"learning_rate": 2.6688888888888894e-06,
"loss": 0.1392,
"step": 3800
},
{
"epoch": 0.765,
"grad_norm": 1.677157998085022,
"learning_rate": 2.6133333333333334e-06,
"loss": 0.1383,
"step": 3825
},
{
"epoch": 0.77,
"grad_norm": 1.6813416481018066,
"learning_rate": 2.557777777777778e-06,
"loss": 0.1359,
"step": 3850
},
{
"epoch": 0.775,
"grad_norm": 2.159526824951172,
"learning_rate": 2.5022222222222224e-06,
"loss": 0.1295,
"step": 3875
},
{
"epoch": 0.78,
"grad_norm": 1.3956880569458008,
"learning_rate": 2.446666666666667e-06,
"loss": 0.127,
"step": 3900
},
{
"epoch": 0.785,
"grad_norm": 1.8989673852920532,
"learning_rate": 2.3911111111111113e-06,
"loss": 0.1273,
"step": 3925
},
{
"epoch": 0.79,
"grad_norm": 1.9193179607391357,
"learning_rate": 2.3355555555555557e-06,
"loss": 0.1647,
"step": 3950
},
{
"epoch": 0.795,
"grad_norm": 1.7393921613693237,
"learning_rate": 2.28e-06,
"loss": 0.1726,
"step": 3975
},
{
"epoch": 0.8,
"grad_norm": 1.9714525938034058,
"learning_rate": 2.2244444444444447e-06,
"loss": 0.1538,
"step": 4000
},
{
"epoch": 0.8,
"eval_loss": 0.19509215652942657,
"eval_runtime": 768.8189,
"eval_samples_per_second": 19.712,
"eval_steps_per_second": 2.465,
"eval_wer": 11.118662857067445,
"step": 4000
},
{
"epoch": 0.805,
"grad_norm": 1.8712801933288574,
"learning_rate": 2.168888888888889e-06,
"loss": 0.1365,
"step": 4025
},
{
"epoch": 0.81,
"grad_norm": 1.3689473867416382,
"learning_rate": 2.1133333333333336e-06,
"loss": 0.121,
"step": 4050
},
{
"epoch": 0.815,
"grad_norm": 2.0017051696777344,
"learning_rate": 2.057777777777778e-06,
"loss": 0.1405,
"step": 4075
},
{
"epoch": 0.82,
"grad_norm": 2.032273292541504,
"learning_rate": 2.0022222222222225e-06,
"loss": 0.1652,
"step": 4100
},
{
"epoch": 0.825,
"grad_norm": 2.2192623615264893,
"learning_rate": 1.9466666666666665e-06,
"loss": 0.1721,
"step": 4125
},
{
"epoch": 0.83,
"grad_norm": 2.077394723892212,
"learning_rate": 1.8911111111111114e-06,
"loss": 0.1687,
"step": 4150
},
{
"epoch": 0.835,
"grad_norm": 1.6695953607559204,
"learning_rate": 1.8355555555555557e-06,
"loss": 0.1539,
"step": 4175
},
{
"epoch": 0.84,
"grad_norm": 1.6516363620758057,
"learning_rate": 1.7800000000000001e-06,
"loss": 0.1506,
"step": 4200
},
{
"epoch": 0.845,
"grad_norm": 1.743139386177063,
"learning_rate": 1.7244444444444448e-06,
"loss": 0.1628,
"step": 4225
},
{
"epoch": 0.85,
"grad_norm": 1.993188500404358,
"learning_rate": 1.668888888888889e-06,
"loss": 0.1706,
"step": 4250
},
{
"epoch": 0.855,
"grad_norm": 1.659005045890808,
"learning_rate": 1.6133333333333335e-06,
"loss": 0.1684,
"step": 4275
},
{
"epoch": 0.86,
"grad_norm": 2.1015443801879883,
"learning_rate": 1.5577777777777777e-06,
"loss": 0.165,
"step": 4300
},
{
"epoch": 0.865,
"grad_norm": 1.6033889055252075,
"learning_rate": 1.5022222222222224e-06,
"loss": 0.1458,
"step": 4325
},
{
"epoch": 0.87,
"grad_norm": 1.5493004322052002,
"learning_rate": 1.4466666666666669e-06,
"loss": 0.1456,
"step": 4350
},
{
"epoch": 0.875,
"grad_norm": 1.7360889911651611,
"learning_rate": 1.3911111111111111e-06,
"loss": 0.1351,
"step": 4375
},
{
"epoch": 0.88,
"grad_norm": 1.69246244430542,
"learning_rate": 1.3355555555555558e-06,
"loss": 0.1433,
"step": 4400
},
{
"epoch": 0.885,
"grad_norm": 2.408151149749756,
"learning_rate": 1.28e-06,
"loss": 0.1997,
"step": 4425
},
{
"epoch": 0.89,
"grad_norm": 2.8492488861083984,
"learning_rate": 1.2244444444444445e-06,
"loss": 0.387,
"step": 4450
},
{
"epoch": 0.895,
"grad_norm": 2.1923134326934814,
"learning_rate": 1.168888888888889e-06,
"loss": 0.3364,
"step": 4475
},
{
"epoch": 0.9,
"grad_norm": 2.078341484069824,
"learning_rate": 1.1133333333333334e-06,
"loss": 0.2295,
"step": 4500
},
{
"epoch": 0.905,
"grad_norm": 1.9317213296890259,
"learning_rate": 1.0577777777777779e-06,
"loss": 0.1771,
"step": 4525
},
{
"epoch": 0.91,
"grad_norm": 1.9359657764434814,
"learning_rate": 1.0022222222222223e-06,
"loss": 0.1849,
"step": 4550
},
{
"epoch": 0.915,
"grad_norm": 1.6038821935653687,
"learning_rate": 9.466666666666667e-07,
"loss": 0.1669,
"step": 4575
},
{
"epoch": 0.92,
"grad_norm": 1.6796211004257202,
"learning_rate": 8.911111111111112e-07,
"loss": 0.1604,
"step": 4600
},
{
"epoch": 0.925,
"grad_norm": 2.241725206375122,
"learning_rate": 8.355555555555556e-07,
"loss": 0.1614,
"step": 4625
},
{
"epoch": 0.93,
"grad_norm": 1.6054015159606934,
"learning_rate": 7.8e-07,
"loss": 0.1528,
"step": 4650
},
{
"epoch": 0.935,
"grad_norm": 1.6806738376617432,
"learning_rate": 7.244444444444446e-07,
"loss": 0.1255,
"step": 4675
},
{
"epoch": 0.94,
"grad_norm": 1.7540109157562256,
"learning_rate": 6.68888888888889e-07,
"loss": 0.1163,
"step": 4700
},
{
"epoch": 0.945,
"grad_norm": 1.2936944961547852,
"learning_rate": 6.133333333333333e-07,
"loss": 0.1199,
"step": 4725
},
{
"epoch": 0.95,
"grad_norm": 1.535158634185791,
"learning_rate": 5.577777777777779e-07,
"loss": 0.1166,
"step": 4750
},
{
"epoch": 0.955,
"grad_norm": 1.7007349729537964,
"learning_rate": 5.022222222222222e-07,
"loss": 0.1122,
"step": 4775
},
{
"epoch": 0.96,
"grad_norm": 1.4036020040512085,
"learning_rate": 4.466666666666667e-07,
"loss": 0.1155,
"step": 4800
},
{
"epoch": 0.965,
"grad_norm": 1.7395439147949219,
"learning_rate": 3.9111111111111115e-07,
"loss": 0.1244,
"step": 4825
},
{
"epoch": 0.97,
"grad_norm": 1.4144448041915894,
"learning_rate": 3.3555555555555556e-07,
"loss": 0.122,
"step": 4850
},
{
"epoch": 0.975,
"grad_norm": 1.5096015930175781,
"learning_rate": 2.8e-07,
"loss": 0.1314,
"step": 4875
},
{
"epoch": 0.98,
"grad_norm": 1.5810081958770752,
"learning_rate": 2.2444444444444445e-07,
"loss": 0.1249,
"step": 4900
},
{
"epoch": 0.985,
"grad_norm": 1.706178903579712,
"learning_rate": 1.6888888888888888e-07,
"loss": 0.1295,
"step": 4925
},
{
"epoch": 0.99,
"grad_norm": 1.7708213329315186,
"learning_rate": 1.1333333333333336e-07,
"loss": 0.1282,
"step": 4950
},
{
"epoch": 0.995,
"grad_norm": 1.7594830989837646,
"learning_rate": 5.777777777777778e-08,
"loss": 0.1366,
"step": 4975
},
{
"epoch": 1.0,
"grad_norm": 2.096646308898926,
"learning_rate": 2.2222222222222225e-09,
"loss": 0.1286,
"step": 5000
},
{
"epoch": 1.0,
"eval_loss": 0.19087658822536469,
"eval_runtime": 819.4158,
"eval_samples_per_second": 18.495,
"eval_steps_per_second": 2.313,
"eval_wer": 10.587474512857398,
"step": 5000
},
{
"epoch": 1.0,
"step": 5000,
"total_flos": 9.23473281024e+19,
"train_loss": 0.1977850432395935,
"train_runtime": 19215.1319,
"train_samples_per_second": 16.654,
"train_steps_per_second": 0.26
}
],
"logging_steps": 25,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 9.23473281024e+19,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}
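
The JSON above is the standard Hugging Face Trainer state: log_history holds one training record per logging step (every 25 steps here, per logging_steps) plus one evaluation record every 1000 steps (per eval_steps), while best_metric and best_model_checkpoint track the checkpoint with the lowest eval_wer. Below is a minimal sketch for pulling the eval curve out of this file; the path is an assumption (adjust it to wherever the checkpoint directory lives) and the standard-library json module is the only dependency.

# Minimal sketch: summarize the eval metrics recorded in this trainer_state.json.
# STATE_PATH is an assumed location; point it at your own copy of the file.
import json

STATE_PATH = "./whisper-small-mix-it/trainer_state.json"

with open(STATE_PATH, encoding="utf-8") as f:
    state = json.load(f)

# Evaluation entries are the log_history records that carry an "eval_wer" key.
evals = [e for e in state["log_history"] if "eval_wer" in e]

print(f"{'step':>6}  {'eval_loss':>10}  {'eval_wer':>9}")
for e in evals:
    print(f"{e['step']:>6}  {e['eval_loss']:>10.4f}  {e['eval_wer']:>9.3f}")

print()
print(f"best WER: {state['best_metric']:.3f} at {state['best_model_checkpoint']}")

Run against this file, the script would list the five eval rows (WER falling from 13.46 at step 1000 to 10.59 at step 5000) and report ./whisper-small-mix-it/checkpoint-5000 as the best checkpoint.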