punjabi-wav2vec-bert-model / trainer_state.json
{
"best_metric": 0.35703112038112395,
"best_model_checkpoint": "results/punjabi_model/20240829_200444/checkpoint-3000",
"epoch": 9.999656616990592,
"eval_steps": 3000,
"global_step": 109200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.045784401254492596,
"grad_norm": 2.4544355869293213,
"learning_rate": 9.980000000000001e-06,
"loss": 3.9787,
"step": 500
},
{
"epoch": 0.09156880250898519,
"grad_norm": 5.300338268280029,
"learning_rate": 1.9980000000000002e-05,
"loss": 2.7973,
"step": 1000
},
{
"epoch": 0.1373532037634778,
"grad_norm": 5.187418460845947,
"learning_rate": 2.9980000000000004e-05,
"loss": 2.1081,
"step": 1500
},
{
"epoch": 0.18313760501797038,
"grad_norm": 5.451952934265137,
"learning_rate": 3.998000000000001e-05,
"loss": 1.7718,
"step": 2000
},
{
"epoch": 0.22892200627246298,
"grad_norm": 10.363802909851074,
"learning_rate": 3.962053231939164e-05,
"loss": 1.6058,
"step": 2500
},
{
"epoch": 0.2747064075269556,
"grad_norm": 3.4008052349090576,
"learning_rate": 3.924030418250951e-05,
"loss": 1.5005,
"step": 3000
},
{
"epoch": 0.2747064075269556,
"eval_loss": 0.580813467502594,
"eval_runtime": 645.4536,
"eval_samples_per_second": 10.151,
"eval_steps_per_second": 10.151,
"eval_wer": 0.35703112038112395,
"step": 3000
},
{
"epoch": 0.32049080878144814,
"grad_norm": 2.9593212604522705,
"learning_rate": 3.886007604562738e-05,
"loss": 1.4034,
"step": 3500
},
{
"epoch": 0.36627521003594077,
"grad_norm": 4.573808193206787,
"learning_rate": 3.847984790874525e-05,
"loss": 1.3577,
"step": 4000
},
{
"epoch": 0.41205961129043334,
"grad_norm": 2.684593915939331,
"learning_rate": 3.809961977186312e-05,
"loss": 1.3114,
"step": 4500
},
{
"epoch": 0.45784401254492596,
"grad_norm": 2.6589484214782715,
"learning_rate": 3.771939163498099e-05,
"loss": 1.3163,
"step": 5000
},
{
"epoch": 0.5036284137994186,
"grad_norm": 3.144775390625,
"learning_rate": 3.733992395437262e-05,
"loss": 1.2375,
"step": 5500
},
{
"epoch": 0.5494128150539112,
"grad_norm": 3.1532704830169678,
"learning_rate": 3.6959695817490496e-05,
"loss": 1.2442,
"step": 6000
},
{
"epoch": 0.5494128150539112,
"eval_loss": 0.43034785985946655,
"eval_runtime": 665.7368,
"eval_samples_per_second": 9.842,
"eval_steps_per_second": 9.842,
"eval_wer": 0.2554395487189211,
"step": 6000
},
{
"epoch": 0.5951972163084037,
"grad_norm": 9.455696105957031,
"learning_rate": 3.657946768060837e-05,
"loss": 1.225,
"step": 6500
},
{
"epoch": 0.6409816175628963,
"grad_norm": 3.3579087257385254,
"learning_rate": 3.619923954372624e-05,
"loss": 1.1457,
"step": 7000
},
{
"epoch": 0.686766018817389,
"grad_norm": 4.421695709228516,
"learning_rate": 3.581901140684411e-05,
"loss": 1.1656,
"step": 7500
},
{
"epoch": 0.7325504200718815,
"grad_norm": 3.5701684951782227,
"learning_rate": 3.5439543726235745e-05,
"loss": 1.175,
"step": 8000
},
{
"epoch": 0.7783348213263741,
"grad_norm": 4.08390474319458,
"learning_rate": 3.505931558935361e-05,
"loss": 1.131,
"step": 8500
},
{
"epoch": 0.8241192225808667,
"grad_norm": 3.4614651203155518,
"learning_rate": 3.4679847908745255e-05,
"loss": 1.1133,
"step": 9000
},
{
"epoch": 0.8241192225808667,
"eval_loss": 0.3901245594024658,
"eval_runtime": 664.9166,
"eval_samples_per_second": 9.854,
"eval_steps_per_second": 9.854,
"eval_wer": 0.2110935981607452,
"step": 9000
},
{
"epoch": 0.8699036238353592,
"grad_norm": 2.597609519958496,
"learning_rate": 3.429961977186312e-05,
"loss": 1.0977,
"step": 9500
},
{
"epoch": 0.9156880250898519,
"grad_norm": 3.4706475734710693,
"learning_rate": 3.3919391634980995e-05,
"loss": 1.0986,
"step": 10000
},
{
"epoch": 0.9614724263443445,
"grad_norm": 2.1491544246673584,
"learning_rate": 3.353916349809886e-05,
"loss": 1.1154,
"step": 10500
},
{
"epoch": 1.0072568275988372,
"grad_norm": 1.495735764503479,
"learning_rate": 3.3158935361216734e-05,
"loss": 1.1157,
"step": 11000
},
{
"epoch": 1.0530412288533297,
"grad_norm": 2.120736837387085,
"learning_rate": 3.277870722433461e-05,
"loss": 1.0495,
"step": 11500
},
{
"epoch": 1.0988256301078223,
"grad_norm": 2.2971479892730713,
"learning_rate": 3.239847908745247e-05,
"loss": 1.0785,
"step": 12000
},
{
"epoch": 1.0988256301078223,
"eval_loss": 0.34569498896598816,
"eval_runtime": 655.7186,
"eval_samples_per_second": 9.992,
"eval_steps_per_second": 9.992,
"eval_wer": 0.19471569766063854,
"step": 12000
},
{
"epoch": 1.1446100313623149,
"grad_norm": 1.080255150794983,
"learning_rate": 3.2018250950570346e-05,
"loss": 1.0311,
"step": 12500
},
{
"epoch": 1.1903944326168074,
"grad_norm": 2.3899152278900146,
"learning_rate": 3.163802281368821e-05,
"loss": 1.107,
"step": 13000
},
{
"epoch": 1.2361788338713,
"grad_norm": 2.9057199954986572,
"learning_rate": 3.125855513307985e-05,
"loss": 1.0711,
"step": 13500
},
{
"epoch": 1.2819632351257926,
"grad_norm": 1.879520058631897,
"learning_rate": 3.087832699619772e-05,
"loss": 1.0348,
"step": 14000
},
{
"epoch": 1.3277476363802854,
"grad_norm": 2.777270555496216,
"learning_rate": 3.0498098859315592e-05,
"loss": 1.0308,
"step": 14500
},
{
"epoch": 1.373532037634778,
"grad_norm": 3.571882724761963,
"learning_rate": 3.0117870722433462e-05,
"loss": 1.0245,
"step": 15000
},
{
"epoch": 1.373532037634778,
"eval_loss": 0.3307068943977356,
"eval_runtime": 655.2961,
"eval_samples_per_second": 9.999,
"eval_steps_per_second": 9.999,
"eval_wer": 0.1764061055675381,
"step": 15000
},
{
"epoch": 1.4193164388892705,
"grad_norm": 2.154317855834961,
"learning_rate": 2.973764258555133e-05,
"loss": 1.0197,
"step": 15500
},
{
"epoch": 1.465100840143763,
"grad_norm": 2.7176313400268555,
"learning_rate": 2.9357414448669205e-05,
"loss": 1.0042,
"step": 16000
},
{
"epoch": 1.5108852413982556,
"grad_norm": 3.9277138710021973,
"learning_rate": 2.8977946768060842e-05,
"loss": 0.9801,
"step": 16500
},
{
"epoch": 1.5566696426527482,
"grad_norm": 6.810076713562012,
"learning_rate": 2.859771863117871e-05,
"loss": 0.9256,
"step": 17000
},
{
"epoch": 1.6024540439072408,
"grad_norm": 1.7569458484649658,
"learning_rate": 2.821749049429658e-05,
"loss": 0.981,
"step": 17500
},
{
"epoch": 1.6482384451617333,
"grad_norm": 3.3207457065582275,
"learning_rate": 2.783726235741445e-05,
"loss": 0.9452,
"step": 18000
},
{
"epoch": 1.6482384451617333,
"eval_loss": 0.3014201521873474,
"eval_runtime": 651.2443,
"eval_samples_per_second": 10.061,
"eval_steps_per_second": 10.061,
"eval_wer": 0.1733545068853547,
"step": 18000
},
{
"epoch": 1.694022846416226,
"grad_norm": 1.4468517303466797,
"learning_rate": 2.7457034220532324e-05,
"loss": 0.9969,
"step": 18500
},
{
"epoch": 1.7398072476707185,
"grad_norm": 1.2308686971664429,
"learning_rate": 2.7077566539923958e-05,
"loss": 0.9551,
"step": 19000
},
{
"epoch": 1.785591648925211,
"grad_norm": 3.698532819747925,
"learning_rate": 2.6697338403041827e-05,
"loss": 0.9585,
"step": 19500
},
{
"epoch": 1.8313760501797036,
"grad_norm": 1.4992170333862305,
"learning_rate": 2.6317110266159697e-05,
"loss": 0.951,
"step": 20000
},
{
"epoch": 1.8771604514341964,
"grad_norm": 3.0142011642456055,
"learning_rate": 2.5936882129277566e-05,
"loss": 0.9677,
"step": 20500
},
{
"epoch": 1.922944852688689,
"grad_norm": 1.9342625141143799,
"learning_rate": 2.5556653992395443e-05,
"loss": 0.9656,
"step": 21000
},
{
"epoch": 1.922944852688689,
"eval_loss": 0.3123963475227356,
"eval_runtime": 653.0667,
"eval_samples_per_second": 10.033,
"eval_steps_per_second": 10.033,
"eval_wer": 0.15938233272498875,
"step": 21000
},
{
"epoch": 1.9687292539431815,
"grad_norm": 5.628562927246094,
"learning_rate": 2.5177186311787077e-05,
"loss": 0.9511,
"step": 21500
},
{
"epoch": 2.0145136551976743,
"grad_norm": 2.930298089981079,
"learning_rate": 2.4796958174904946e-05,
"loss": 0.918,
"step": 22000
},
{
"epoch": 2.060298056452167,
"grad_norm": 1.2405227422714233,
"learning_rate": 2.4416730038022816e-05,
"loss": 0.9372,
"step": 22500
},
{
"epoch": 2.1060824577066595,
"grad_norm": 2.1558220386505127,
"learning_rate": 2.4036501901140686e-05,
"loss": 0.9673,
"step": 23000
},
{
"epoch": 2.151866858961152,
"grad_norm": 1.4096624851226807,
"learning_rate": 2.365703422053232e-05,
"loss": 0.9826,
"step": 23500
},
{
"epoch": 2.1976512602156446,
"grad_norm": 0.9921212196350098,
"learning_rate": 2.3276806083650192e-05,
"loss": 0.9261,
"step": 24000
},
{
"epoch": 2.1976512602156446,
"eval_loss": 0.30950331687927246,
"eval_runtime": 651.0611,
"eval_samples_per_second": 10.064,
"eval_steps_per_second": 10.064,
"eval_wer": 0.15439904244981156,
"step": 24000
},
{
"epoch": 2.243435661470137,
"grad_norm": 3.321803331375122,
"learning_rate": 2.2896577946768065e-05,
"loss": 0.8988,
"step": 24500
},
{
"epoch": 2.2892200627246297,
"grad_norm": 1.4012964963912964,
"learning_rate": 2.2516349809885935e-05,
"loss": 0.8717,
"step": 25000
},
{
"epoch": 2.3350044639791223,
"grad_norm": 1.9718306064605713,
"learning_rate": 2.2136121673003805e-05,
"loss": 0.9545,
"step": 25500
},
{
"epoch": 2.380788865233615,
"grad_norm": 2.5009524822235107,
"learning_rate": 2.1755893536121674e-05,
"loss": 0.9311,
"step": 26000
},
{
"epoch": 2.4265732664881074,
"grad_norm": 1.2410893440246582,
"learning_rate": 2.137642585551331e-05,
"loss": 0.8701,
"step": 26500
},
{
"epoch": 2.4723576677426,
"grad_norm": 1.5738617181777954,
"learning_rate": 2.099619771863118e-05,
"loss": 0.8938,
"step": 27000
},
{
"epoch": 2.4723576677426,
"eval_loss": 0.26886311173439026,
"eval_runtime": 654.1765,
"eval_samples_per_second": 10.016,
"eval_steps_per_second": 10.016,
"eval_wer": 0.15074304946552583,
"step": 27000
},
{
"epoch": 2.5181420689970926,
"grad_norm": 2.2281384468078613,
"learning_rate": 2.061596958174905e-05,
"loss": 0.8609,
"step": 27500
},
{
"epoch": 2.563926470251585,
"grad_norm": 2.347487449645996,
"learning_rate": 2.023574144486692e-05,
"loss": 0.9237,
"step": 28000
},
{
"epoch": 2.6097108715060777,
"grad_norm": 1.710715413093567,
"learning_rate": 1.9855513307984794e-05,
"loss": 0.8778,
"step": 28500
},
{
"epoch": 2.6554952727605707,
"grad_norm": 9.36032772064209,
"learning_rate": 1.947680608365019e-05,
"loss": 0.9092,
"step": 29000
},
{
"epoch": 2.701279674015063,
"grad_norm": 1.8656507730484009,
"learning_rate": 1.909657794676806e-05,
"loss": 0.8829,
"step": 29500
},
{
"epoch": 2.747064075269556,
"grad_norm": 0.8339200019836426,
"learning_rate": 1.8716349809885934e-05,
"loss": 0.8979,
"step": 30000
},
{
"epoch": 2.747064075269556,
"eval_loss": 0.2853592336177826,
"eval_runtime": 652.9254,
"eval_samples_per_second": 10.035,
"eval_steps_per_second": 10.035,
"eval_wer": 0.14718778886492379,
"step": 30000
},
{
"epoch": 2.792848476524048,
"grad_norm": 1.8189595937728882,
"learning_rate": 1.8336121673003804e-05,
"loss": 0.9216,
"step": 30500
},
{
"epoch": 2.838632877778541,
"grad_norm": 1.2487553358078003,
"learning_rate": 1.7955893536121673e-05,
"loss": 0.8714,
"step": 31000
},
{
"epoch": 2.8844172790330336,
"grad_norm": 2.8581480979919434,
"learning_rate": 1.757642585551331e-05,
"loss": 0.8675,
"step": 31500
},
{
"epoch": 2.930201680287526,
"grad_norm": 1.5087124109268188,
"learning_rate": 1.719619771863118e-05,
"loss": 0.9003,
"step": 32000
},
{
"epoch": 2.9759860815420187,
"grad_norm": 2.1860768795013428,
"learning_rate": 1.6816730038022814e-05,
"loss": 0.906,
"step": 32500
},
{
"epoch": 3.0217704827965113,
"grad_norm": 2.0595364570617676,
"learning_rate": 1.6436501901140687e-05,
"loss": 0.8624,
"step": 33000
},
{
"epoch": 3.0217704827965113,
"eval_loss": 0.282227486371994,
"eval_runtime": 652.2893,
"eval_samples_per_second": 10.045,
"eval_steps_per_second": 10.045,
"eval_wer": 0.14017800004740347,
"step": 33000
},
{
"epoch": 3.067554884051004,
"grad_norm": 2.31689453125,
"learning_rate": 1.6056273764258557e-05,
"loss": 0.8498,
"step": 33500
},
{
"epoch": 3.1133392853054964,
"grad_norm": 1.297116756439209,
"learning_rate": 1.5676045627376426e-05,
"loss": 0.8301,
"step": 34000
},
{
"epoch": 3.159123686559989,
"grad_norm": 1.9447718858718872,
"learning_rate": 1.52958174904943e-05,
"loss": 0.8445,
"step": 34500
},
{
"epoch": 3.2049080878144816,
"grad_norm": 1.8142869472503662,
"learning_rate": 1.4915589353612167e-05,
"loss": 0.8988,
"step": 35000
},
{
"epoch": 3.250692489068974,
"grad_norm": 26.81559944152832,
"learning_rate": 1.4535361216730039e-05,
"loss": 0.8358,
"step": 35500
},
{
"epoch": 3.2964768903234667,
"grad_norm": 1.4768437147140503,
"learning_rate": 1.4155133079847908e-05,
"loss": 0.8269,
"step": 36000
},
{
"epoch": 3.2964768903234667,
"eval_loss": 0.26364651322364807,
"eval_runtime": 653.8923,
"eval_samples_per_second": 10.02,
"eval_steps_per_second": 10.02,
"eval_wer": 0.13765376502097604,
"step": 36000
},
{
"epoch": 3.3422612915779593,
"grad_norm": 3.271960496902466,
"learning_rate": 1.3774904942965781e-05,
"loss": 0.8389,
"step": 36500
},
{
"epoch": 3.388045692832452,
"grad_norm": 1.8337602615356445,
"learning_rate": 1.3395437262357415e-05,
"loss": 0.8295,
"step": 37000
},
{
"epoch": 3.4338300940869444,
"grad_norm": 2.3357372283935547,
"learning_rate": 1.301596958174905e-05,
"loss": 0.8646,
"step": 37500
},
{
"epoch": 3.479614495341437,
"grad_norm": 3.577268600463867,
"learning_rate": 1.2636501901140685e-05,
"loss": 0.844,
"step": 38000
},
{
"epoch": 3.52539889659593,
"grad_norm": 3.0567266941070557,
"learning_rate": 1.2256273764258558e-05,
"loss": 0.8212,
"step": 38500
},
{
"epoch": 3.571183297850422,
"grad_norm": 5.759117126464844,
"learning_rate": 1.1876045627376427e-05,
"loss": 0.85,
"step": 39000
},
{
"epoch": 3.571183297850422,
"eval_loss": 0.24618284404277802,
"eval_runtime": 648.8718,
"eval_samples_per_second": 10.098,
"eval_steps_per_second": 10.098,
"eval_wer": 0.13248678628143443,
"step": 39000
},
{
"epoch": 3.616967699104915,
"grad_norm": 1.7661995887756348,
"learning_rate": 1.1495817490494297e-05,
"loss": 0.8213,
"step": 39500
},
{
"epoch": 3.6627521003594077,
"grad_norm": 2.657759428024292,
"learning_rate": 1.1115589353612168e-05,
"loss": 0.8379,
"step": 40000
},
{
"epoch": 3.7085365016139002,
"grad_norm": 2.6153953075408936,
"learning_rate": 1.0735361216730038e-05,
"loss": 0.807,
"step": 40500
},
{
"epoch": 3.754320902868393,
"grad_norm": 4.843667507171631,
"learning_rate": 1.035513307984791e-05,
"loss": 0.8265,
"step": 41000
},
{
"epoch": 3.8001053041228854,
"grad_norm": 3.4964911937713623,
"learning_rate": 9.97490494296578e-06,
"loss": 0.85,
"step": 41500
},
{
"epoch": 3.845889705377378,
"grad_norm": 2.8312175273895264,
"learning_rate": 9.594676806083652e-06,
"loss": 0.8557,
"step": 42000
},
{
"epoch": 3.845889705377378,
"eval_loss": 0.24823951721191406,
"eval_runtime": 641.5128,
"eval_samples_per_second": 10.213,
"eval_steps_per_second": 10.213,
"eval_wer": 0.12771088620795903,
"step": 42000
},
{
"epoch": 3.8916741066318705,
"grad_norm": 2.185664653778076,
"learning_rate": 9.214448669201521e-06,
"loss": 0.8157,
"step": 42500
},
{
"epoch": 3.937458507886363,
"grad_norm": 3.490175724029541,
"learning_rate": 8.834220532319391e-06,
"loss": 0.8202,
"step": 43000
},
{
"epoch": 3.9832429091408557,
"grad_norm": 1.9320560693740845,
"learning_rate": 8.453992395437262e-06,
"loss": 0.8344,
"step": 43500
},
{
"epoch": 4.029027310395349,
"grad_norm": 1.6495819091796875,
"learning_rate": 8.073764258555134e-06,
"loss": 0.8167,
"step": 44000
},
{
"epoch": 4.074811711649841,
"grad_norm": 5.037049770355225,
"learning_rate": 7.694296577946768e-06,
"loss": 0.8033,
"step": 44500
},
{
"epoch": 4.120596112904334,
"grad_norm": 2.1924211978912354,
"learning_rate": 7.314828897338404e-06,
"loss": 0.8177,
"step": 45000
},
{
"epoch": 4.120596112904334,
"eval_loss": 0.2399299442768097,
"eval_runtime": 647.8262,
"eval_samples_per_second": 10.114,
"eval_steps_per_second": 10.114,
"eval_wer": 0.12562513332227251,
"step": 45000
},
{
"epoch": 4.166380514158826,
"grad_norm": 0.6167405843734741,
"learning_rate": 6.9346007604562745e-06,
"loss": 0.8205,
"step": 45500
},
{
"epoch": 4.212164915413319,
"grad_norm": 1.5424410104751587,
"learning_rate": 6.554372623574144e-06,
"loss": 0.8096,
"step": 46000
},
{
"epoch": 4.257949316667811,
"grad_norm": 3.1014537811279297,
"learning_rate": 6.1741444866920154e-06,
"loss": 0.7904,
"step": 46500
},
{
"epoch": 4.303733717922304,
"grad_norm": 1.1060818433761597,
"learning_rate": 5.794676806083651e-06,
"loss": 0.7818,
"step": 47000
},
{
"epoch": 4.349518119176796,
"grad_norm": 1.345553994178772,
"learning_rate": 5.414448669201522e-06,
"loss": 0.8144,
"step": 47500
},
{
"epoch": 4.395302520431289,
"grad_norm": 2.172055721282959,
"learning_rate": 5.034220532319392e-06,
"loss": 0.7933,
"step": 48000
},
{
"epoch": 4.395302520431289,
"eval_loss": 0.2388318032026291,
"eval_runtime": 640.8224,
"eval_samples_per_second": 10.224,
"eval_steps_per_second": 10.224,
"eval_wer": 0.12215282880235122,
"step": 48000
},
{
"epoch": 4.441086921685781,
"grad_norm": 3.54559063911438,
"learning_rate": 4.653992395437263e-06,
"loss": 0.814,
"step": 48500
},
{
"epoch": 4.486871322940274,
"grad_norm": 3.8439674377441406,
"learning_rate": 4.273764258555134e-06,
"loss": 0.8641,
"step": 49000
},
{
"epoch": 4.5326557241947665,
"grad_norm": 1.967161774635315,
"learning_rate": 3.893536121673004e-06,
"loss": 0.7962,
"step": 49500
},
{
"epoch": 4.5784401254492595,
"grad_norm": 1.7992150783538818,
"learning_rate": 3.5133079847908747e-06,
"loss": 0.7871,
"step": 50000
},
{
"epoch": 4.6242245267037525,
"grad_norm": 1.4352868795394897,
"learning_rate": 3.1330798479087456e-06,
"loss": 0.839,
"step": 50500
},
{
"epoch": 4.670008927958245,
"grad_norm": 1.4436633586883545,
"learning_rate": 2.7536121673003807e-06,
"loss": 0.7864,
"step": 51000
},
{
"epoch": 4.670008927958245,
"eval_loss": 0.237684428691864,
"eval_runtime": 642.8597,
"eval_samples_per_second": 10.192,
"eval_steps_per_second": 10.192,
"eval_wer": 0.12045815458273092,
"step": 51000
},
{
"epoch": 4.715793329212737,
"grad_norm": 6.0789055824279785,
"learning_rate": 2.373384030418251e-06,
"loss": 0.7787,
"step": 51500
},
{
"epoch": 4.76157773046723,
"grad_norm": 3.2274794578552246,
"learning_rate": 1.993916349809886e-06,
"loss": 0.8234,
"step": 52000
},
{
"epoch": 4.807362131721723,
"grad_norm": 2.8651950359344482,
"learning_rate": 1.6136882129277568e-06,
"loss": 0.8,
"step": 52500
},
{
"epoch": 4.853146532976215,
"grad_norm": 1.4273629188537598,
"learning_rate": 1.2334600760456275e-06,
"loss": 0.7861,
"step": 53000
},
{
"epoch": 4.898930934230708,
"grad_norm": 1.4547092914581299,
"learning_rate": 8.532319391634982e-07,
"loss": 0.7801,
"step": 53500
},
{
"epoch": 4.9447153354852,
"grad_norm": 2.2551023960113525,
"learning_rate": 4.737642585551331e-07,
"loss": 0.8371,
"step": 54000
},
{
"epoch": 4.9447153354852,
"eval_loss": 0.23816268146038055,
"eval_runtime": 641.5849,
"eval_samples_per_second": 10.212,
"eval_steps_per_second": 10.212,
"eval_wer": 0.11996634353298097,
"step": 54000
},
{
"epoch": 4.990499736739693,
"grad_norm": 1.8520026206970215,
"learning_rate": 9.353612167300382e-08,
"loss": 0.7373,
"step": 54500
},
{
"epoch": 5.036627521003594,
"grad_norm": 5.179400444030762,
"learning_rate": 2.0232462686567166e-05,
"loss": 0.836,
"step": 55000
},
{
"epoch": 5.082411922258086,
"grad_norm": 7.242797374725342,
"learning_rate": 2.004589552238806e-05,
"loss": 0.8493,
"step": 55500
},
{
"epoch": 5.128196323512579,
"grad_norm": 3.6222658157348633,
"learning_rate": 1.9859328358208957e-05,
"loss": 0.8105,
"step": 56000
},
{
"epoch": 5.1739807247670715,
"grad_norm": 2.839559316635132,
"learning_rate": 1.9672761194029853e-05,
"loss": 0.8075,
"step": 56500
},
{
"epoch": 5.2197651260215645,
"grad_norm": 2.1728529930114746,
"learning_rate": 1.9486940298507463e-05,
"loss": 0.7846,
"step": 57000
},
{
"epoch": 5.2197651260215645,
"eval_loss": 0.24261941015720367,
"eval_runtime": 650.6489,
"eval_samples_per_second": 10.07,
"eval_steps_per_second": 10.07,
"eval_wer": 0.13190609371666942,
"step": 57000
},
{
"epoch": 5.2655495272760575,
"grad_norm": 1.945101022720337,
"learning_rate": 1.9300373134328362e-05,
"loss": 0.8194,
"step": 57500
},
{
"epoch": 5.31133392853055,
"grad_norm": 1.497315764427185,
"learning_rate": 1.9113805970149254e-05,
"loss": 0.8367,
"step": 58000
},
{
"epoch": 5.357118329785042,
"grad_norm": 3.0750606060028076,
"learning_rate": 1.8927238805970153e-05,
"loss": 0.8442,
"step": 58500
},
{
"epoch": 5.402902731039535,
"grad_norm": 2.4695701599121094,
"learning_rate": 1.8740671641791046e-05,
"loss": 0.8124,
"step": 59000
},
{
"epoch": 5.448687132294028,
"grad_norm": 1.8706340789794922,
"learning_rate": 1.85544776119403e-05,
"loss": 0.775,
"step": 59500
},
{
"epoch": 5.49447153354852,
"grad_norm": 5.028220176696777,
"learning_rate": 1.8367910447761194e-05,
"loss": 0.808,
"step": 60000
},
{
"epoch": 5.49447153354852,
"eval_loss": 0.24327826499938965,
"eval_runtime": 650.6547,
"eval_samples_per_second": 10.07,
"eval_steps_per_second": 10.07,
"eval_wer": 0.13021734493138348,
"step": 60000
},
{
"epoch": 5.540255934803013,
"grad_norm": 1.9498703479766846,
"learning_rate": 1.818134328358209e-05,
"loss": 0.7813,
"step": 60500
},
{
"epoch": 5.586040336057505,
"grad_norm": 3.7338151931762695,
"learning_rate": 1.7994776119402986e-05,
"loss": 0.7698,
"step": 61000
},
{
"epoch": 5.631824737311998,
"grad_norm": 3.364290475845337,
"learning_rate": 1.780820895522388e-05,
"loss": 0.7783,
"step": 61500
},
{
"epoch": 5.67760913856649,
"grad_norm": 3.4009501934051514,
"learning_rate": 1.7622014925373137e-05,
"loss": 0.7652,
"step": 62000
},
{
"epoch": 5.723393539820983,
"grad_norm": 2.9030606746673584,
"learning_rate": 1.743544776119403e-05,
"loss": 0.7946,
"step": 62500
},
{
"epoch": 5.769177941075475,
"grad_norm": 1.3028395175933838,
"learning_rate": 1.724888059701493e-05,
"loss": 0.7456,
"step": 63000
},
{
"epoch": 5.769177941075475,
"eval_loss": 0.2477836161851883,
"eval_runtime": 648.6941,
"eval_samples_per_second": 10.1,
"eval_steps_per_second": 10.1,
"eval_wer": 0.1254769974639141,
"step": 63000
},
{
"epoch": 5.814962342329968,
"grad_norm": 1.8734581470489502,
"learning_rate": 1.706231343283582e-05,
"loss": 0.7893,
"step": 63500
},
{
"epoch": 5.8607467435844605,
"grad_norm": 2.084261178970337,
"learning_rate": 1.6876119402985077e-05,
"loss": 0.7331,
"step": 64000
},
{
"epoch": 5.9065311448389535,
"grad_norm": 2.031810760498047,
"learning_rate": 1.668955223880597e-05,
"loss": 0.747,
"step": 64500
},
{
"epoch": 5.952315546093446,
"grad_norm": 4.709017276763916,
"learning_rate": 1.650298507462687e-05,
"loss": 0.7473,
"step": 65000
},
{
"epoch": 5.998099947347939,
"grad_norm": 3.2480199337005615,
"learning_rate": 1.631641791044776e-05,
"loss": 0.7195,
"step": 65500
},
{
"epoch": 6.043884348602431,
"grad_norm": 2.1403627395629883,
"learning_rate": 1.6129850746268657e-05,
"loss": 0.7217,
"step": 66000
},
{
"epoch": 6.043884348602431,
"eval_loss": 0.24559348821640015,
"eval_runtime": 650.412,
"eval_samples_per_second": 10.074,
"eval_steps_per_second": 10.074,
"eval_wer": 0.12621175132137186,
"step": 66000
},
{
"epoch": 6.089668749856924,
"grad_norm": 2.90395188331604,
"learning_rate": 1.5943283582089553e-05,
"loss": 0.7285,
"step": 66500
},
{
"epoch": 6.135453151111417,
"grad_norm": 2.9291510581970215,
"learning_rate": 1.5757089552238805e-05,
"loss": 0.7324,
"step": 67000
},
{
"epoch": 6.181237552365909,
"grad_norm": 12.322357177734375,
"learning_rate": 1.5570522388059705e-05,
"loss": 0.7198,
"step": 67500
},
{
"epoch": 6.227021953620402,
"grad_norm": 4.177962303161621,
"learning_rate": 1.5383955223880597e-05,
"loss": 0.7206,
"step": 68000
},
{
"epoch": 6.272806354874894,
"grad_norm": 1.2690945863723755,
"learning_rate": 1.5197388059701494e-05,
"loss": 0.729,
"step": 68500
},
{
"epoch": 6.318590756129387,
"grad_norm": 1.8948352336883545,
"learning_rate": 1.5010820895522389e-05,
"loss": 0.7115,
"step": 69000
},
{
"epoch": 6.318590756129387,
"eval_loss": 0.2540062367916107,
"eval_runtime": 648.0874,
"eval_samples_per_second": 10.11,
"eval_steps_per_second": 10.11,
"eval_wer": 0.12306534568983907,
"step": 69000
},
{
"epoch": 6.364375157383879,
"grad_norm": 2.9900403022766113,
"learning_rate": 1.4824253731343286e-05,
"loss": 0.7681,
"step": 69500
},
{
"epoch": 6.410159558638372,
"grad_norm": 3.5702733993530273,
"learning_rate": 1.4638059701492537e-05,
"loss": 0.7142,
"step": 70000
},
{
"epoch": 6.455943959892864,
"grad_norm": 2.3290789127349854,
"learning_rate": 1.4451492537313434e-05,
"loss": 0.71,
"step": 70500
},
{
"epoch": 6.501728361147357,
"grad_norm": 6.307437896728516,
"learning_rate": 1.4264925373134328e-05,
"loss": 0.6944,
"step": 71000
},
{
"epoch": 6.547512762401849,
"grad_norm": 5.269557952880859,
"learning_rate": 1.4078358208955226e-05,
"loss": 0.7638,
"step": 71500
},
{
"epoch": 6.593297163656342,
"grad_norm": 1.6152143478393555,
"learning_rate": 1.389179104477612e-05,
"loss": 0.6701,
"step": 72000
},
{
"epoch": 6.593297163656342,
"eval_loss": 0.2436157912015915,
"eval_runtime": 649.7342,
"eval_samples_per_second": 10.084,
"eval_steps_per_second": 10.084,
"eval_wer": 0.12396008627432391,
"step": 72000
},
{
"epoch": 6.639081564910835,
"grad_norm": 2.4076077938079834,
"learning_rate": 1.3705223880597016e-05,
"loss": 0.7058,
"step": 72500
},
{
"epoch": 6.684865966165328,
"grad_norm": 3.9522953033447266,
"learning_rate": 1.351865671641791e-05,
"loss": 0.6958,
"step": 73000
},
{
"epoch": 6.73065036741982,
"grad_norm": 1.8242387771606445,
"learning_rate": 1.3332089552238807e-05,
"loss": 0.7166,
"step": 73500
},
{
"epoch": 6.776434768674313,
"grad_norm": 4.560197353363037,
"learning_rate": 1.3145522388059701e-05,
"loss": 0.6946,
"step": 74000
},
{
"epoch": 6.822219169928806,
"grad_norm": 2.9841842651367188,
"learning_rate": 1.2958955223880599e-05,
"loss": 0.7179,
"step": 74500
},
{
"epoch": 6.868003571183298,
"grad_norm": 1.7246359586715698,
"learning_rate": 1.2772761194029852e-05,
"loss": 0.6966,
"step": 75000
},
{
"epoch": 6.868003571183298,
"eval_loss": 0.23714399337768555,
"eval_runtime": 653.4649,
"eval_samples_per_second": 10.027,
"eval_steps_per_second": 10.027,
"eval_wer": 0.12096774193548387,
"step": 75000
},
{
"epoch": 6.91378797243779,
"grad_norm": 3.9530229568481445,
"learning_rate": 1.2586194029850747e-05,
"loss": 0.6542,
"step": 75500
},
{
"epoch": 6.959572373692283,
"grad_norm": 4.2303690910339355,
"learning_rate": 1.2399626865671643e-05,
"loss": 0.683,
"step": 76000
},
{
"epoch": 7.005356774946776,
"grad_norm": 1.5437530279159546,
"learning_rate": 1.2213059701492537e-05,
"loss": 0.6415,
"step": 76500
},
{
"epoch": 7.051141176201268,
"grad_norm": 2.246727466583252,
"learning_rate": 1.2026492537313435e-05,
"loss": 0.6831,
"step": 77000
},
{
"epoch": 7.096925577455761,
"grad_norm": 5.2752580642700195,
"learning_rate": 1.1839925373134329e-05,
"loss": 0.6666,
"step": 77500
},
{
"epoch": 7.142709978710253,
"grad_norm": 1.453157663345337,
"learning_rate": 1.1653358208955226e-05,
"loss": 0.7056,
"step": 78000
},
{
"epoch": 7.142709978710253,
"eval_loss": 0.23014602065086365,
"eval_runtime": 648.8331,
"eval_samples_per_second": 10.098,
"eval_steps_per_second": 10.098,
"eval_wer": 0.11875755492877628,
"step": 78000
},
{
"epoch": 7.188494379964746,
"grad_norm": 1.6704216003417969,
"learning_rate": 1.1467164179104479e-05,
"loss": 0.7214,
"step": 78500
},
{
"epoch": 7.234278781219238,
"grad_norm": 2.2454934120178223,
"learning_rate": 1.1280597014925375e-05,
"loss": 0.6921,
"step": 79000
},
{
"epoch": 7.280063182473731,
"grad_norm": 0.8851762413978577,
"learning_rate": 1.1094029850746269e-05,
"loss": 0.6605,
"step": 79500
},
{
"epoch": 7.3258475837282235,
"grad_norm": 2.482797861099243,
"learning_rate": 1.0907462686567165e-05,
"loss": 0.6383,
"step": 80000
},
{
"epoch": 7.3716319849827165,
"grad_norm": 2.4076988697052,
"learning_rate": 1.072089552238806e-05,
"loss": 0.6638,
"step": 80500
},
{
"epoch": 7.417416386237209,
"grad_norm": 2.0605874061584473,
"learning_rate": 1.0534328358208956e-05,
"loss": 0.6654,
"step": 81000
},
{
"epoch": 7.417416386237209,
"eval_loss": 0.24034035205841064,
"eval_runtime": 650.4954,
"eval_samples_per_second": 10.072,
"eval_steps_per_second": 10.072,
"eval_wer": 0.11891754165580337,
"step": 81000
},
{
"epoch": 7.463200787491702,
"grad_norm": 1.975950002670288,
"learning_rate": 1.034776119402985e-05,
"loss": 0.6744,
"step": 81500
},
{
"epoch": 7.508985188746194,
"grad_norm": 1.52113676071167,
"learning_rate": 1.0161940298507465e-05,
"loss": 0.6832,
"step": 82000
},
{
"epoch": 7.554769590000687,
"grad_norm": 2.152534246444702,
"learning_rate": 9.975373134328359e-06,
"loss": 0.6719,
"step": 82500
},
{
"epoch": 7.600553991255179,
"grad_norm": 1.5796542167663574,
"learning_rate": 9.788805970149254e-06,
"loss": 0.6515,
"step": 83000
},
{
"epoch": 7.646338392509672,
"grad_norm": 1.5929287672042847,
"learning_rate": 9.60223880597015e-06,
"loss": 0.666,
"step": 83500
},
{
"epoch": 7.692122793764165,
"grad_norm": 1.7882949113845825,
"learning_rate": 9.415671641791046e-06,
"loss": 0.6339,
"step": 84000
},
{
"epoch": 7.692122793764165,
"eval_loss": 0.23006904125213623,
"eval_runtime": 649.7568,
"eval_samples_per_second": 10.084,
"eval_steps_per_second": 10.084,
"eval_wer": 0.11488232087412008,
"step": 84000
},
{
"epoch": 7.737907195018657,
"grad_norm": 1.4368743896484375,
"learning_rate": 9.22910447761194e-06,
"loss": 0.6861,
"step": 84500
},
{
"epoch": 7.78369159627315,
"grad_norm": 2.0316922664642334,
"learning_rate": 9.042537313432836e-06,
"loss": 0.6513,
"step": 85000
},
{
"epoch": 7.829475997527642,
"grad_norm": 1.2255696058273315,
"learning_rate": 8.855970149253732e-06,
"loss": 0.6336,
"step": 85500
},
{
"epoch": 7.875260398782135,
"grad_norm": 2.4979262351989746,
"learning_rate": 8.669776119402986e-06,
"loss": 0.639,
"step": 86000
},
{
"epoch": 7.921044800036627,
"grad_norm": 2.6587982177734375,
"learning_rate": 8.483208955223882e-06,
"loss": 0.6506,
"step": 86500
},
{
"epoch": 7.96682920129112,
"grad_norm": 2.314595937728882,
"learning_rate": 8.296641791044778e-06,
"loss": 0.6649,
"step": 87000
},
{
"epoch": 7.96682920129112,
"eval_loss": 0.24233344197273254,
"eval_runtime": 654.1461,
"eval_samples_per_second": 10.016,
"eval_steps_per_second": 10.016,
"eval_wer": 0.11500675499514114,
"step": 87000
},
{
"epoch": 8.012613602545613,
"grad_norm": 2.341566562652588,
"learning_rate": 8.110074626865673e-06,
"loss": 0.6722,
"step": 87500
},
{
"epoch": 8.058398003800106,
"grad_norm": 3.6063222885131836,
"learning_rate": 7.923507462686567e-06,
"loss": 0.6567,
"step": 88000
},
{
"epoch": 8.104182405054598,
"grad_norm": 3.134598970413208,
"learning_rate": 7.736940298507463e-06,
"loss": 0.6455,
"step": 88500
},
{
"epoch": 8.14996680630909,
"grad_norm": 2.014883041381836,
"learning_rate": 7.550373134328359e-06,
"loss": 0.6618,
"step": 89000
},
{
"epoch": 8.195751207563584,
"grad_norm": 2.0325255393981934,
"learning_rate": 7.363805970149255e-06,
"loss": 0.609,
"step": 89500
},
{
"epoch": 8.241535608818076,
"grad_norm": 2.343564748764038,
"learning_rate": 7.177238805970151e-06,
"loss": 0.5974,
"step": 90000
},
{
"epoch": 8.241535608818076,
"eval_loss": 0.24035315215587616,
"eval_runtime": 656.8993,
"eval_samples_per_second": 9.974,
"eval_steps_per_second": 9.974,
"eval_wer": 0.11389869877462018,
"step": 90000
},
{
"epoch": 8.287320010072568,
"grad_norm": 3.118222951889038,
"learning_rate": 6.9906716417910455e-06,
"loss": 0.6189,
"step": 90500
},
{
"epoch": 8.33310441132706,
"grad_norm": 1.9298070669174194,
"learning_rate": 6.804104477611941e-06,
"loss": 0.6321,
"step": 91000
},
{
"epoch": 8.378888812581554,
"grad_norm": 2.3452179431915283,
"learning_rate": 6.617537313432837e-06,
"loss": 0.6131,
"step": 91500
},
{
"epoch": 8.424673213836046,
"grad_norm": 1.260360836982727,
"learning_rate": 6.43134328358209e-06,
"loss": 0.6485,
"step": 92000
},
{
"epoch": 8.470457615090538,
"grad_norm": 2.775022506713867,
"learning_rate": 6.2447761194029854e-06,
"loss": 0.6397,
"step": 92500
},
{
"epoch": 8.516242016345032,
"grad_norm": 2.084218740463257,
"learning_rate": 6.058208955223881e-06,
"loss": 0.5994,
"step": 93000
},
{
"epoch": 8.516242016345032,
"eval_loss": 0.2302207350730896,
"eval_runtime": 661.0282,
"eval_samples_per_second": 9.912,
"eval_steps_per_second": 9.912,
"eval_wer": 0.11068118793107534,
"step": 93000
},
{
"epoch": 8.562026417599524,
"grad_norm": 3.495532751083374,
"learning_rate": 5.871641791044776e-06,
"loss": 0.6213,
"step": 93500
},
{
"epoch": 8.607810818854016,
"grad_norm": 1.2543615102767944,
"learning_rate": 5.68544776119403e-06,
"loss": 0.6118,
"step": 94000
},
{
"epoch": 8.653595220108508,
"grad_norm": 2.4980568885803223,
"learning_rate": 5.498880597014926e-06,
"loss": 0.5992,
"step": 94500
},
{
"epoch": 8.699379621363002,
"grad_norm": 3.1688904762268066,
"learning_rate": 5.312313432835822e-06,
"loss": 0.629,
"step": 95000
},
{
"epoch": 8.745164022617494,
"grad_norm": 1.777855396270752,
"learning_rate": 5.125746268656717e-06,
"loss": 0.5818,
"step": 95500
},
{
"epoch": 8.790948423871987,
"grad_norm": 4.508159637451172,
"learning_rate": 4.939179104477612e-06,
"loss": 0.6323,
"step": 96000
},
{
"epoch": 8.790948423871987,
"eval_loss": 0.23431606590747833,
"eval_runtime": 665.4475,
"eval_samples_per_second": 9.846,
"eval_steps_per_second": 9.846,
"eval_wer": 0.11001753928562964,
"step": 96000
},
{
"epoch": 8.836732825126479,
"grad_norm": 2.9219791889190674,
"learning_rate": 4.752611940298508e-06,
"loss": 0.5897,
"step": 96500
},
{
"epoch": 8.882517226380973,
"grad_norm": 1.1439307928085327,
"learning_rate": 4.5660447761194035e-06,
"loss": 0.611,
"step": 97000
},
{
"epoch": 8.928301627635465,
"grad_norm": 2.547057628631592,
"learning_rate": 4.379477611940298e-06,
"loss": 0.6163,
"step": 97500
},
{
"epoch": 8.974086028889957,
"grad_norm": 1.996872901916504,
"learning_rate": 4.193283582089553e-06,
"loss": 0.6063,
"step": 98000
},
{
"epoch": 9.019870430144449,
"grad_norm": 1.6827927827835083,
"learning_rate": 4.006716417910448e-06,
"loss": 0.594,
"step": 98500
},
{
"epoch": 9.065654831398943,
"grad_norm": 1.5130544900894165,
"learning_rate": 3.820149253731343e-06,
"loss": 0.5862,
"step": 99000
},
{
"epoch": 9.065654831398943,
"eval_loss": 0.22885525226593018,
"eval_runtime": 661.9197,
"eval_samples_per_second": 9.898,
"eval_steps_per_second": 9.898,
"eval_wer": 0.10877912350975326,
"step": 99000
},
{
"epoch": 9.111439232653435,
"grad_norm": 2.690355062484741,
"learning_rate": 3.6335820895522388e-06,
"loss": 0.5986,
"step": 99500
},
{
"epoch": 9.157223633907927,
"grad_norm": 1.3742425441741943,
"learning_rate": 3.4470149253731346e-06,
"loss": 0.5974,
"step": 100000
},
{
"epoch": 9.20300803516242,
"grad_norm": 2.864790678024292,
"learning_rate": 3.2608208955223884e-06,
"loss": 0.5916,
"step": 100500
},
{
"epoch": 9.248792436416913,
"grad_norm": 1.65906822681427,
"learning_rate": 3.074253731343284e-06,
"loss": 0.5724,
"step": 101000
},
{
"epoch": 9.294576837671405,
"grad_norm": 1.9599213600158691,
"learning_rate": 2.8876865671641795e-06,
"loss": 0.5857,
"step": 101500
},
{
"epoch": 9.340361238925897,
"grad_norm": 3.1552865505218506,
"learning_rate": 2.7011194029850745e-06,
"loss": 0.6196,
"step": 102000
},
{
"epoch": 9.340361238925897,
"eval_loss": 0.23824112117290497,
"eval_runtime": 661.8149,
"eval_samples_per_second": 9.9,
"eval_steps_per_second": 9.9,
"eval_wer": 0.10700149320945225,
"step": 102000
},
{
"epoch": 9.386145640180391,
"grad_norm": 2.2402780055999756,
"learning_rate": 2.5145522388059703e-06,
"loss": 0.6165,
"step": 102500
},
{
"epoch": 9.431930041434883,
"grad_norm": 3.58760929107666,
"learning_rate": 2.327985074626866e-06,
"loss": 0.584,
"step": 103000
},
{
"epoch": 9.477714442689376,
"grad_norm": 4.692273139953613,
"learning_rate": 2.1417910447761194e-06,
"loss": 0.5801,
"step": 103500
},
{
"epoch": 9.523498843943868,
"grad_norm": 1.1370134353637695,
"learning_rate": 1.955223880597015e-06,
"loss": 0.5876,
"step": 104000
},
{
"epoch": 9.569283245198362,
"grad_norm": 1.0059013366699219,
"learning_rate": 1.7686567164179106e-06,
"loss": 0.6329,
"step": 104500
},
{
"epoch": 9.615067646452854,
"grad_norm": 2.666593551635742,
"learning_rate": 1.582089552238806e-06,
"loss": 0.6077,
"step": 105000
},
{
"epoch": 9.615067646452854,
"eval_loss": 0.22313112020492554,
"eval_runtime": 661.0901,
"eval_samples_per_second": 9.911,
"eval_steps_per_second": 9.911,
"eval_wer": 0.10743997535019317,
"step": 105000
},
{
"epoch": 9.660852047707346,
"grad_norm": 3.046264410018921,
"learning_rate": 1.3955223880597016e-06,
"loss": 0.5714,
"step": 105500
},
{
"epoch": 9.706636448961838,
"grad_norm": 1.997536301612854,
"learning_rate": 1.2089552238805971e-06,
"loss": 0.6201,
"step": 106000
},
{
"epoch": 9.752420850216332,
"grad_norm": 3.185781478881836,
"learning_rate": 1.0223880597014927e-06,
"loss": 0.5782,
"step": 106500
},
{
"epoch": 9.798205251470824,
"grad_norm": 4.050471782684326,
"learning_rate": 8.361940298507463e-07,
"loss": 0.5841,
"step": 107000
},
{
"epoch": 9.843989652725316,
"grad_norm": 2.3069846630096436,
"learning_rate": 6.496268656716419e-07,
"loss": 0.5601,
"step": 107500
},
{
"epoch": 9.88977405397981,
"grad_norm": 3.8783347606658936,
"learning_rate": 4.6343283582089555e-07,
"loss": 0.5949,
"step": 108000
},
{
"epoch": 9.88977405397981,
"eval_loss": 0.22683313488960266,
"eval_runtime": 661.0107,
"eval_samples_per_second": 9.912,
"eval_steps_per_second": 9.912,
"eval_wer": 0.1066222654120547,
"step": 108000
},
{
"epoch": 9.935558455234302,
"grad_norm": 1.1555778980255127,
"learning_rate": 2.768656716417911e-07,
"loss": 0.5902,
"step": 108500
},
{
"epoch": 9.981342856488794,
"grad_norm": 0.8959473371505737,
"learning_rate": 9.029850746268658e-08,
"loss": 0.5785,
"step": 109000
}
],
"logging_steps": 500,
"max_steps": 109200,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 3000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.0560519550521973e+20,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
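
For reference, a minimal sketch of how the evaluation trajectory logged above can be read back out of this file with standard-library Python. The local path "trainer_state.json" is an assumption; adjust it to wherever the file is stored.

import json

# Load the trainer state written by the Hugging Face Trainer
# (the file name is an assumption for this sketch).
with open("trainer_state.json", "r", encoding="utf-8") as f:
    state = json.load(f)

# Evaluation records in log_history carry an "eval_wer" key;
# plain training-loss records carry "loss" instead.
eval_entries = [e for e in state["log_history"] if "eval_wer" in e]

for e in eval_entries:
    print(f"step {e['step']:>6}  epoch {e['epoch']:.2f}  "
          f"eval_loss {e['eval_loss']:.4f}  eval_wer {e['eval_wer']:.4f}")

# Lowest WER reached over the run; note this differs from the
# best_metric value recorded at the top of the file.
best = min(eval_entries, key=lambda e: e["eval_wer"])
print(f"lowest eval_wer: {best['eval_wer']:.4f} at step {best['step']}")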