{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.5276735459662288,
"eval_steps": 500,
"global_step": 13500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.003908692933083177,
"grad_norm": 0.6370952129364014,
"learning_rate": 4.98240400406663e-05,
"loss": 1.4791,
"step": 100
},
{
"epoch": 0.007817385866166354,
"grad_norm": 0.27393946051597595,
"learning_rate": 4.9628528974739973e-05,
"loss": 1.3743,
"step": 200
},
{
"epoch": 0.01172607879924953,
"grad_norm": 0.364713191986084,
"learning_rate": 4.943301790881364e-05,
"loss": 1.3525,
"step": 300
},
{
"epoch": 0.015634771732332707,
"grad_norm": 0.33364734053611755,
"learning_rate": 4.923750684288731e-05,
"loss": 1.3457,
"step": 400
},
{
"epoch": 0.019543464665415886,
"grad_norm": 0.3672705292701721,
"learning_rate": 4.904199577696098e-05,
"loss": 1.3375,
"step": 500
},
{
"epoch": 0.02345215759849906,
"grad_norm": 0.16828787326812744,
"learning_rate": 4.884648471103465e-05,
"loss": 1.3348,
"step": 600
},
{
"epoch": 0.02736085053158224,
"grad_norm": 0.24634529650211334,
"learning_rate": 4.8650973645108314e-05,
"loss": 1.3343,
"step": 700
},
{
"epoch": 0.031269543464665414,
"grad_norm": 0.5399248003959656,
"learning_rate": 4.845546257918198e-05,
"loss": 1.3287,
"step": 800
},
{
"epoch": 0.03517823639774859,
"grad_norm": 0.5724157691001892,
"learning_rate": 4.825995151325565e-05,
"loss": 1.3236,
"step": 900
},
{
"epoch": 0.03908692933083177,
"grad_norm": 0.5199714303016663,
"learning_rate": 4.8064440447329324e-05,
"loss": 1.3189,
"step": 1000
},
{
"epoch": 0.04299562226391495,
"grad_norm": 0.30912989377975464,
"learning_rate": 4.786892938140299e-05,
"loss": 1.315,
"step": 1100
},
{
"epoch": 0.04690431519699812,
"grad_norm": 0.37046098709106445,
"learning_rate": 4.767341831547666e-05,
"loss": 1.3151,
"step": 1200
},
{
"epoch": 0.0508130081300813,
"grad_norm": 0.26445892453193665,
"learning_rate": 4.747790724955033e-05,
"loss": 1.3118,
"step": 1300
},
{
"epoch": 0.05472170106316448,
"grad_norm": 0.3207770586013794,
"learning_rate": 4.728239618362399e-05,
"loss": 1.3087,
"step": 1400
},
{
"epoch": 0.05863039399624766,
"grad_norm": 0.4711158573627472,
"learning_rate": 4.7086885117697664e-05,
"loss": 1.31,
"step": 1500
},
{
"epoch": 0.06253908692933083,
"grad_norm": 0.524340808391571,
"learning_rate": 4.689137405177133e-05,
"loss": 1.3062,
"step": 1600
},
{
"epoch": 0.06644777986241401,
"grad_norm": 0.3912469744682312,
"learning_rate": 4.6695862985845e-05,
"loss": 1.3078,
"step": 1700
},
{
"epoch": 0.07035647279549719,
"grad_norm": 0.3625667691230774,
"learning_rate": 4.6500351919918674e-05,
"loss": 1.3039,
"step": 1800
},
{
"epoch": 0.07426516572858036,
"grad_norm": 0.3897973299026489,
"learning_rate": 4.630484085399234e-05,
"loss": 1.3027,
"step": 1900
},
{
"epoch": 0.07817385866166354,
"grad_norm": 0.30837342143058777,
"learning_rate": 4.6109329788066005e-05,
"loss": 1.3055,
"step": 2000
},
{
"epoch": 0.08208255159474671,
"grad_norm": 0.3017924129962921,
"learning_rate": 4.591381872213967e-05,
"loss": 1.3021,
"step": 2100
},
{
"epoch": 0.0859912445278299,
"grad_norm": 0.5234686136245728,
"learning_rate": 4.571830765621334e-05,
"loss": 1.3054,
"step": 2200
},
{
"epoch": 0.08989993746091307,
"grad_norm": 0.3180347681045532,
"learning_rate": 4.5522796590287015e-05,
"loss": 1.3016,
"step": 2300
},
{
"epoch": 0.09380863039399624,
"grad_norm": 0.22097885608673096,
"learning_rate": 4.532728552436068e-05,
"loss": 1.3014,
"step": 2400
},
{
"epoch": 0.09771732332707943,
"grad_norm": 0.3503361642360687,
"learning_rate": 4.513177445843435e-05,
"loss": 1.3004,
"step": 2500
},
{
"epoch": 0.1016260162601626,
"grad_norm": 0.3880802392959595,
"learning_rate": 4.493626339250802e-05,
"loss": 1.2992,
"step": 2600
},
{
"epoch": 0.10553470919324578,
"grad_norm": 0.3632693886756897,
"learning_rate": 4.474075232658168e-05,
"loss": 1.3009,
"step": 2700
},
{
"epoch": 0.10944340212632896,
"grad_norm": 0.4512663781642914,
"learning_rate": 4.4545241260655355e-05,
"loss": 1.2999,
"step": 2800
},
{
"epoch": 0.11335209505941213,
"grad_norm": 0.3041514456272125,
"learning_rate": 4.434973019472902e-05,
"loss": 1.3007,
"step": 2900
},
{
"epoch": 0.11726078799249531,
"grad_norm": 0.34578946232795715,
"learning_rate": 4.415421912880269e-05,
"loss": 1.2985,
"step": 3000
},
{
"epoch": 0.12116948092557848,
"grad_norm": 0.2616026997566223,
"learning_rate": 4.3958708062876365e-05,
"loss": 1.2984,
"step": 3100
},
{
"epoch": 0.12507817385866166,
"grad_norm": 0.253749817609787,
"learning_rate": 4.376319699695003e-05,
"loss": 1.2963,
"step": 3200
},
{
"epoch": 0.12898686679174484,
"grad_norm": 0.24605727195739746,
"learning_rate": 4.35676859310237e-05,
"loss": 1.3005,
"step": 3300
},
{
"epoch": 0.13289555972482803,
"grad_norm": 0.31458768248558044,
"learning_rate": 4.337217486509736e-05,
"loss": 1.3008,
"step": 3400
},
{
"epoch": 0.13680425265791119,
"grad_norm": 0.23786158859729767,
"learning_rate": 4.3176663799171034e-05,
"loss": 1.2975,
"step": 3500
},
{
"epoch": 0.14071294559099437,
"grad_norm": 0.3280108869075775,
"learning_rate": 4.2981152733244706e-05,
"loss": 1.2987,
"step": 3600
},
{
"epoch": 0.14462163852407756,
"grad_norm": 0.25433337688446045,
"learning_rate": 4.278564166731837e-05,
"loss": 1.2972,
"step": 3700
},
{
"epoch": 0.1485303314571607,
"grad_norm": 0.2792419493198395,
"learning_rate": 4.2590130601392044e-05,
"loss": 1.2944,
"step": 3800
},
{
"epoch": 0.1524390243902439,
"grad_norm": 0.34164726734161377,
"learning_rate": 4.239461953546571e-05,
"loss": 1.298,
"step": 3900
},
{
"epoch": 0.15634771732332708,
"grad_norm": 0.2886170744895935,
"learning_rate": 4.219910846953938e-05,
"loss": 1.2948,
"step": 4000
},
{
"epoch": 0.16025641025641027,
"grad_norm": 0.3007793426513672,
"learning_rate": 4.2003597403613047e-05,
"loss": 1.2954,
"step": 4100
},
{
"epoch": 0.16416510318949343,
"grad_norm": 0.32882916927337646,
"learning_rate": 4.180808633768671e-05,
"loss": 1.2942,
"step": 4200
},
{
"epoch": 0.1680737961225766,
"grad_norm": 0.2950049936771393,
"learning_rate": 4.1612575271760384e-05,
"loss": 1.2938,
"step": 4300
},
{
"epoch": 0.1719824890556598,
"grad_norm": 0.7425180673599243,
"learning_rate": 4.141706420583405e-05,
"loss": 1.2926,
"step": 4400
},
{
"epoch": 0.17589118198874296,
"grad_norm": 0.26747408509254456,
"learning_rate": 4.122155313990772e-05,
"loss": 1.2933,
"step": 4500
},
{
"epoch": 0.17979987492182614,
"grad_norm": 0.3664441704750061,
"learning_rate": 4.1026042073981394e-05,
"loss": 1.2916,
"step": 4600
},
{
"epoch": 0.18370856785490933,
"grad_norm": 0.32603704929351807,
"learning_rate": 4.083053100805506e-05,
"loss": 1.2934,
"step": 4700
},
{
"epoch": 0.18761726078799248,
"grad_norm": 0.4145171046257019,
"learning_rate": 4.0635019942128725e-05,
"loss": 1.2922,
"step": 4800
},
{
"epoch": 0.19152595372107567,
"grad_norm": 0.42517679929733276,
"learning_rate": 4.04395088762024e-05,
"loss": 1.293,
"step": 4900
},
{
"epoch": 0.19543464665415886,
"grad_norm": 0.48030540347099304,
"learning_rate": 4.024399781027606e-05,
"loss": 1.2906,
"step": 5000
},
{
"epoch": 0.199343339587242,
"grad_norm": 0.4900824725627899,
"learning_rate": 4.0048486744349735e-05,
"loss": 1.293,
"step": 5100
},
{
"epoch": 0.2032520325203252,
"grad_norm": 0.400856614112854,
"learning_rate": 3.98529756784234e-05,
"loss": 1.2903,
"step": 5200
},
{
"epoch": 0.20716072545340838,
"grad_norm": 0.3311282694339752,
"learning_rate": 3.965746461249707e-05,
"loss": 1.2917,
"step": 5300
},
{
"epoch": 0.21106941838649157,
"grad_norm": 0.5248217582702637,
"learning_rate": 3.946195354657074e-05,
"loss": 1.2913,
"step": 5400
},
{
"epoch": 0.21497811131957473,
"grad_norm": 0.6463488936424255,
"learning_rate": 3.92664424806444e-05,
"loss": 1.2899,
"step": 5500
},
{
"epoch": 0.2188868042526579,
"grad_norm": 0.3202870786190033,
"learning_rate": 3.9070931414718075e-05,
"loss": 1.2866,
"step": 5600
},
{
"epoch": 0.2227954971857411,
"grad_norm": 0.5758823752403259,
"learning_rate": 3.887542034879174e-05,
"loss": 1.2907,
"step": 5700
},
{
"epoch": 0.22670419011882426,
"grad_norm": 0.4410454034805298,
"learning_rate": 3.867990928286541e-05,
"loss": 1.2885,
"step": 5800
},
{
"epoch": 0.23061288305190744,
"grad_norm": 0.3980095088481903,
"learning_rate": 3.8484398216939085e-05,
"loss": 1.2895,
"step": 5900
},
{
"epoch": 0.23452157598499063,
"grad_norm": 0.3670996427536011,
"learning_rate": 3.828888715101275e-05,
"loss": 1.2888,
"step": 6000
},
{
"epoch": 0.23843026891807378,
"grad_norm": 0.302168071269989,
"learning_rate": 3.8093376085086416e-05,
"loss": 1.2874,
"step": 6100
},
{
"epoch": 0.24233896185115697,
"grad_norm": 0.4420079290866852,
"learning_rate": 3.789786501916008e-05,
"loss": 1.2849,
"step": 6200
},
{
"epoch": 0.24624765478424016,
"grad_norm": 0.4248519539833069,
"learning_rate": 3.770235395323375e-05,
"loss": 1.2853,
"step": 6300
},
{
"epoch": 0.2501563477173233,
"grad_norm": 0.5483678579330444,
"learning_rate": 3.7506842887307426e-05,
"loss": 1.2883,
"step": 6400
},
{
"epoch": 0.2540650406504065,
"grad_norm": 0.7262341380119324,
"learning_rate": 3.731133182138109e-05,
"loss": 1.2893,
"step": 6500
},
{
"epoch": 0.2579737335834897,
"grad_norm": 0.47499603033065796,
"learning_rate": 3.711582075545476e-05,
"loss": 1.2861,
"step": 6600
},
{
"epoch": 0.26188242651657284,
"grad_norm": 0.4093151092529297,
"learning_rate": 3.692030968952843e-05,
"loss": 1.288,
"step": 6700
},
{
"epoch": 0.26579111944965605,
"grad_norm": 0.3050064146518707,
"learning_rate": 3.6724798623602094e-05,
"loss": 1.2891,
"step": 6800
},
{
"epoch": 0.2696998123827392,
"grad_norm": 0.28250014781951904,
"learning_rate": 3.6529287557675766e-05,
"loss": 1.288,
"step": 6900
},
{
"epoch": 0.27360850531582237,
"grad_norm": 0.37311726808547974,
"learning_rate": 3.633377649174943e-05,
"loss": 1.2864,
"step": 7000
},
{
"epoch": 0.2775171982489056,
"grad_norm": 0.3157326877117157,
"learning_rate": 3.6138265425823104e-05,
"loss": 1.2862,
"step": 7100
},
{
"epoch": 0.28142589118198874,
"grad_norm": 0.3368279039859772,
"learning_rate": 3.5942754359896776e-05,
"loss": 1.2884,
"step": 7200
},
{
"epoch": 0.2853345841150719,
"grad_norm": 0.4087933301925659,
"learning_rate": 3.574724329397044e-05,
"loss": 1.2848,
"step": 7300
},
{
"epoch": 0.2892432770481551,
"grad_norm": 0.9965713620185852,
"learning_rate": 3.555173222804411e-05,
"loss": 1.2832,
"step": 7400
},
{
"epoch": 0.29315196998123827,
"grad_norm": 0.4364921748638153,
"learning_rate": 3.535622116211777e-05,
"loss": 1.2869,
"step": 7500
},
{
"epoch": 0.2970606629143214,
"grad_norm": 0.30812186002731323,
"learning_rate": 3.5160710096191444e-05,
"loss": 1.2845,
"step": 7600
},
{
"epoch": 0.30096935584740464,
"grad_norm": 0.4500378966331482,
"learning_rate": 3.4965199030265117e-05,
"loss": 1.2855,
"step": 7700
},
{
"epoch": 0.3048780487804878,
"grad_norm": 0.6226646900177002,
"learning_rate": 3.476968796433878e-05,
"loss": 1.2837,
"step": 7800
},
{
"epoch": 0.30878674171357096,
"grad_norm": 0.35613521933555603,
"learning_rate": 3.4574176898412454e-05,
"loss": 1.2824,
"step": 7900
},
{
"epoch": 0.31269543464665417,
"grad_norm": 0.4375666081905365,
"learning_rate": 3.437866583248612e-05,
"loss": 1.285,
"step": 8000
},
{
"epoch": 0.3166041275797373,
"grad_norm": 0.5314582586288452,
"learning_rate": 3.418315476655979e-05,
"loss": 1.284,
"step": 8100
},
{
"epoch": 0.32051282051282054,
"grad_norm": 0.310276597738266,
"learning_rate": 3.398764370063346e-05,
"loss": 1.2846,
"step": 8200
},
{
"epoch": 0.3244215134459037,
"grad_norm": 0.42439979314804077,
"learning_rate": 3.379213263470712e-05,
"loss": 1.2863,
"step": 8300
},
{
"epoch": 0.32833020637898686,
"grad_norm": 0.4255527853965759,
"learning_rate": 3.3596621568780795e-05,
"loss": 1.283,
"step": 8400
},
{
"epoch": 0.33223889931207007,
"grad_norm": 0.4299289584159851,
"learning_rate": 3.340111050285447e-05,
"loss": 1.2828,
"step": 8500
},
{
"epoch": 0.3361475922451532,
"grad_norm": 0.36310991644859314,
"learning_rate": 3.320559943692813e-05,
"loss": 1.2826,
"step": 8600
},
{
"epoch": 0.3400562851782364,
"grad_norm": 0.7407525181770325,
"learning_rate": 3.3010088371001805e-05,
"loss": 1.2836,
"step": 8700
},
{
"epoch": 0.3439649781113196,
"grad_norm": 0.34384685754776,
"learning_rate": 3.281457730507547e-05,
"loss": 1.2845,
"step": 8800
},
{
"epoch": 0.34787367104440275,
"grad_norm": 0.33801528811454773,
"learning_rate": 3.2619066239149135e-05,
"loss": 1.2841,
"step": 8900
},
{
"epoch": 0.3517823639774859,
"grad_norm": 0.4284690022468567,
"learning_rate": 3.242355517322281e-05,
"loss": 1.2813,
"step": 9000
},
{
"epoch": 0.3556910569105691,
"grad_norm": 0.46512600779533386,
"learning_rate": 3.222804410729647e-05,
"loss": 1.2847,
"step": 9100
},
{
"epoch": 0.3595997498436523,
"grad_norm": 0.44570988416671753,
"learning_rate": 3.2032533041370145e-05,
"loss": 1.2811,
"step": 9200
},
{
"epoch": 0.36350844277673544,
"grad_norm": 0.613578200340271,
"learning_rate": 3.183702197544381e-05,
"loss": 1.2779,
"step": 9300
},
{
"epoch": 0.36741713570981865,
"grad_norm": 0.3196867108345032,
"learning_rate": 3.164151090951748e-05,
"loss": 1.2814,
"step": 9400
},
{
"epoch": 0.3713258286429018,
"grad_norm": 0.5302968621253967,
"learning_rate": 3.144599984359115e-05,
"loss": 1.2791,
"step": 9500
},
{
"epoch": 0.37523452157598497,
"grad_norm": 0.5436352491378784,
"learning_rate": 3.1250488777664814e-05,
"loss": 1.2841,
"step": 9600
},
{
"epoch": 0.3791432145090682,
"grad_norm": 0.42423689365386963,
"learning_rate": 3.1054977711738486e-05,
"loss": 1.2846,
"step": 9700
},
{
"epoch": 0.38305190744215134,
"grad_norm": 0.33073633909225464,
"learning_rate": 3.085946664581215e-05,
"loss": 1.2834,
"step": 9800
},
{
"epoch": 0.3869606003752345,
"grad_norm": 0.2981216609477997,
"learning_rate": 3.0663955579885823e-05,
"loss": 1.2818,
"step": 9900
},
{
"epoch": 0.3908692933083177,
"grad_norm": 0.3958970010280609,
"learning_rate": 3.0468444513959492e-05,
"loss": 1.2812,
"step": 10000
},
{
"epoch": 0.39477798624140087,
"grad_norm": 0.40256965160369873,
"learning_rate": 3.0272933448033158e-05,
"loss": 1.282,
"step": 10100
},
{
"epoch": 0.398686679174484,
"grad_norm": 0.3741731345653534,
"learning_rate": 3.007742238210683e-05,
"loss": 1.2809,
"step": 10200
},
{
"epoch": 0.40259537210756724,
"grad_norm": 0.3729889690876007,
"learning_rate": 2.98819113161805e-05,
"loss": 1.2803,
"step": 10300
},
{
"epoch": 0.4065040650406504,
"grad_norm": 0.3093133270740509,
"learning_rate": 2.9686400250254164e-05,
"loss": 1.2791,
"step": 10400
},
{
"epoch": 0.4104127579737336,
"grad_norm": 0.4270256757736206,
"learning_rate": 2.9490889184327836e-05,
"loss": 1.2797,
"step": 10500
},
{
"epoch": 0.41432145090681677,
"grad_norm": 0.517166793346405,
"learning_rate": 2.92953781184015e-05,
"loss": 1.2834,
"step": 10600
},
{
"epoch": 0.4182301438398999,
"grad_norm": 0.38176167011260986,
"learning_rate": 2.909986705247517e-05,
"loss": 1.2829,
"step": 10700
},
{
"epoch": 0.42213883677298314,
"grad_norm": 0.4706205129623413,
"learning_rate": 2.8904355986548843e-05,
"loss": 1.2782,
"step": 10800
},
{
"epoch": 0.4260475297060663,
"grad_norm": 0.252225399017334,
"learning_rate": 2.8708844920622508e-05,
"loss": 1.2821,
"step": 10900
},
{
"epoch": 0.42995622263914945,
"grad_norm": 0.41000062227249146,
"learning_rate": 2.8513333854696177e-05,
"loss": 1.2822,
"step": 11000
},
{
"epoch": 0.43386491557223267,
"grad_norm": 0.585588812828064,
"learning_rate": 2.8317822788769842e-05,
"loss": 1.2827,
"step": 11100
},
{
"epoch": 0.4377736085053158,
"grad_norm": 0.39349380135536194,
"learning_rate": 2.8122311722843514e-05,
"loss": 1.2788,
"step": 11200
},
{
"epoch": 0.441682301438399,
"grad_norm": 0.713667094707489,
"learning_rate": 2.7926800656917183e-05,
"loss": 1.2809,
"step": 11300
},
{
"epoch": 0.4455909943714822,
"grad_norm": 0.45725512504577637,
"learning_rate": 2.773128959099085e-05,
"loss": 1.2807,
"step": 11400
},
{
"epoch": 0.44949968730456535,
"grad_norm": 0.47194209694862366,
"learning_rate": 2.753577852506452e-05,
"loss": 1.2805,
"step": 11500
},
{
"epoch": 0.4534083802376485,
"grad_norm": 0.3619079291820526,
"learning_rate": 2.7340267459138186e-05,
"loss": 1.2809,
"step": 11600
},
{
"epoch": 0.4573170731707317,
"grad_norm": 0.30102109909057617,
"learning_rate": 2.7144756393211855e-05,
"loss": 1.2781,
"step": 11700
},
{
"epoch": 0.4612257661038149,
"grad_norm": 0.5589666366577148,
"learning_rate": 2.6949245327285527e-05,
"loss": 1.2773,
"step": 11800
},
{
"epoch": 0.46513445903689804,
"grad_norm": 0.45673272013664246,
"learning_rate": 2.6753734261359193e-05,
"loss": 1.2804,
"step": 11900
},
{
"epoch": 0.46904315196998125,
"grad_norm": 0.445117712020874,
"learning_rate": 2.655822319543286e-05,
"loss": 1.2819,
"step": 12000
},
{
"epoch": 0.4729518449030644,
"grad_norm": 0.40492427349090576,
"learning_rate": 2.6362712129506534e-05,
"loss": 1.2785,
"step": 12100
},
{
"epoch": 0.47686053783614757,
"grad_norm": 0.4738054871559143,
"learning_rate": 2.61672010635802e-05,
"loss": 1.2787,
"step": 12200
},
{
"epoch": 0.4807692307692308,
"grad_norm": 0.335173636674881,
"learning_rate": 2.597168999765387e-05,
"loss": 1.2789,
"step": 12300
},
{
"epoch": 0.48467792370231394,
"grad_norm": 0.32067373394966125,
"learning_rate": 2.5776178931727533e-05,
"loss": 1.279,
"step": 12400
},
{
"epoch": 0.4885866166353971,
"grad_norm": 0.37375131249427795,
"learning_rate": 2.5580667865801205e-05,
"loss": 1.2793,
"step": 12500
},
{
"epoch": 0.4924953095684803,
"grad_norm": 0.35795482993125916,
"learning_rate": 2.5385156799874878e-05,
"loss": 1.2805,
"step": 12600
},
{
"epoch": 0.49640400250156347,
"grad_norm": 0.5317375063896179,
"learning_rate": 2.5189645733948543e-05,
"loss": 1.2786,
"step": 12700
},
{
"epoch": 0.5003126954346466,
"grad_norm": 0.5315755605697632,
"learning_rate": 2.4994134668022212e-05,
"loss": 1.2796,
"step": 12800
},
{
"epoch": 0.5042213883677298,
"grad_norm": 0.3573049008846283,
"learning_rate": 2.479862360209588e-05,
"loss": 1.2794,
"step": 12900
},
{
"epoch": 0.508130081300813,
"grad_norm": 0.5303148627281189,
"learning_rate": 2.460311253616955e-05,
"loss": 1.2775,
"step": 13000
},
{
"epoch": 0.5120387742338962,
"grad_norm": 0.48965954780578613,
"learning_rate": 2.4407601470243215e-05,
"loss": 1.2798,
"step": 13100
},
{
"epoch": 0.5159474671669794,
"grad_norm": 0.3331129252910614,
"learning_rate": 2.4212090404316887e-05,
"loss": 1.2772,
"step": 13200
},
{
"epoch": 0.5198561601000625,
"grad_norm": 0.4897904098033905,
"learning_rate": 2.4016579338390556e-05,
"loss": 1.2786,
"step": 13300
},
{
"epoch": 0.5237648530331457,
"grad_norm": 0.48047152161598206,
"learning_rate": 2.382106827246422e-05,
"loss": 1.2782,
"step": 13400
},
{
"epoch": 0.5276735459662288,
"grad_norm": 0.34335121512413025,
"learning_rate": 2.362555720653789e-05,
"loss": 1.2766,
"step": 13500
}
],
"logging_steps": 100,
"max_steps": 25584,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 9482784473088000.0,
"train_batch_size": 48,
"trial_name": null,
"trial_params": null
}