{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9994447529150472,
  "eval_steps": 100,
  "global_step": 675,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.007403294466037387,
      "grad_norm": 0.6776805171272973,
      "learning_rate": 1.4705882352941177e-06,
      "loss": 1.2512,
      "step": 5
    },
    {
      "epoch": 0.014806588932074774,
      "grad_norm": 0.44944873495655524,
      "learning_rate": 2.9411764705882355e-06,
      "loss": 1.2594,
      "step": 10
    },
    {
      "epoch": 0.02220988339811216,
      "grad_norm": 0.34624498626136563,
      "learning_rate": 4.411764705882353e-06,
      "loss": 1.2327,
      "step": 15
    },
    {
      "epoch": 0.029613177864149548,
      "grad_norm": 0.3384934751143345,
      "learning_rate": 5.882352941176471e-06,
      "loss": 1.1823,
      "step": 20
    },
    {
      "epoch": 0.037016472330186935,
      "grad_norm": 0.2415385954324663,
      "learning_rate": 7.352941176470589e-06,
      "loss": 1.1483,
      "step": 25
    },
    {
      "epoch": 0.04441976679622432,
      "grad_norm": 0.21956265650508958,
      "learning_rate": 8.823529411764707e-06,
      "loss": 1.1051,
      "step": 30
    },
    {
      "epoch": 0.05182306126226171,
      "grad_norm": 0.18350394805421755,
      "learning_rate": 1.0294117647058823e-05,
      "loss": 1.0659,
      "step": 35
    },
    {
      "epoch": 0.059226355728299096,
      "grad_norm": 0.16210146685861945,
      "learning_rate": 1.1764705882352942e-05,
      "loss": 1.0381,
      "step": 40
    },
    {
      "epoch": 0.06662965019433648,
      "grad_norm": 0.1607004827826561,
      "learning_rate": 1.323529411764706e-05,
      "loss": 1.0217,
      "step": 45
    },
    {
      "epoch": 0.07403294466037387,
      "grad_norm": 0.16745984502215897,
      "learning_rate": 1.4705882352941179e-05,
      "loss": 1.038,
      "step": 50
    },
    {
      "epoch": 0.08143623912641125,
      "grad_norm": 0.14843663256682382,
      "learning_rate": 1.6176470588235296e-05,
      "loss": 1.0276,
      "step": 55
    },
    {
      "epoch": 0.08883953359244864,
      "grad_norm": 0.17168834832578023,
      "learning_rate": 1.7647058823529414e-05,
      "loss": 1.0032,
      "step": 60
    },
    {
      "epoch": 0.09624282805848602,
      "grad_norm": 0.15126385928527755,
      "learning_rate": 1.911764705882353e-05,
      "loss": 0.9954,
      "step": 65
    },
    {
      "epoch": 0.10364612252452342,
      "grad_norm": 0.1688834438402727,
      "learning_rate": 1.9999464266898485e-05,
      "loss": 0.9919,
      "step": 70
    },
    {
      "epoch": 0.1110494169905608,
      "grad_norm": 0.20449596627344765,
      "learning_rate": 1.9993437928712977e-05,
      "loss": 0.9839,
      "step": 75
    },
    {
      "epoch": 0.11845271145659819,
      "grad_norm": 0.17590208096396392,
      "learning_rate": 1.998071963486563e-05,
      "loss": 0.9714,
      "step": 80
    },
    {
      "epoch": 0.12585600592263557,
      "grad_norm": 0.14935660715610657,
      "learning_rate": 1.9961317901970953e-05,
      "loss": 0.9577,
      "step": 85
    },
    {
      "epoch": 0.13325930038867295,
      "grad_norm": 0.1656804473286765,
      "learning_rate": 1.993524572210807e-05,
      "loss": 0.9607,
      "step": 90
    },
    {
      "epoch": 0.14066259485471036,
      "grad_norm": 0.20165204812335902,
      "learning_rate": 1.990252055412077e-05,
      "loss": 0.9538,
      "step": 95
    },
    {
      "epoch": 0.14806588932074774,
      "grad_norm": 0.1672105719290636,
      "learning_rate": 1.9863164311926433e-05,
      "loss": 0.9842,
      "step": 100
    },
    {
      "epoch": 0.14806588932074774,
      "eval_loss": 0.9855244755744934,
      "eval_runtime": 7.1036,
      "eval_samples_per_second": 18.019,
      "eval_steps_per_second": 2.252,
      "step": 100
    },
    {
      "epoch": 0.15546918378678512,
      "grad_norm": 0.1594521756746442,
      "learning_rate": 1.981720334984174e-05,
      "loss": 0.9583,
      "step": 105
    },
    {
      "epoch": 0.1628724782528225,
      "grad_norm": 0.17121067681141633,
      "learning_rate": 1.9764668444934853e-05,
      "loss": 0.9474,
      "step": 110
    },
    {
      "epoch": 0.17027577271885988,
      "grad_norm": 0.15907478498991337,
      "learning_rate": 1.970559477641606e-05,
      "loss": 0.9207,
      "step": 115
    },
    {
      "epoch": 0.1776790671848973,
      "grad_norm": 0.1606853101811812,
      "learning_rate": 1.9640021902080523e-05,
      "loss": 0.9532,
      "step": 120
    },
    {
      "epoch": 0.18508236165093467,
      "grad_norm": 0.14842172195462983,
      "learning_rate": 1.9567993731818988e-05,
      "loss": 0.9512,
      "step": 125
    },
    {
      "epoch": 0.19248565611697205,
      "grad_norm": 0.16039228542123013,
      "learning_rate": 1.9489558498214197e-05,
      "loss": 0.9443,
      "step": 130
    },
    {
      "epoch": 0.19988895058300943,
      "grad_norm": 0.15683712656065646,
      "learning_rate": 1.9404768724242667e-05,
      "loss": 0.9257,
      "step": 135
    },
    {
      "epoch": 0.20729224504904684,
      "grad_norm": 0.17249812910241194,
      "learning_rate": 1.931368118810346e-05,
      "loss": 0.9545,
      "step": 140
    },
    {
      "epoch": 0.21469553951508422,
      "grad_norm": 0.15206299782477828,
      "learning_rate": 1.92163568851975e-05,
      "loss": 0.938,
      "step": 145
    },
    {
      "epoch": 0.2220988339811216,
      "grad_norm": 0.16815584502250794,
      "learning_rate": 1.911286098728296e-05,
      "loss": 0.9285,
      "step": 150
    },
    {
      "epoch": 0.22950212844715898,
      "grad_norm": 0.16122796157665575,
      "learning_rate": 1.900326279883392e-05,
      "loss": 0.9624,
      "step": 155
    },
    {
      "epoch": 0.23690542291319638,
      "grad_norm": 0.15224168391003395,
      "learning_rate": 1.8887635710631716e-05,
      "loss": 0.9662,
      "step": 160
    },
    {
      "epoch": 0.24430871737923376,
      "grad_norm": 0.14815411902225328,
      "learning_rate": 1.8766057150619865e-05,
      "loss": 0.9346,
      "step": 165
    },
    {
      "epoch": 0.25171201184527114,
      "grad_norm": 0.15183932254655913,
      "learning_rate": 1.8638608532055635e-05,
      "loss": 0.9533,
      "step": 170
    },
    {
      "epoch": 0.2591153063113085,
      "grad_norm": 0.1446345977964626,
      "learning_rate": 1.8505375198992856e-05,
      "loss": 0.9395,
      "step": 175
    },
    {
      "epoch": 0.2665186007773459,
      "grad_norm": 0.14315212915090392,
      "learning_rate": 1.836644636913258e-05,
      "loss": 0.9099,
      "step": 180
    },
    {
      "epoch": 0.2739218952433833,
      "grad_norm": 0.16987297421517,
      "learning_rate": 1.8221915074079764e-05,
      "loss": 0.9342,
      "step": 185
    },
    {
      "epoch": 0.2813251897094207,
      "grad_norm": 0.17161893073243528,
      "learning_rate": 1.8071878097046064e-05,
      "loss": 0.9115,
      "step": 190
    },
    {
      "epoch": 0.2887284841754581,
      "grad_norm": 0.1544233807044902,
      "learning_rate": 1.7916435908040413e-05,
      "loss": 0.9309,
      "step": 195
    },
    {
      "epoch": 0.2961317786414955,
      "grad_norm": 0.14748037798669633,
      "learning_rate": 1.7755692596590778e-05,
      "loss": 0.9333,
      "step": 200
    },
    {
      "epoch": 0.2961317786414955,
      "eval_loss": 0.9494131803512573,
      "eval_runtime": 7.1466,
      "eval_samples_per_second": 17.911,
      "eval_steps_per_second": 2.239,
      "step": 200
    },
    {
      "epoch": 0.30353507310753286,
      "grad_norm": 0.1555651675700521,
      "learning_rate": 1.7589755802042188e-05,
      "loss": 0.9287,
      "step": 205
    },
    {
      "epoch": 0.31093836757357024,
      "grad_norm": 0.16353203572144123,
      "learning_rate": 1.7418736641477636e-05,
      "loss": 0.9099,
      "step": 210
    },
    {
      "epoch": 0.3183416620396076,
      "grad_norm": 0.15764904906951305,
      "learning_rate": 1.7242749635310222e-05,
      "loss": 0.913,
      "step": 215
    },
    {
      "epoch": 0.325744956505645,
      "grad_norm": 0.1448977765793318,
      "learning_rate": 1.7061912630596252e-05,
      "loss": 0.9173,
      "step": 220
    },
    {
      "epoch": 0.3331482509716824,
      "grad_norm": 0.15258137653598713,
      "learning_rate": 1.6876346722120747e-05,
      "loss": 0.9319,
      "step": 225
    },
    {
      "epoch": 0.34055154543771976,
      "grad_norm": 0.1563094102159032,
      "learning_rate": 1.6686176171308125e-05,
      "loss": 0.955,
      "step": 230
    },
    {
      "epoch": 0.3479548399037572,
      "grad_norm": 0.14423025166628695,
      "learning_rate": 1.6491528323012412e-05,
      "loss": 0.9132,
      "step": 235
    },
    {
      "epoch": 0.3553581343697946,
      "grad_norm": 0.14263303346026474,
      "learning_rate": 1.6292533520242663e-05,
      "loss": 0.9157,
      "step": 240
    },
    {
      "epoch": 0.36276142883583196,
      "grad_norm": 0.13731880620436607,
      "learning_rate": 1.6089325016880737e-05,
      "loss": 0.9058,
      "step": 245
    },
    {
      "epoch": 0.37016472330186934,
      "grad_norm": 0.17337841623245412,
      "learning_rate": 1.588203888844982e-05,
      "loss": 0.9261,
      "step": 250
    },
    {
      "epoch": 0.3775680177679067,
      "grad_norm": 0.1517393366410306,
      "learning_rate": 1.5670813940993504e-05,
      "loss": 0.8936,
      "step": 255
    },
    {
      "epoch": 0.3849713122339441,
      "grad_norm": 0.14624943669912388,
      "learning_rate": 1.5455791618126407e-05,
      "loss": 0.8844,
      "step": 260
    },
    {
      "epoch": 0.3923746066999815,
      "grad_norm": 0.14950642484342547,
      "learning_rate": 1.5237115906318565e-05,
      "loss": 0.9132,
      "step": 265
    },
    {
      "epoch": 0.39977790116601886,
      "grad_norm": 0.15048654068633793,
      "learning_rate": 1.5014933238477069e-05,
      "loss": 0.8891,
      "step": 270
    },
    {
      "epoch": 0.4071811956320563,
      "grad_norm": 0.1570568189193127,
      "learning_rate": 1.4789392395889468e-05,
      "loss": 0.9186,
      "step": 275
    },
    {
      "epoch": 0.41458449009809367,
      "grad_norm": 0.15381562012177635,
      "learning_rate": 1.4560644408594602e-05,
      "loss": 0.8927,
      "step": 280
    },
    {
      "epoch": 0.42198778456413105,
      "grad_norm": 0.1444417700490563,
      "learning_rate": 1.432884245424761e-05,
      "loss": 0.9081,
      "step": 285
    },
    {
      "epoch": 0.42939107903016843,
      "grad_norm": 0.15807566475131207,
      "learning_rate": 1.4094141755546816e-05,
      "loss": 0.9389,
      "step": 290
    },
    {
      "epoch": 0.4367943734962058,
      "grad_norm": 0.14968340220236578,
      "learning_rate": 1.3856699476291176e-05,
      "loss": 0.8926,
      "step": 295
    },
    {
      "epoch": 0.4441976679622432,
      "grad_norm": 0.15781464037029244,
      "learning_rate": 1.3616674616137902e-05,
      "loss": 0.9161,
      "step": 300
    },
    {
      "epoch": 0.4441976679622432,
      "eval_loss": 0.930830717086792,
      "eval_runtime": 7.1062,
      "eval_samples_per_second": 18.013,
      "eval_steps_per_second": 2.252,
      "step": 300
    },
    {
      "epoch": 0.4516009624282806,
      "grad_norm": 0.15525393114371358,
      "learning_rate": 1.3374227904130724e-05,
      "loss": 0.9071,
      "step": 305
    },
    {
      "epoch": 0.45900425689431795,
      "grad_norm": 0.1457498963275538,
      "learning_rate": 1.3129521691070108e-05,
      "loss": 0.8833,
      "step": 310
    },
    {
      "epoch": 0.46640755136035533,
      "grad_norm": 0.14987682585253773,
      "learning_rate": 1.2882719840797473e-05,
      "loss": 0.9022,
      "step": 315
    },
    {
      "epoch": 0.47381084582639277,
      "grad_norm": 0.15509542057237247,
      "learning_rate": 1.2633987620466229e-05,
      "loss": 0.8885,
      "step": 320
    },
    {
      "epoch": 0.48121414029243015,
      "grad_norm": 0.14584510044567442,
      "learning_rate": 1.2383491589873122e-05,
      "loss": 0.8899,
      "step": 325
    },
    {
      "epoch": 0.48861743475846753,
      "grad_norm": 0.14309664992352822,
      "learning_rate": 1.213139948992394e-05,
      "loss": 0.8998,
      "step": 330
    },
    {
      "epoch": 0.4960207292245049,
      "grad_norm": 0.15130038484765435,
      "learning_rate": 1.187788013030837e-05,
      "loss": 0.8988,
      "step": 335
    },
    {
      "epoch": 0.5034240236905423,
      "grad_norm": 0.1459259790309168,
      "learning_rate": 1.1623103276459086e-05,
      "loss": 0.9007,
      "step": 340
    },
    {
      "epoch": 0.5108273181565797,
      "grad_norm": 0.14777529707412382,
      "learning_rate": 1.1367239535870913e-05,
      "loss": 0.8925,
      "step": 345
    },
    {
      "epoch": 0.518230612622617,
      "grad_norm": 0.14724048128355402,
      "learning_rate": 1.1110460243856051e-05,
      "loss": 0.8785,
      "step": 350
    },
    {
      "epoch": 0.5256339070886544,
      "grad_norm": 0.14527394357147336,
      "learning_rate": 1.085293734881197e-05,
      "loss": 0.8982,
      "step": 355
    },
    {
      "epoch": 0.5330372015546918,
      "grad_norm": 0.14050877787386462,
      "learning_rate": 1.0594843297078736e-05,
      "loss": 0.9181,
      "step": 360
    },
    {
      "epoch": 0.5404404960207292,
      "grad_norm": 0.15865485844086924,
      "learning_rate": 1.0336350917462925e-05,
      "loss": 0.9062,
      "step": 365
    },
    {
      "epoch": 0.5478437904867666,
      "grad_norm": 0.13521171802296975,
      "learning_rate": 1.0077633305505402e-05,
      "loss": 0.8927,
      "step": 370
    },
    {
      "epoch": 0.555247084952804,
      "grad_norm": 0.13603561195542918,
      "learning_rate": 9.818863707570476e-06,
      "loss": 0.9154,
      "step": 375
    },
    {
      "epoch": 0.5626503794188414,
      "grad_norm": 0.13965992628983345,
      "learning_rate": 9.560215404834094e-06,
      "loss": 0.9016,
      "step": 380
    },
    {
      "epoch": 0.5700536738848788,
      "grad_norm": 0.13666000951700663,
      "learning_rate": 9.30186159724869e-06,
      "loss": 0.8612,
      "step": 385
    },
    {
      "epoch": 0.5774569683509162,
      "grad_norm": 0.13625895333241347,
      "learning_rate": 9.043975287562443e-06,
      "loss": 0.9001,
      "step": 390
    },
    {
      "epoch": 0.5848602628169536,
      "grad_norm": 0.14371829685671048,
      "learning_rate": 8.786729165470584e-06,
      "loss": 0.8738,
      "step": 395
    },
    {
      "epoch": 0.592263557282991,
      "grad_norm": 0.13916699255941037,
      "learning_rate": 8.530295491976338e-06,
      "loss": 0.8804,
      "step": 400
    },
    {
      "epoch": 0.592263557282991,
      "eval_loss": 0.9166682958602905,
      "eval_runtime": 7.0621,
      "eval_samples_per_second": 18.125,
      "eval_steps_per_second": 2.266,
      "step": 400
    },
    {
      "epoch": 0.5996668517490283,
      "grad_norm": 0.13004150614702822,
      "learning_rate": 8.274845984038916e-06,
      "loss": 0.8647,
      "step": 405
    },
    {
      "epoch": 0.6070701462150657,
      "grad_norm": 0.14629481441132058,
      "learning_rate": 8.020551699585843e-06,
      "loss": 0.9007,
      "step": 410
    },
    {
      "epoch": 0.6144734406811031,
      "grad_norm": 0.12877499400506406,
      "learning_rate": 7.76758292296659e-06,
      "loss": 0.8745,
      "step": 415
    },
    {
      "epoch": 0.6218767351471405,
      "grad_norm": 0.13752238505053258,
      "learning_rate": 7.5161090509242005e-06,
      "loss": 0.8928,
      "step": 420
    },
    {
      "epoch": 0.6292800296131779,
      "grad_norm": 0.14148995267833064,
      "learning_rate": 7.2662984791613186e-06,
      "loss": 0.8845,
      "step": 425
    },
    {
      "epoch": 0.6366833240792152,
      "grad_norm": 0.14452849454657804,
      "learning_rate": 7.01831848957653e-06,
      "loss": 0.8991,
      "step": 430
    },
    {
      "epoch": 0.6440866185452526,
      "grad_norm": 0.13297035448949285,
      "learning_rate": 6.772335138246548e-06,
      "loss": 0.8966,
      "step": 435
    },
    {
      "epoch": 0.65148991301129,
      "grad_norm": 0.13999651657873824,
      "learning_rate": 6.528513144229256e-06,
      "loss": 0.8901,
      "step": 440
    },
    {
      "epoch": 0.6588932074773274,
      "grad_norm": 0.13786802129264458,
      "learning_rate": 6.287015779262064e-06,
      "loss": 0.8988,
      "step": 445
    },
    {
      "epoch": 0.6662965019433648,
      "grad_norm": 0.13362818972180024,
      "learning_rate": 6.048004758429451e-06,
      "loss": 0.8773,
      "step": 450
    },
    {
      "epoch": 0.6736997964094021,
      "grad_norm": 0.14094460764095268,
      "learning_rate": 5.811640131872867e-06,
      "loss": 0.9022,
      "step": 455
    },
    {
      "epoch": 0.6811030908754395,
      "grad_norm": 0.13177032254471754,
      "learning_rate": 5.578080177615575e-06,
      "loss": 0.8681,
      "step": 460
    },
    {
      "epoch": 0.688506385341477,
      "grad_norm": 0.1447285259606947,
      "learning_rate": 5.347481295574141e-06,
      "loss": 0.8642,
      "step": 465
    },
    {
      "epoch": 0.6959096798075144,
      "grad_norm": 0.16212399303569955,
      "learning_rate": 5.119997902827584e-06,
      "loss": 0.8768,
      "step": 470
    },
    {
      "epoch": 0.7033129742735518,
      "grad_norm": 0.13407281956450473,
      "learning_rate": 4.8957823302142916e-06,
      "loss": 0.8861,
      "step": 475
    },
    {
      "epoch": 0.7107162687395892,
      "grad_norm": 0.12431950401558224,
      "learning_rate": 4.674984720325961e-06,
      "loss": 0.8663,
      "step": 480
    },
    {
      "epoch": 0.7181195632056265,
      "grad_norm": 0.13146748720425666,
      "learning_rate": 4.457752926966888e-06,
      "loss": 0.8618,
      "step": 485
    },
    {
      "epoch": 0.7255228576716639,
      "grad_norm": 0.12584510121252945,
      "learning_rate": 4.244232416145839e-06,
      "loss": 0.8854,
      "step": 490
    },
    {
      "epoch": 0.7329261521377013,
      "grad_norm": 0.12990134879461734,
      "learning_rate": 4.0345661686669745e-06,
      "loss": 0.8762,
      "step": 495
    },
    {
      "epoch": 0.7403294466037387,
      "grad_norm": 0.13783047027951226,
      "learning_rate": 3.828894584384867e-06,
      "loss": 0.8859,
      "step": 500
    },
    {
      "epoch": 0.7403294466037387,
      "eval_loss": 0.9078959226608276,
      "eval_runtime": 7.1187,
      "eval_samples_per_second": 17.981,
      "eval_steps_per_second": 2.248,
      "step": 500
    },
    {
      "epoch": 0.747732741069776,
      "grad_norm": 0.12586000345704224,
      "learning_rate": 3.62735538818787e-06,
      "loss": 0.8688,
      "step": 505
    },
    {
      "epoch": 0.7551360355358134,
      "grad_norm": 0.14104016395887148,
      "learning_rate": 3.4300835377726904e-06,
      "loss": 0.8703,
      "step": 510
    },
    {
      "epoch": 0.7625393300018508,
      "grad_norm": 0.13533570426883107,
      "learning_rate": 3.2372111332720045e-06,
      "loss": 0.9138,
      "step": 515
    },
    {
      "epoch": 0.7699426244678882,
      "grad_norm": 0.14074880883541166,
      "learning_rate": 3.048867328795588e-06,
      "loss": 0.8618,
      "step": 520
    },
    {
      "epoch": 0.7773459189339256,
      "grad_norm": 0.13230663319443067,
      "learning_rate": 2.865178245944218e-06,
      "loss": 0.8604,
      "step": 525
    },
    {
      "epoch": 0.784749213399963,
      "grad_norm": 0.1247899466464526,
      "learning_rate": 2.686266889354211e-06,
      "loss": 0.8859,
      "step": 530
    },
    {
      "epoch": 0.7921525078660003,
      "grad_norm": 0.13575024760681823,
      "learning_rate": 2.5122530643292274e-06,
      "loss": 0.8905,
      "step": 535
    },
    {
      "epoch": 0.7995558023320377,
      "grad_norm": 0.1357493128741245,
      "learning_rate": 2.3432532966144526e-06,
      "loss": 0.8852,
      "step": 540
    },
    {
      "epoch": 0.8069590967980751,
      "grad_norm": 0.13498792821706115,
      "learning_rate": 2.1793807543668857e-06,
      "loss": 0.8828,
      "step": 545
    },
    {
      "epoch": 0.8143623912641126,
      "grad_norm": 0.12019174745605471,
      "learning_rate": 2.0207451723739633e-06,
      "loss": 0.8742,
      "step": 550
    },
    {
      "epoch": 0.82176568573015,
      "grad_norm": 0.12150186530956911,
      "learning_rate": 1.8674527785713247e-06,
      "loss": 0.8802,
      "step": 555
    },
    {
      "epoch": 0.8291689801961873,
      "grad_norm": 0.13657600204630863,
      "learning_rate": 1.7196062229088606e-06,
      "loss": 0.8458,
      "step": 560
    },
    {
      "epoch": 0.8365722746622247,
      "grad_norm": 0.14316460964604913,
      "learning_rate": 1.577304508612717e-06,
      "loss": 0.8772,
      "step": 565
    },
    {
      "epoch": 0.8439755691282621,
      "grad_norm": 0.11702464461690702,
      "learning_rate": 1.4406429258892762e-06,
      "loss": 0.8978,
      "step": 570
    },
    {
      "epoch": 0.8513788635942995,
      "grad_norm": 0.13649799424857104,
      "learning_rate": 1.3097129881154936e-06,
      "loss": 0.8679,
      "step": 575
    },
    {
      "epoch": 0.8587821580603369,
      "grad_norm": 0.12879197044894133,
      "learning_rate": 1.1846023705583442e-06,
      "loss": 0.8611,
      "step": 580
    },
    {
      "epoch": 0.8661854525263742,
      "grad_norm": 0.12611530487166148,
      "learning_rate": 1.065394851664394e-06,
      "loss": 0.8847,
      "step": 585
    },
    {
      "epoch": 0.8735887469924116,
      "grad_norm": 0.12199166350158228,
      "learning_rate": 9.521702569588199e-07,
      "loss": 0.9041,
      "step": 590
    },
    {
      "epoch": 0.880992041458449,
      "grad_norm": 0.13250148039242704,
      "learning_rate": 8.450044055914497e-07,
      "loss": 0.8703,
      "step": 595
    },
    {
      "epoch": 0.8883953359244864,
      "grad_norm": 0.12335176677515089,
      "learning_rate": 7.439690595656013e-07,
      "loss": 0.8931,
      "step": 600
    },
    {
      "epoch": 0.8883953359244864,
      "eval_loss": 0.904110312461853,
      "eval_runtime": 7.0453,
      "eval_samples_per_second": 18.168,
      "eval_steps_per_second": 2.271,
      "step": 600
    },
    {
      "epoch": 0.8957986303905238,
      "grad_norm": 0.13803312908970108,
      "learning_rate": 6.491318756837417e-07,
      "loss": 0.8773,
      "step": 605
    },
    {
      "epoch": 0.9032019248565611,
      "grad_norm": 0.11990945958525849,
      "learning_rate": 5.605563602421149e-07,
      "loss": 0.8506,
      "step": 610
    },
    {
      "epoch": 0.9106052193225985,
      "grad_norm": 0.12252169940099521,
      "learning_rate": 4.783018265047179e-07,
      "loss": 0.9066,
      "step": 615
    },
    {
      "epoch": 0.9180085137886359,
      "grad_norm": 0.12997936768176632,
      "learning_rate": 4.024233549850509e-07,
      "loss": 0.8924,
      "step": 620
    },
    {
      "epoch": 0.9254118082546733,
      "grad_norm": 0.12018543528554308,
      "learning_rate": 3.329717565622825e-07,
      "loss": 0.8889,
      "step": 625
    },
    {
      "epoch": 0.9328151027207107,
      "grad_norm": 0.12623553627779757,
      "learning_rate": 2.6999353845651113e-07,
      "loss": 0.8718,
      "step": 630
    },
    {
      "epoch": 0.9402183971867482,
      "grad_norm": 0.12569602093691798,
      "learning_rate": 2.1353087308590314e-07,
      "loss": 0.8885,
      "step": 635
    },
    {
      "epoch": 0.9476216916527855,
      "grad_norm": 0.12693697747520694,
      "learning_rate": 1.6362156982656085e-07,
      "loss": 0.8778,
      "step": 640
    },
    {
      "epoch": 0.9550249861188229,
      "grad_norm": 0.11916388416111799,
      "learning_rate": 1.2029904969404482e-07,
      "loss": 0.8574,
      "step": 645
    },
    {
      "epoch": 0.9624282805848603,
      "grad_norm": 0.13423693830897884,
      "learning_rate": 8.359232296349163e-08,
      "loss": 0.8633,
      "step": 650
    },
    {
      "epoch": 0.9698315750508977,
      "grad_norm": 0.12941855976651295,
      "learning_rate": 5.3525969743324356e-08,
      "loss": 0.8779,
      "step": 655
    },
    {
      "epoch": 0.9772348695169351,
      "grad_norm": 0.11997265846004904,
      "learning_rate": 3.012012351554017e-08,
      "loss": 0.8493,
      "step": 660
    },
    {
      "epoch": 0.9846381639829724,
      "grad_norm": 0.13237634154671962,
      "learning_rate": 1.3390457653639221e-08,
      "loss": 0.8863,
      "step": 665
    },
    {
      "epoch": 0.9920414584490098,
      "grad_norm": 0.13967439000954648,
      "learning_rate": 3.3481749271768726e-09,
      "loss": 0.8954,
      "step": 670
    },
    {
      "epoch": 0.9994447529150472,
      "grad_norm": 0.12596999235093093,
      "learning_rate": 0.0,
      "loss": 0.861,
      "step": 675
    },
    {
      "epoch": 0.9994447529150472,
      "step": 675,
      "total_flos": 1.6624354892709888e+17,
      "train_loss": 0.9221899901496039,
      "train_runtime": 5284.4545,
      "train_samples_per_second": 4.089,
      "train_steps_per_second": 0.128
    }
  ],
  "logging_steps": 5,
  "max_steps": 675,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.6624354892709888e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}