{
"best_metric": 0.008054222911596298,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 1.46822665749025,
"eval_steps": 25,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00734113328745125,
"grad_norm": 1.0009183883666992,
"learning_rate": 2.9999999999999997e-05,
"loss": 2.7152,
"step": 1
},
{
"epoch": 0.00734113328745125,
"eval_loss": 2.739882469177246,
"eval_runtime": 11.5725,
"eval_samples_per_second": 4.321,
"eval_steps_per_second": 4.321,
"step": 1
},
{
"epoch": 0.0146822665749025,
"grad_norm": 1.0102258920669556,
"learning_rate": 5.9999999999999995e-05,
"loss": 2.6972,
"step": 2
},
{
"epoch": 0.02202339986235375,
"grad_norm": 0.9687394499778748,
"learning_rate": 8.999999999999999e-05,
"loss": 2.7558,
"step": 3
},
{
"epoch": 0.029364533149805,
"grad_norm": 0.7906656265258789,
"learning_rate": 0.00011999999999999999,
"loss": 2.5397,
"step": 4
},
{
"epoch": 0.03670566643725625,
"grad_norm": 0.4140726625919342,
"learning_rate": 0.00015,
"loss": 2.5202,
"step": 5
},
{
"epoch": 0.0440467997247075,
"grad_norm": 0.68717360496521,
"learning_rate": 0.00017999999999999998,
"loss": 2.5802,
"step": 6
},
{
"epoch": 0.05138793301215875,
"grad_norm": 0.9959529042243958,
"learning_rate": 0.00020999999999999998,
"loss": 2.563,
"step": 7
},
{
"epoch": 0.05872906629961,
"grad_norm": 0.5985053777694702,
"learning_rate": 0.00023999999999999998,
"loss": 2.5683,
"step": 8
},
{
"epoch": 0.06607019958706126,
"grad_norm": 0.2967154085636139,
"learning_rate": 0.00027,
"loss": 2.539,
"step": 9
},
{
"epoch": 0.0734113328745125,
"grad_norm": 0.28381672501564026,
"learning_rate": 0.0003,
"loss": 2.5398,
"step": 10
},
{
"epoch": 0.08075246616196376,
"grad_norm": 0.38264232873916626,
"learning_rate": 0.0002999794957488703,
"loss": 2.5249,
"step": 11
},
{
"epoch": 0.088093599449415,
"grad_norm": 0.33728712797164917,
"learning_rate": 0.0002999179886011389,
"loss": 2.4142,
"step": 12
},
{
"epoch": 0.09543473273686626,
"grad_norm": 0.26186007261276245,
"learning_rate": 0.0002998154953722457,
"loss": 2.3963,
"step": 13
},
{
"epoch": 0.1027758660243175,
"grad_norm": 0.26044395565986633,
"learning_rate": 0.00029967204408281613,
"loss": 2.4171,
"step": 14
},
{
"epoch": 0.11011699931176876,
"grad_norm": 0.33337321877479553,
"learning_rate": 0.00029948767395100045,
"loss": 2.3295,
"step": 15
},
{
"epoch": 0.11745813259922,
"grad_norm": 0.2713659703731537,
"learning_rate": 0.0002992624353817517,
"loss": 2.3165,
"step": 16
},
{
"epoch": 0.12479926588667126,
"grad_norm": 0.24857458472251892,
"learning_rate": 0.0002989963899530457,
"loss": 2.3045,
"step": 17
},
{
"epoch": 0.1321403991741225,
"grad_norm": 0.2671072781085968,
"learning_rate": 0.00029868961039904624,
"loss": 2.3119,
"step": 18
},
{
"epoch": 0.13948153246157374,
"grad_norm": 0.2695709466934204,
"learning_rate": 0.00029834218059022024,
"loss": 2.319,
"step": 19
},
{
"epoch": 0.146822665749025,
"grad_norm": 0.2995961010456085,
"learning_rate": 0.00029795419551040833,
"loss": 2.252,
"step": 20
},
{
"epoch": 0.15416379903647626,
"grad_norm": 0.2794254422187805,
"learning_rate": 0.00029752576123085736,
"loss": 2.2558,
"step": 21
},
{
"epoch": 0.1615049323239275,
"grad_norm": 0.31873247027397156,
"learning_rate": 0.0002970569948812214,
"loss": 2.2161,
"step": 22
},
{
"epoch": 0.16884606561137874,
"grad_norm": 0.3151024878025055,
"learning_rate": 0.0002965480246175399,
"loss": 2.2114,
"step": 23
},
{
"epoch": 0.17618719889883,
"grad_norm": 0.3369925022125244,
"learning_rate": 0.0002959989895872009,
"loss": 2.1251,
"step": 24
},
{
"epoch": 0.18352833218628126,
"grad_norm": 0.3629818260669708,
"learning_rate": 0.0002954100398908995,
"loss": 2.2003,
"step": 25
},
{
"epoch": 0.18352833218628126,
"eval_loss": 2.0463967323303223,
"eval_runtime": 11.771,
"eval_samples_per_second": 4.248,
"eval_steps_per_second": 4.248,
"step": 25
},
{
"epoch": 0.1908694654737325,
"grad_norm": 0.4075523316860199,
"learning_rate": 0.0002947813365416023,
"loss": 2.0395,
"step": 26
},
{
"epoch": 0.19821059876118377,
"grad_norm": 0.4278491139411926,
"learning_rate": 0.0002941130514205272,
"loss": 2.0627,
"step": 27
},
{
"epoch": 0.205551732048635,
"grad_norm": 0.5144158005714417,
"learning_rate": 0.0002934053672301536,
"loss": 2.1029,
"step": 28
},
{
"epoch": 0.21289286533608626,
"grad_norm": 0.5315491557121277,
"learning_rate": 0.00029265847744427303,
"loss": 2.0312,
"step": 29
},
{
"epoch": 0.2202339986235375,
"grad_norm": 0.5522644519805908,
"learning_rate": 0.00029187258625509513,
"loss": 2.0579,
"step": 30
},
{
"epoch": 0.22757513191098877,
"grad_norm": 0.5328534841537476,
"learning_rate": 0.00029104790851742417,
"loss": 1.9475,
"step": 31
},
{
"epoch": 0.23491626519844,
"grad_norm": 0.6033841371536255,
"learning_rate": 0.0002901846696899191,
"loss": 1.8248,
"step": 32
},
{
"epoch": 0.24225739848589126,
"grad_norm": 0.7932512760162354,
"learning_rate": 0.00028928310577345606,
"loss": 1.9608,
"step": 33
},
{
"epoch": 0.2495985317733425,
"grad_norm": 0.6182257533073425,
"learning_rate": 0.0002883434632466077,
"loss": 1.8797,
"step": 34
},
{
"epoch": 0.25693966506079374,
"grad_norm": 0.6742249131202698,
"learning_rate": 0.00028736599899825856,
"loss": 1.8248,
"step": 35
},
{
"epoch": 0.264280798348245,
"grad_norm": 0.7396733164787292,
"learning_rate": 0.00028635098025737434,
"loss": 1.8357,
"step": 36
},
{
"epoch": 0.27162193163569626,
"grad_norm": 0.701486349105835,
"learning_rate": 0.00028529868451994384,
"loss": 1.7344,
"step": 37
},
{
"epoch": 0.2789630649231475,
"grad_norm": 0.6000120639801025,
"learning_rate": 0.0002842093994731145,
"loss": 1.7778,
"step": 38
},
{
"epoch": 0.28630419821059877,
"grad_norm": 0.7575259804725647,
"learning_rate": 0.00028308342291654174,
"loss": 1.6843,
"step": 39
},
{
"epoch": 0.29364533149805,
"grad_norm": 0.8110001683235168,
"learning_rate": 0.00028192106268097334,
"loss": 1.6607,
"step": 40
},
{
"epoch": 0.3009864647855013,
"grad_norm": 0.7647180557250977,
"learning_rate": 0.00028072263654409154,
"loss": 1.7497,
"step": 41
},
{
"epoch": 0.3083275980729525,
"grad_norm": 0.785166323184967,
"learning_rate": 0.0002794884721436361,
"loss": 1.515,
"step": 42
},
{
"epoch": 0.31566873136040374,
"grad_norm": 1.9912952184677124,
"learning_rate": 0.00027821890688783083,
"loss": 1.553,
"step": 43
},
{
"epoch": 0.323009864647855,
"grad_norm": 0.9436890482902527,
"learning_rate": 0.0002769142878631403,
"loss": 1.644,
"step": 44
},
{
"epoch": 0.33035099793530626,
"grad_norm": 1.1021350622177124,
"learning_rate": 0.00027557497173937923,
"loss": 1.3351,
"step": 45
},
{
"epoch": 0.3376921312227575,
"grad_norm": 1.1297169923782349,
"learning_rate": 0.000274201324672203,
"loss": 1.4851,
"step": 46
},
{
"epoch": 0.34503326451020877,
"grad_norm": 1.3415935039520264,
"learning_rate": 0.00027279372220300385,
"loss": 1.4308,
"step": 47
},
{
"epoch": 0.35237439779766,
"grad_norm": 1.052236557006836,
"learning_rate": 0.0002713525491562421,
"loss": 1.3211,
"step": 48
},
{
"epoch": 0.3597155310851113,
"grad_norm": 1.047372579574585,
"learning_rate": 0.00026987819953423867,
"loss": 1.1593,
"step": 49
},
{
"epoch": 0.3670566643725625,
"grad_norm": 1.29693603515625,
"learning_rate": 0.00026837107640945905,
"loss": 1.2361,
"step": 50
},
{
"epoch": 0.3670566643725625,
"eval_loss": 1.266974925994873,
"eval_runtime": 11.7783,
"eval_samples_per_second": 4.245,
"eval_steps_per_second": 4.245,
"step": 50
},
{
"epoch": 0.37439779766001374,
"grad_norm": 1.1293262243270874,
"learning_rate": 0.0002668315918143169,
"loss": 1.1617,
"step": 51
},
{
"epoch": 0.381738930947465,
"grad_norm": 1.1484521627426147,
"learning_rate": 0.00026526016662852886,
"loss": 1.2379,
"step": 52
},
{
"epoch": 0.38908006423491626,
"grad_norm": 1.064557671546936,
"learning_rate": 0.00026365723046405023,
"loss": 1.1066,
"step": 53
},
{
"epoch": 0.39642119752236754,
"grad_norm": 0.9905532002449036,
"learning_rate": 0.0002620232215476231,
"loss": 1.1669,
"step": 54
},
{
"epoch": 0.40376233080981877,
"grad_norm": 1.251165509223938,
"learning_rate": 0.0002603585866009697,
"loss": 1.0054,
"step": 55
},
{
"epoch": 0.41110346409727,
"grad_norm": 1.0376931428909302,
"learning_rate": 0.00025866378071866334,
"loss": 1.0141,
"step": 56
},
{
"epoch": 0.4184445973847213,
"grad_norm": 1.1509113311767578,
"learning_rate": 0.00025693926724370956,
"loss": 0.8967,
"step": 57
},
{
"epoch": 0.4257857306721725,
"grad_norm": 1.2194794416427612,
"learning_rate": 0.00025518551764087326,
"loss": 1.0544,
"step": 58
},
{
"epoch": 0.43312686395962374,
"grad_norm": 0.999973475933075,
"learning_rate": 0.00025340301136778483,
"loss": 0.8905,
"step": 59
},
{
"epoch": 0.440467997247075,
"grad_norm": 1.0643336772918701,
"learning_rate": 0.00025159223574386114,
"loss": 1.0378,
"step": 60
},
{
"epoch": 0.44780913053452626,
"grad_norm": 1.0134408473968506,
"learning_rate": 0.0002497536858170772,
"loss": 0.8218,
"step": 61
},
{
"epoch": 0.45515026382197754,
"grad_norm": 1.4050616025924683,
"learning_rate": 0.00024788786422862526,
"loss": 0.8544,
"step": 62
},
{
"epoch": 0.46249139710942877,
"grad_norm": 1.4386414289474487,
"learning_rate": 0.00024599528107549745,
"loss": 0.8324,
"step": 63
},
{
"epoch": 0.46983253039688,
"grad_norm": 1.1594165563583374,
"learning_rate": 0.00024407645377103054,
"loss": 0.7453,
"step": 64
},
{
"epoch": 0.4771736636843313,
"grad_norm": 1.2135729789733887,
"learning_rate": 0.00024213190690345018,
"loss": 0.8184,
"step": 65
},
{
"epoch": 0.4845147969717825,
"grad_norm": 0.9763234257698059,
"learning_rate": 0.00024016217209245374,
"loss": 0.596,
"step": 66
},
{
"epoch": 0.49185593025923374,
"grad_norm": 1.1825246810913086,
"learning_rate": 0.00023816778784387094,
"loss": 0.6871,
"step": 67
},
{
"epoch": 0.499197063546685,
"grad_norm": 1.0060195922851562,
"learning_rate": 0.0002361492994024415,
"loss": 0.658,
"step": 68
},
{
"epoch": 0.5065381968341363,
"grad_norm": 1.87339186668396,
"learning_rate": 0.0002341072586027509,
"loss": 0.7342,
"step": 69
},
{
"epoch": 0.5138793301215875,
"grad_norm": 1.2143769264221191,
"learning_rate": 0.00023204222371836405,
"loss": 0.6153,
"step": 70
},
{
"epoch": 0.5212204634090388,
"grad_norm": 1.0467982292175293,
"learning_rate": 0.00022995475930919905,
"loss": 0.5277,
"step": 71
},
{
"epoch": 0.52856159669649,
"grad_norm": 1.0355846881866455,
"learning_rate": 0.00022784543606718227,
"loss": 0.5707,
"step": 72
},
{
"epoch": 0.5359027299839413,
"grad_norm": 0.8927159905433655,
"learning_rate": 0.00022571483066022657,
"loss": 0.3879,
"step": 73
},
{
"epoch": 0.5432438632713925,
"grad_norm": 1.427565097808838,
"learning_rate": 0.0002235635255745762,
"loss": 0.6126,
"step": 74
},
{
"epoch": 0.5505849965588437,
"grad_norm": 1.1537415981292725,
"learning_rate": 0.00022139210895556104,
"loss": 0.5787,
"step": 75
},
{
"epoch": 0.5505849965588437,
"eval_loss": 0.3803807497024536,
"eval_runtime": 11.7561,
"eval_samples_per_second": 4.253,
"eval_steps_per_second": 4.253,
"step": 75
},
{
"epoch": 0.557926129846295,
"grad_norm": 0.9431911706924438,
"learning_rate": 0.00021920117444680317,
"loss": 0.3773,
"step": 76
},
{
"epoch": 0.5652672631337463,
"grad_norm": 0.9953020215034485,
"learning_rate": 0.00021699132102792097,
"loss": 0.3676,
"step": 77
},
{
"epoch": 0.5726083964211975,
"grad_norm": 0.98787921667099,
"learning_rate": 0.0002147631528507739,
"loss": 0.3079,
"step": 78
},
{
"epoch": 0.5799495297086488,
"grad_norm": 1.191606879234314,
"learning_rate": 0.00021251727907429355,
"loss": 0.3129,
"step": 79
},
{
"epoch": 0.5872906629961,
"grad_norm": 1.0935895442962646,
"learning_rate": 0.0002102543136979454,
"loss": 0.3122,
"step": 80
},
{
"epoch": 0.5946317962835512,
"grad_norm": 1.4533487558364868,
"learning_rate": 0.0002079748753938678,
"loss": 0.5123,
"step": 81
},
{
"epoch": 0.6019729295710026,
"grad_norm": 1.176979422569275,
"learning_rate": 0.0002056795873377331,
"loss": 0.3833,
"step": 82
},
{
"epoch": 0.6093140628584538,
"grad_norm": 1.027469277381897,
"learning_rate": 0.00020336907703837748,
"loss": 0.2685,
"step": 83
},
{
"epoch": 0.616655196145905,
"grad_norm": 0.9938647747039795,
"learning_rate": 0.00020104397616624645,
"loss": 0.2836,
"step": 84
},
{
"epoch": 0.6239963294333563,
"grad_norm": 0.8680717349052429,
"learning_rate": 0.00019870492038070252,
"loss": 0.2036,
"step": 85
},
{
"epoch": 0.6313374627208075,
"grad_norm": 1.1820261478424072,
"learning_rate": 0.0001963525491562421,
"loss": 0.3018,
"step": 86
},
{
"epoch": 0.6386785960082588,
"grad_norm": 0.9207668900489807,
"learning_rate": 0.0001939875056076697,
"loss": 0.1357,
"step": 87
},
{
"epoch": 0.64601972929571,
"grad_norm": 0.6974514126777649,
"learning_rate": 0.00019161043631427666,
"loss": 0.1046,
"step": 88
},
{
"epoch": 0.6533608625831613,
"grad_norm": 1.2946507930755615,
"learning_rate": 0.00018922199114307294,
"loss": 0.2132,
"step": 89
},
{
"epoch": 0.6607019958706125,
"grad_norm": 0.9494311213493347,
"learning_rate": 0.00018682282307111987,
"loss": 0.1881,
"step": 90
},
{
"epoch": 0.6680431291580637,
"grad_norm": 0.921244204044342,
"learning_rate": 0.00018441358800701273,
"loss": 0.26,
"step": 91
},
{
"epoch": 0.675384262445515,
"grad_norm": 0.8400191068649292,
"learning_rate": 0.00018199494461156203,
"loss": 0.153,
"step": 92
},
{
"epoch": 0.6827253957329663,
"grad_norm": 0.7949177622795105,
"learning_rate": 0.000179567554117722,
"loss": 0.1471,
"step": 93
},
{
"epoch": 0.6900665290204175,
"grad_norm": 0.9048661589622498,
"learning_rate": 0.00017713208014981648,
"loss": 0.126,
"step": 94
},
{
"epoch": 0.6974076623078688,
"grad_norm": 0.8785199522972107,
"learning_rate": 0.00017468918854211007,
"loss": 0.1536,
"step": 95
},
{
"epoch": 0.70474879559532,
"grad_norm": 0.6925604939460754,
"learning_rate": 0.00017223954715677627,
"loss": 0.1041,
"step": 96
},
{
"epoch": 0.7120899288827712,
"grad_norm": 0.7400240898132324,
"learning_rate": 0.00016978382570131034,
"loss": 0.0924,
"step": 97
},
{
"epoch": 0.7194310621702226,
"grad_norm": 0.6650524139404297,
"learning_rate": 0.00016732269554543794,
"loss": 0.0788,
"step": 98
},
{
"epoch": 0.7267721954576738,
"grad_norm": 0.5950520038604736,
"learning_rate": 0.00016485682953756942,
"loss": 0.066,
"step": 99
},
{
"epoch": 0.734113328745125,
"grad_norm": 0.6176964640617371,
"learning_rate": 0.00016238690182084986,
"loss": 0.0628,
"step": 100
},
{
"epoch": 0.734113328745125,
"eval_loss": 0.1003895029425621,
"eval_runtime": 11.7465,
"eval_samples_per_second": 4.257,
"eval_steps_per_second": 4.257,
"step": 100
},
{
"epoch": 0.7414544620325763,
"grad_norm": 0.7126716375350952,
"learning_rate": 0.0001599135876488549,
"loss": 0.1003,
"step": 101
},
{
"epoch": 0.7487955953200275,
"grad_norm": 0.666622519493103,
"learning_rate": 0.00015743756320098332,
"loss": 0.0848,
"step": 102
},
{
"epoch": 0.7561367286074788,
"grad_norm": 1.0065160989761353,
"learning_rate": 0.0001549595053975962,
"loss": 0.1229,
"step": 103
},
{
"epoch": 0.76347786189493,
"grad_norm": 0.8639866709709167,
"learning_rate": 0.00015248009171495378,
"loss": 0.0971,
"step": 104
},
{
"epoch": 0.7708189951823813,
"grad_norm": 0.5344640016555786,
"learning_rate": 0.00015,
"loss": 0.0597,
"step": 105
},
{
"epoch": 0.7781601284698325,
"grad_norm": 0.7466907501220703,
"learning_rate": 0.00014751990828504622,
"loss": 0.0987,
"step": 106
},
{
"epoch": 0.7855012617572837,
"grad_norm": 0.5494584441184998,
"learning_rate": 0.00014504049460240375,
"loss": 0.0618,
"step": 107
},
{
"epoch": 0.7928423950447351,
"grad_norm": 0.6100556254386902,
"learning_rate": 0.00014256243679901663,
"loss": 0.0793,
"step": 108
},
{
"epoch": 0.8001835283321863,
"grad_norm": 0.6060194969177246,
"learning_rate": 0.00014008641235114508,
"loss": 0.0789,
"step": 109
},
{
"epoch": 0.8075246616196375,
"grad_norm": 0.6377370357513428,
"learning_rate": 0.00013761309817915014,
"loss": 0.077,
"step": 110
},
{
"epoch": 0.8148657949070888,
"grad_norm": 0.4830247461795807,
"learning_rate": 0.00013514317046243058,
"loss": 0.043,
"step": 111
},
{
"epoch": 0.82220692819454,
"grad_norm": 0.44580113887786865,
"learning_rate": 0.00013267730445456208,
"loss": 0.0416,
"step": 112
},
{
"epoch": 0.8295480614819912,
"grad_norm": 0.37075257301330566,
"learning_rate": 0.00013021617429868963,
"loss": 0.0312,
"step": 113
},
{
"epoch": 0.8368891947694426,
"grad_norm": 0.3857726752758026,
"learning_rate": 0.00012776045284322368,
"loss": 0.0332,
"step": 114
},
{
"epoch": 0.8442303280568938,
"grad_norm": 0.562850296497345,
"learning_rate": 0.00012531081145788987,
"loss": 0.0345,
"step": 115
},
{
"epoch": 0.851571461344345,
"grad_norm": 0.41303595900535583,
"learning_rate": 0.00012286791985018355,
"loss": 0.0312,
"step": 116
},
{
"epoch": 0.8589125946317963,
"grad_norm": 0.4234278202056885,
"learning_rate": 0.00012043244588227796,
"loss": 0.036,
"step": 117
},
{
"epoch": 0.8662537279192475,
"grad_norm": 0.46443456411361694,
"learning_rate": 0.00011800505538843798,
"loss": 0.0341,
"step": 118
},
{
"epoch": 0.8735948612066988,
"grad_norm": 0.41802355647087097,
"learning_rate": 0.00011558641199298727,
"loss": 0.0249,
"step": 119
},
{
"epoch": 0.88093599449415,
"grad_norm": 0.41087275743484497,
"learning_rate": 0.00011317717692888012,
"loss": 0.0234,
"step": 120
},
{
"epoch": 0.8882771277816013,
"grad_norm": 0.3621070683002472,
"learning_rate": 0.00011077800885692702,
"loss": 0.0249,
"step": 121
},
{
"epoch": 0.8956182610690525,
"grad_norm": 0.31254327297210693,
"learning_rate": 0.00010838956368572334,
"loss": 0.0223,
"step": 122
},
{
"epoch": 0.9029593943565037,
"grad_norm": 0.4205554127693176,
"learning_rate": 0.0001060124943923303,
"loss": 0.0219,
"step": 123
},
{
"epoch": 0.9103005276439551,
"grad_norm": 0.2622913122177124,
"learning_rate": 0.0001036474508437579,
"loss": 0.016,
"step": 124
},
{
"epoch": 0.9176416609314063,
"grad_norm": 0.32347017526626587,
"learning_rate": 0.00010129507961929748,
"loss": 0.022,
"step": 125
},
{
"epoch": 0.9176416609314063,
"eval_loss": 0.015302430838346481,
"eval_runtime": 11.7443,
"eval_samples_per_second": 4.257,
"eval_steps_per_second": 4.257,
"step": 125
},
{
"epoch": 0.9249827942188575,
"grad_norm": 0.3583570718765259,
"learning_rate": 9.895602383375353e-05,
"loss": 0.0211,
"step": 126
},
{
"epoch": 0.9323239275063088,
"grad_norm": 0.45600995421409607,
"learning_rate": 9.663092296162251e-05,
"loss": 0.0227,
"step": 127
},
{
"epoch": 0.93966506079376,
"grad_norm": 0.30763763189315796,
"learning_rate": 9.432041266226686e-05,
"loss": 0.0221,
"step": 128
},
{
"epoch": 0.9470061940812113,
"grad_norm": 0.271857887506485,
"learning_rate": 9.202512460613219e-05,
"loss": 0.0173,
"step": 129
},
{
"epoch": 0.9543473273686626,
"grad_norm": 0.33396950364112854,
"learning_rate": 8.97456863020546e-05,
"loss": 0.0198,
"step": 130
},
{
"epoch": 0.9616884606561138,
"grad_norm": 0.2286490797996521,
"learning_rate": 8.748272092570646e-05,
"loss": 0.018,
"step": 131
},
{
"epoch": 0.969029593943565,
"grad_norm": 0.28220534324645996,
"learning_rate": 8.523684714922608e-05,
"loss": 0.0173,
"step": 132
},
{
"epoch": 0.9763707272310163,
"grad_norm": 0.2242220640182495,
"learning_rate": 8.300867897207903e-05,
"loss": 0.0147,
"step": 133
},
{
"epoch": 0.9837118605184675,
"grad_norm": 0.17104695737361908,
"learning_rate": 8.079882555319684e-05,
"loss": 0.0137,
"step": 134
},
{
"epoch": 0.9910529938059188,
"grad_norm": 0.1608731746673584,
"learning_rate": 7.860789104443896e-05,
"loss": 0.014,
"step": 135
},
{
"epoch": 0.99839412709337,
"grad_norm": 0.18688522279262543,
"learning_rate": 7.643647442542382e-05,
"loss": 0.0169,
"step": 136
},
{
"epoch": 1.0057352603808214,
"grad_norm": 0.3710830807685852,
"learning_rate": 7.428516933977347e-05,
"loss": 0.0245,
"step": 137
},
{
"epoch": 1.0130763936682725,
"grad_norm": 0.18911249935626984,
"learning_rate": 7.215456393281776e-05,
"loss": 0.0126,
"step": 138
},
{
"epoch": 1.0204175269557239,
"grad_norm": 0.17865748703479767,
"learning_rate": 7.004524069080096e-05,
"loss": 0.0114,
"step": 139
},
{
"epoch": 1.027758660243175,
"grad_norm": 0.21352513134479523,
"learning_rate": 6.795777628163599e-05,
"loss": 0.013,
"step": 140
},
{
"epoch": 1.0350997935306263,
"grad_norm": 0.22643503546714783,
"learning_rate": 6.58927413972491e-05,
"loss": 0.0115,
"step": 141
},
{
"epoch": 1.0424409268180774,
"grad_norm": 0.13287460803985596,
"learning_rate": 6.385070059755846e-05,
"loss": 0.011,
"step": 142
},
{
"epoch": 1.0497820601055288,
"grad_norm": 0.16233468055725098,
"learning_rate": 6.183221215612904e-05,
"loss": 0.011,
"step": 143
},
{
"epoch": 1.05712319339298,
"grad_norm": 0.13603505492210388,
"learning_rate": 5.983782790754623e-05,
"loss": 0.0103,
"step": 144
},
{
"epoch": 1.0644643266804312,
"grad_norm": 0.16562862694263458,
"learning_rate": 5.786809309654982e-05,
"loss": 0.0124,
"step": 145
},
{
"epoch": 1.0718054599678826,
"grad_norm": 0.10689730197191238,
"learning_rate": 5.592354622896944e-05,
"loss": 0.0103,
"step": 146
},
{
"epoch": 1.079146593255334,
"grad_norm": 0.1163170337677002,
"learning_rate": 5.40047189245025e-05,
"loss": 0.0087,
"step": 147
},
{
"epoch": 1.086487726542785,
"grad_norm": 0.0866883397102356,
"learning_rate": 5.211213577137469e-05,
"loss": 0.0089,
"step": 148
},
{
"epoch": 1.0938288598302364,
"grad_norm": 0.210052490234375,
"learning_rate": 5.024631418292274e-05,
"loss": 0.0099,
"step": 149
},
{
"epoch": 1.1011699931176875,
"grad_norm": 0.09379356354475021,
"learning_rate": 4.840776425613886e-05,
"loss": 0.0085,
"step": 150
},
{
"epoch": 1.1011699931176875,
"eval_loss": 0.009465327486395836,
"eval_runtime": 11.7556,
"eval_samples_per_second": 4.253,
"eval_steps_per_second": 4.253,
"step": 150
},
{
"epoch": 1.1085111264051388,
"grad_norm": 0.1046932116150856,
"learning_rate": 4.659698863221513e-05,
"loss": 0.0092,
"step": 151
},
{
"epoch": 1.11585225969259,
"grad_norm": 0.1279962807893753,
"learning_rate": 4.481448235912671e-05,
"loss": 0.0088,
"step": 152
},
{
"epoch": 1.1231933929800413,
"grad_norm": 0.09951797872781754,
"learning_rate": 4.306073275629044e-05,
"loss": 0.009,
"step": 153
},
{
"epoch": 1.1305345262674926,
"grad_norm": 0.08270489424467087,
"learning_rate": 4.133621928133665e-05,
"loss": 0.009,
"step": 154
},
{
"epoch": 1.1378756595549437,
"grad_norm": 0.12509401142597198,
"learning_rate": 3.964141339903026e-05,
"loss": 0.009,
"step": 155
},
{
"epoch": 1.145216792842395,
"grad_norm": 0.09262503683567047,
"learning_rate": 3.797677845237696e-05,
"loss": 0.0093,
"step": 156
},
{
"epoch": 1.1525579261298462,
"grad_norm": 0.14835160970687866,
"learning_rate": 3.634276953594982e-05,
"loss": 0.0093,
"step": 157
},
{
"epoch": 1.1598990594172975,
"grad_norm": 0.1489514261484146,
"learning_rate": 3.473983337147118e-05,
"loss": 0.0087,
"step": 158
},
{
"epoch": 1.1672401927047489,
"grad_norm": 0.08459539711475372,
"learning_rate": 3.316840818568315e-05,
"loss": 0.0086,
"step": 159
},
{
"epoch": 1.1745813259922,
"grad_norm": 0.07776027172803879,
"learning_rate": 3.162892359054098e-05,
"loss": 0.0085,
"step": 160
},
{
"epoch": 1.1819224592796513,
"grad_norm": 0.10657629370689392,
"learning_rate": 3.0121800465761293e-05,
"loss": 0.0102,
"step": 161
},
{
"epoch": 1.1892635925671025,
"grad_norm": 0.11768706142902374,
"learning_rate": 2.8647450843757897e-05,
"loss": 0.0084,
"step": 162
},
{
"epoch": 1.1966047258545538,
"grad_norm": 0.06789965182542801,
"learning_rate": 2.7206277796996144e-05,
"loss": 0.0088,
"step": 163
},
{
"epoch": 1.2039458591420051,
"grad_norm": 0.0690593272447586,
"learning_rate": 2.5798675327796993e-05,
"loss": 0.0089,
"step": 164
},
{
"epoch": 1.2112869924294563,
"grad_norm": 0.0753282979130745,
"learning_rate": 2.4425028260620715e-05,
"loss": 0.0086,
"step": 165
},
{
"epoch": 1.2186281257169076,
"grad_norm": 0.08168316632509232,
"learning_rate": 2.3085712136859668e-05,
"loss": 0.0092,
"step": 166
},
{
"epoch": 1.2259692590043587,
"grad_norm": 0.07094614207744598,
"learning_rate": 2.178109311216913e-05,
"loss": 0.0086,
"step": 167
},
{
"epoch": 1.23331039229181,
"grad_norm": 0.07788494229316711,
"learning_rate": 2.0511527856363912e-05,
"loss": 0.0092,
"step": 168
},
{
"epoch": 1.2406515255792614,
"grad_norm": 0.0890539214015007,
"learning_rate": 1.927736345590839e-05,
"loss": 0.0091,
"step": 169
},
{
"epoch": 1.2479926588667125,
"grad_norm": 0.07291329652070999,
"learning_rate": 1.8078937319026654e-05,
"loss": 0.0088,
"step": 170
},
{
"epoch": 1.2553337921541639,
"grad_norm": 0.15383583307266235,
"learning_rate": 1.6916577083458228e-05,
"loss": 0.0099,
"step": 171
},
{
"epoch": 1.262674925441615,
"grad_norm": 0.11793084442615509,
"learning_rate": 1.579060052688548e-05,
"loss": 0.0084,
"step": 172
},
{
"epoch": 1.2700160587290663,
"grad_norm": 0.12637172639369965,
"learning_rate": 1.4701315480056164e-05,
"loss": 0.0081,
"step": 173
},
{
"epoch": 1.2773571920165177,
"grad_norm": 0.14355821907520294,
"learning_rate": 1.3649019742625623e-05,
"loss": 0.0081,
"step": 174
},
{
"epoch": 1.2846983253039688,
"grad_norm": 0.08072730153799057,
"learning_rate": 1.2634001001741373e-05,
"loss": 0.0074,
"step": 175
},
{
"epoch": 1.2846983253039688,
"eval_loss": 0.008246883749961853,
"eval_runtime": 11.7543,
"eval_samples_per_second": 4.254,
"eval_steps_per_second": 4.254,
"step": 175
},
{
"epoch": 1.29203945859142,
"grad_norm": 0.09285271167755127,
"learning_rate": 1.1656536753392287e-05,
"loss": 0.0077,
"step": 176
},
{
"epoch": 1.2993805918788712,
"grad_norm": 0.19393013417720795,
"learning_rate": 1.0716894226543953e-05,
"loss": 0.0078,
"step": 177
},
{
"epoch": 1.3067217251663226,
"grad_norm": 0.08033546060323715,
"learning_rate": 9.815330310080887e-06,
"loss": 0.0078,
"step": 178
},
{
"epoch": 1.3140628584537737,
"grad_norm": 0.07477666437625885,
"learning_rate": 8.952091482575824e-06,
"loss": 0.0078,
"step": 179
},
{
"epoch": 1.321403991741225,
"grad_norm": 0.11381042748689651,
"learning_rate": 8.127413744904804e-06,
"loss": 0.0088,
"step": 180
},
{
"epoch": 1.3287451250286764,
"grad_norm": 0.11595188826322556,
"learning_rate": 7.34152255572697e-06,
"loss": 0.008,
"step": 181
},
{
"epoch": 1.3360862583161275,
"grad_norm": 0.08916410803794861,
"learning_rate": 6.594632769846353e-06,
"loss": 0.008,
"step": 182
},
{
"epoch": 1.3434273916035788,
"grad_norm": 0.07762780040502548,
"learning_rate": 5.886948579472778e-06,
"loss": 0.0087,
"step": 183
},
{
"epoch": 1.3507685248910302,
"grad_norm": 0.09823337942361832,
"learning_rate": 5.218663458397715e-06,
"loss": 0.0078,
"step": 184
},
{
"epoch": 1.3581096581784813,
"grad_norm": 0.06783485412597656,
"learning_rate": 4.589960109100444e-06,
"loss": 0.0073,
"step": 185
},
{
"epoch": 1.3654507914659326,
"grad_norm": 0.06596047431230545,
"learning_rate": 4.001010412799138e-06,
"loss": 0.0078,
"step": 186
},
{
"epoch": 1.3727919247533837,
"grad_norm": 0.06541063636541367,
"learning_rate": 3.451975382460109e-06,
"loss": 0.0079,
"step": 187
},
{
"epoch": 1.380133058040835,
"grad_norm": 0.09579785168170929,
"learning_rate": 2.9430051187785962e-06,
"loss": 0.0078,
"step": 188
},
{
"epoch": 1.3874741913282862,
"grad_norm": 0.0694115087389946,
"learning_rate": 2.4742387691426445e-06,
"loss": 0.0078,
"step": 189
},
{
"epoch": 1.3948153246157375,
"grad_norm": 0.08039668947458267,
"learning_rate": 2.0458044895916513e-06,
"loss": 0.0086,
"step": 190
},
{
"epoch": 1.4021564579031889,
"grad_norm": 0.07761088013648987,
"learning_rate": 1.6578194097797258e-06,
"loss": 0.0081,
"step": 191
},
{
"epoch": 1.40949759119064,
"grad_norm": 0.06957986950874329,
"learning_rate": 1.3103896009537207e-06,
"loss": 0.008,
"step": 192
},
{
"epoch": 1.4168387244780913,
"grad_norm": 0.06713756173849106,
"learning_rate": 1.0036100469542786e-06,
"loss": 0.0085,
"step": 193
},
{
"epoch": 1.4241798577655427,
"grad_norm": 0.07192526757717133,
"learning_rate": 7.375646182482875e-07,
"loss": 0.0084,
"step": 194
},
{
"epoch": 1.4315209910529938,
"grad_norm": 0.09869284182786942,
"learning_rate": 5.123260489995229e-07,
"loss": 0.0087,
"step": 195
},
{
"epoch": 1.4388621243404451,
"grad_norm": 0.0659685879945755,
"learning_rate": 3.2795591718381975e-07,
"loss": 0.0082,
"step": 196
},
{
"epoch": 1.4462032576278963,
"grad_norm": 0.06561301648616791,
"learning_rate": 1.8450462775428942e-07,
"loss": 0.008,
"step": 197
},
{
"epoch": 1.4535443909153476,
"grad_norm": 0.06952385604381561,
"learning_rate": 8.201139886109264e-08,
"loss": 0.0091,
"step": 198
},
{
"epoch": 1.4608855242027987,
"grad_norm": 0.09199344366788864,
"learning_rate": 2.0504251129649374e-08,
"loss": 0.0088,
"step": 199
},
{
"epoch": 1.46822665749025,
"grad_norm": 0.08041491359472275,
"learning_rate": 0.0,
"loss": 0.009,
"step": 200
},
{
"epoch": 1.46822665749025,
"eval_loss": 0.008054222911596298,
"eval_runtime": 11.7496,
"eval_samples_per_second": 4.255,
"eval_steps_per_second": 4.255,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.5025410244241e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}