{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.2167932662697598,
"eval_steps": 88,
"global_step": 264,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.000821186614658181,
"grad_norm": 0.480191707611084,
"learning_rate": 2e-05,
"loss": 1.332,
"step": 1
},
{
"epoch": 0.001642373229316362,
"grad_norm": 0.43048733472824097,
"learning_rate": 4e-05,
"loss": 1.2784,
"step": 2
},
{
"epoch": 0.002463559843974543,
"grad_norm": 0.33739173412323,
"learning_rate": 6e-05,
"loss": 1.3286,
"step": 3
},
{
"epoch": 0.003284746458632724,
"grad_norm": 0.432579904794693,
"learning_rate": 8e-05,
"loss": 1.3079,
"step": 4
},
{
"epoch": 0.0041059330732909054,
"grad_norm": 0.3490436375141144,
"learning_rate": 0.0001,
"loss": 1.2182,
"step": 5
},
{
"epoch": 0.004927119687949086,
"grad_norm": 0.20206260681152344,
"learning_rate": 9.997257268239166e-05,
"loss": 1.2828,
"step": 6
},
{
"epoch": 0.005748306302607267,
"grad_norm": 0.15355628728866577,
"learning_rate": 9.994514536478333e-05,
"loss": 1.1616,
"step": 7
},
{
"epoch": 0.006569492917265448,
"grad_norm": 0.16756780445575714,
"learning_rate": 9.9917718047175e-05,
"loss": 1.0935,
"step": 8
},
{
"epoch": 0.00739067953192363,
"grad_norm": 0.17429664731025696,
"learning_rate": 9.989029072956665e-05,
"loss": 0.9694,
"step": 9
},
{
"epoch": 0.008211866146581811,
"grad_norm": 0.22355175018310547,
"learning_rate": 9.986286341195832e-05,
"loss": 0.9407,
"step": 10
},
{
"epoch": 0.009033052761239991,
"grad_norm": 0.33407703042030334,
"learning_rate": 9.983543609434997e-05,
"loss": 0.7717,
"step": 11
},
{
"epoch": 0.009854239375898173,
"grad_norm": 0.47473278641700745,
"learning_rate": 9.980800877674164e-05,
"loss": 0.7763,
"step": 12
},
{
"epoch": 0.010675425990556354,
"grad_norm": 0.2812059819698334,
"learning_rate": 9.978058145913331e-05,
"loss": 0.6258,
"step": 13
},
{
"epoch": 0.011496612605214535,
"grad_norm": 0.23547925055027008,
"learning_rate": 9.975315414152496e-05,
"loss": 0.5968,
"step": 14
},
{
"epoch": 0.012317799219872716,
"grad_norm": 0.18453630805015564,
"learning_rate": 9.972572682391662e-05,
"loss": 0.5368,
"step": 15
},
{
"epoch": 0.013138985834530896,
"grad_norm": 0.16103577613830566,
"learning_rate": 9.969829950630828e-05,
"loss": 0.4909,
"step": 16
},
{
"epoch": 0.013960172449189078,
"grad_norm": 0.18651455640792847,
"learning_rate": 9.967087218869995e-05,
"loss": 0.5135,
"step": 17
},
{
"epoch": 0.01478135906384726,
"grad_norm": 0.11300642043352127,
"learning_rate": 9.96434448710916e-05,
"loss": 0.5069,
"step": 18
},
{
"epoch": 0.01560254567850544,
"grad_norm": 0.10415703803300858,
"learning_rate": 9.961601755348327e-05,
"loss": 0.4851,
"step": 19
},
{
"epoch": 0.016423732293163622,
"grad_norm": 0.11693017929792404,
"learning_rate": 9.958859023587493e-05,
"loss": 0.4625,
"step": 20
},
{
"epoch": 0.017244918907821802,
"grad_norm": 0.10035043954849243,
"learning_rate": 9.95611629182666e-05,
"loss": 0.4822,
"step": 21
},
{
"epoch": 0.018066105522479982,
"grad_norm": 0.10483390837907791,
"learning_rate": 9.953373560065826e-05,
"loss": 0.4342,
"step": 22
},
{
"epoch": 0.018887292137138165,
"grad_norm": 2.8405802249908447,
"learning_rate": 9.950630828304992e-05,
"loss": 0.4664,
"step": 23
},
{
"epoch": 0.019708478751796345,
"grad_norm": 0.13821998238563538,
"learning_rate": 9.947888096544159e-05,
"loss": 0.4468,
"step": 24
},
{
"epoch": 0.020529665366454525,
"grad_norm": 0.1991378366947174,
"learning_rate": 9.945145364783325e-05,
"loss": 0.4605,
"step": 25
},
{
"epoch": 0.02135085198111271,
"grad_norm": 0.07619134336709976,
"learning_rate": 9.942402633022491e-05,
"loss": 0.4597,
"step": 26
},
{
"epoch": 0.02217203859577089,
"grad_norm": 0.13373583555221558,
"learning_rate": 9.939659901261658e-05,
"loss": 0.4626,
"step": 27
},
{
"epoch": 0.02299322521042907,
"grad_norm": 0.09962721168994904,
"learning_rate": 9.936917169500823e-05,
"loss": 0.4638,
"step": 28
},
{
"epoch": 0.023814411825087253,
"grad_norm": 0.09395964443683624,
"learning_rate": 9.93417443773999e-05,
"loss": 0.4569,
"step": 29
},
{
"epoch": 0.024635598439745433,
"grad_norm": 0.09109952300786972,
"learning_rate": 9.931431705979157e-05,
"loss": 0.4439,
"step": 30
},
{
"epoch": 0.025456785054403613,
"grad_norm": 0.10370515286922455,
"learning_rate": 9.928688974218322e-05,
"loss": 0.4425,
"step": 31
},
{
"epoch": 0.026277971669061793,
"grad_norm": 0.2153477966785431,
"learning_rate": 9.925946242457488e-05,
"loss": 0.4503,
"step": 32
},
{
"epoch": 0.027099158283719976,
"grad_norm": 0.08772841095924377,
"learning_rate": 9.923203510696654e-05,
"loss": 0.419,
"step": 33
},
{
"epoch": 0.027920344898378156,
"grad_norm": 0.10951374471187592,
"learning_rate": 9.920460778935821e-05,
"loss": 0.4353,
"step": 34
},
{
"epoch": 0.028741531513036336,
"grad_norm": 0.09190870076417923,
"learning_rate": 9.917718047174987e-05,
"loss": 0.5196,
"step": 35
},
{
"epoch": 0.02956271812769452,
"grad_norm": 0.07667124271392822,
"learning_rate": 9.914975315414153e-05,
"loss": 0.4358,
"step": 36
},
{
"epoch": 0.0303839047423527,
"grad_norm": 0.1514267474412918,
"learning_rate": 9.912232583653319e-05,
"loss": 0.411,
"step": 37
},
{
"epoch": 0.03120509135701088,
"grad_norm": 0.09086549282073975,
"learning_rate": 9.909489851892486e-05,
"loss": 0.4003,
"step": 38
},
{
"epoch": 0.032026277971669063,
"grad_norm": 0.2616782486438751,
"learning_rate": 9.906747120131652e-05,
"loss": 0.4842,
"step": 39
},
{
"epoch": 0.032847464586327244,
"grad_norm": 0.11908283084630966,
"learning_rate": 9.904004388370818e-05,
"loss": 0.4143,
"step": 40
},
{
"epoch": 0.033668651200985424,
"grad_norm": 0.07770542800426483,
"learning_rate": 9.901261656609983e-05,
"loss": 0.3873,
"step": 41
},
{
"epoch": 0.034489837815643604,
"grad_norm": 0.08934606611728668,
"learning_rate": 9.89851892484915e-05,
"loss": 0.4235,
"step": 42
},
{
"epoch": 0.035311024430301784,
"grad_norm": 0.09303563088178635,
"learning_rate": 9.895776193088317e-05,
"loss": 0.4103,
"step": 43
},
{
"epoch": 0.036132211044959964,
"grad_norm": 0.08622181415557861,
"learning_rate": 9.893033461327482e-05,
"loss": 0.448,
"step": 44
},
{
"epoch": 0.03695339765961815,
"grad_norm": 0.08822862058877945,
"learning_rate": 9.890290729566649e-05,
"loss": 0.3855,
"step": 45
},
{
"epoch": 0.03777458427427633,
"grad_norm": 0.08557698875665665,
"learning_rate": 9.887547997805814e-05,
"loss": 0.3945,
"step": 46
},
{
"epoch": 0.03859577088893451,
"grad_norm": 0.07540106773376465,
"learning_rate": 9.884805266044981e-05,
"loss": 0.4192,
"step": 47
},
{
"epoch": 0.03941695750359269,
"grad_norm": 0.1023702397942543,
"learning_rate": 9.882062534284148e-05,
"loss": 0.4126,
"step": 48
},
{
"epoch": 0.04023814411825087,
"grad_norm": 0.07779772579669952,
"learning_rate": 9.879319802523313e-05,
"loss": 0.4244,
"step": 49
},
{
"epoch": 0.04105933073290905,
"grad_norm": 0.08826564252376556,
"learning_rate": 9.876577070762479e-05,
"loss": 0.415,
"step": 50
},
{
"epoch": 0.04188051734756724,
"grad_norm": 0.08254576474428177,
"learning_rate": 9.873834339001646e-05,
"loss": 0.4346,
"step": 51
},
{
"epoch": 0.04270170396222542,
"grad_norm": 0.08287151902914047,
"learning_rate": 9.871091607240812e-05,
"loss": 0.4142,
"step": 52
},
{
"epoch": 0.0435228905768836,
"grad_norm": 0.08196476101875305,
"learning_rate": 9.868348875479978e-05,
"loss": 0.3822,
"step": 53
},
{
"epoch": 0.04434407719154178,
"grad_norm": 0.08654092252254486,
"learning_rate": 9.865606143719145e-05,
"loss": 0.3937,
"step": 54
},
{
"epoch": 0.04516526380619996,
"grad_norm": 0.1102684736251831,
"learning_rate": 9.86286341195831e-05,
"loss": 0.4508,
"step": 55
},
{
"epoch": 0.04598645042085814,
"grad_norm": 0.08240954577922821,
"learning_rate": 9.860120680197478e-05,
"loss": 0.4069,
"step": 56
},
{
"epoch": 0.04680763703551632,
"grad_norm": 0.08542217314243317,
"learning_rate": 9.857377948436644e-05,
"loss": 0.5002,
"step": 57
},
{
"epoch": 0.047628823650174505,
"grad_norm": 0.08390172570943832,
"learning_rate": 9.854635216675809e-05,
"loss": 0.3851,
"step": 58
},
{
"epoch": 0.048450010264832685,
"grad_norm": 0.10694168508052826,
"learning_rate": 9.851892484914976e-05,
"loss": 0.4026,
"step": 59
},
{
"epoch": 0.049271196879490865,
"grad_norm": 0.0852806493639946,
"learning_rate": 9.849149753154143e-05,
"loss": 0.424,
"step": 60
},
{
"epoch": 0.050092383494149045,
"grad_norm": 0.15425831079483032,
"learning_rate": 9.846407021393308e-05,
"loss": 0.4307,
"step": 61
},
{
"epoch": 0.050913570108807225,
"grad_norm": 0.08546218276023865,
"learning_rate": 9.843664289632475e-05,
"loss": 0.4386,
"step": 62
},
{
"epoch": 0.051734756723465405,
"grad_norm": 0.08588795363903046,
"learning_rate": 9.84092155787164e-05,
"loss": 0.4433,
"step": 63
},
{
"epoch": 0.052555943338123585,
"grad_norm": 0.08901514858007431,
"learning_rate": 9.838178826110807e-05,
"loss": 0.4263,
"step": 64
},
{
"epoch": 0.05337712995278177,
"grad_norm": 0.17668095231056213,
"learning_rate": 9.835436094349974e-05,
"loss": 0.407,
"step": 65
},
{
"epoch": 0.05419831656743995,
"grad_norm": 0.081763856112957,
"learning_rate": 9.83269336258914e-05,
"loss": 0.374,
"step": 66
},
{
"epoch": 0.05501950318209813,
"grad_norm": 0.09063572436571121,
"learning_rate": 9.829950630828305e-05,
"loss": 0.3839,
"step": 67
},
{
"epoch": 0.05584068979675631,
"grad_norm": 0.08264392614364624,
"learning_rate": 9.827207899067472e-05,
"loss": 0.4676,
"step": 68
},
{
"epoch": 0.05666187641141449,
"grad_norm": 0.0827123150229454,
"learning_rate": 9.824465167306638e-05,
"loss": 0.3801,
"step": 69
},
{
"epoch": 0.05748306302607267,
"grad_norm": 0.07972189038991928,
"learning_rate": 9.821722435545804e-05,
"loss": 0.3758,
"step": 70
},
{
"epoch": 0.05830424964073085,
"grad_norm": 0.08738942444324493,
"learning_rate": 9.81897970378497e-05,
"loss": 0.3634,
"step": 71
},
{
"epoch": 0.05912543625538904,
"grad_norm": 0.08442792296409607,
"learning_rate": 9.816236972024136e-05,
"loss": 0.3803,
"step": 72
},
{
"epoch": 0.05994662287004722,
"grad_norm": 0.2751137316226959,
"learning_rate": 9.813494240263303e-05,
"loss": 0.3869,
"step": 73
},
{
"epoch": 0.0607678094847054,
"grad_norm": 0.19429296255111694,
"learning_rate": 9.81075150850247e-05,
"loss": 0.3992,
"step": 74
},
{
"epoch": 0.06158899609936358,
"grad_norm": 0.08392605930566788,
"learning_rate": 9.808008776741635e-05,
"loss": 0.361,
"step": 75
},
{
"epoch": 0.06241018271402176,
"grad_norm": 2.01043963432312,
"learning_rate": 9.8052660449808e-05,
"loss": 0.3787,
"step": 76
},
{
"epoch": 0.06323136932867994,
"grad_norm": 0.0808538943529129,
"learning_rate": 9.802523313219967e-05,
"loss": 0.4013,
"step": 77
},
{
"epoch": 0.06405255594333813,
"grad_norm": 0.07846518605947495,
"learning_rate": 9.799780581459134e-05,
"loss": 0.3808,
"step": 78
},
{
"epoch": 0.0648737425579963,
"grad_norm": 0.08152970671653748,
"learning_rate": 9.7970378496983e-05,
"loss": 0.4076,
"step": 79
},
{
"epoch": 0.06569492917265449,
"grad_norm": 0.07745791226625443,
"learning_rate": 9.794295117937466e-05,
"loss": 0.3981,
"step": 80
},
{
"epoch": 0.06651611578731266,
"grad_norm": 0.0842173770070076,
"learning_rate": 9.791552386176632e-05,
"loss": 0.4469,
"step": 81
},
{
"epoch": 0.06733730240197085,
"grad_norm": 0.07920663058757782,
"learning_rate": 9.788809654415799e-05,
"loss": 0.3988,
"step": 82
},
{
"epoch": 0.06815848901662903,
"grad_norm": 0.07899456471204758,
"learning_rate": 9.786066922654965e-05,
"loss": 0.4418,
"step": 83
},
{
"epoch": 0.06897967563128721,
"grad_norm": 0.07229727506637573,
"learning_rate": 9.783324190894131e-05,
"loss": 0.4016,
"step": 84
},
{
"epoch": 0.0698008622459454,
"grad_norm": 0.08062436431646347,
"learning_rate": 9.780581459133296e-05,
"loss": 0.3762,
"step": 85
},
{
"epoch": 0.07062204886060357,
"grad_norm": 0.08440960198640823,
"learning_rate": 9.777838727372464e-05,
"loss": 0.4362,
"step": 86
},
{
"epoch": 0.07144323547526175,
"grad_norm": 0.07544733583927155,
"learning_rate": 9.77509599561163e-05,
"loss": 0.4135,
"step": 87
},
{
"epoch": 0.07226442208991993,
"grad_norm": 0.1760656237602234,
"learning_rate": 9.772353263850797e-05,
"loss": 0.3887,
"step": 88
},
{
"epoch": 0.07226442208991993,
"eval_runtime": 494.3052,
"eval_samples_per_second": 0.399,
"eval_steps_per_second": 0.2,
"step": 88
},
{
"epoch": 0.07308560870457811,
"grad_norm": 0.08049104362726212,
"learning_rate": 9.769610532089962e-05,
"loss": 0.4272,
"step": 89
},
{
"epoch": 0.0739067953192363,
"grad_norm": 0.07617965340614319,
"learning_rate": 9.766867800329129e-05,
"loss": 0.3873,
"step": 90
},
{
"epoch": 0.07472798193389447,
"grad_norm": 0.07975462824106216,
"learning_rate": 9.764125068568296e-05,
"loss": 0.3903,
"step": 91
},
{
"epoch": 0.07554916854855266,
"grad_norm": 0.08299189805984497,
"learning_rate": 9.761382336807461e-05,
"loss": 0.404,
"step": 92
},
{
"epoch": 0.07637035516321083,
"grad_norm": 0.08578819036483765,
"learning_rate": 9.758639605046626e-05,
"loss": 0.3983,
"step": 93
},
{
"epoch": 0.07719154177786902,
"grad_norm": 0.10872071981430054,
"learning_rate": 9.755896873285793e-05,
"loss": 0.3553,
"step": 94
},
{
"epoch": 0.07801272839252721,
"grad_norm": 0.08121436834335327,
"learning_rate": 9.75315414152496e-05,
"loss": 0.408,
"step": 95
},
{
"epoch": 0.07883391500718538,
"grad_norm": 0.09175996482372284,
"learning_rate": 9.750411409764125e-05,
"loss": 0.4008,
"step": 96
},
{
"epoch": 0.07965510162184357,
"grad_norm": 0.08122789114713669,
"learning_rate": 9.747668678003292e-05,
"loss": 0.4045,
"step": 97
},
{
"epoch": 0.08047628823650174,
"grad_norm": 0.08402436226606369,
"learning_rate": 9.744925946242458e-05,
"loss": 0.3814,
"step": 98
},
{
"epoch": 0.08129747485115993,
"grad_norm": 0.08454861491918564,
"learning_rate": 9.742183214481624e-05,
"loss": 0.3904,
"step": 99
},
{
"epoch": 0.0821186614658181,
"grad_norm": 0.08125888556241989,
"learning_rate": 9.739440482720791e-05,
"loss": 0.3681,
"step": 100
},
{
"epoch": 0.08293984808047629,
"grad_norm": 0.08544078469276428,
"learning_rate": 9.736697750959957e-05,
"loss": 0.367,
"step": 101
},
{
"epoch": 0.08376103469513448,
"grad_norm": 0.07539089769124985,
"learning_rate": 9.733955019199122e-05,
"loss": 0.3821,
"step": 102
},
{
"epoch": 0.08458222130979265,
"grad_norm": 0.07529085874557495,
"learning_rate": 9.731212287438289e-05,
"loss": 0.4169,
"step": 103
},
{
"epoch": 0.08540340792445084,
"grad_norm": 0.07588034868240356,
"learning_rate": 9.728469555677456e-05,
"loss": 0.4211,
"step": 104
},
{
"epoch": 0.08622459453910901,
"grad_norm": 0.08019097149372101,
"learning_rate": 9.725726823916621e-05,
"loss": 0.4033,
"step": 105
},
{
"epoch": 0.0870457811537672,
"grad_norm": 0.07878712564706802,
"learning_rate": 9.722984092155788e-05,
"loss": 0.3842,
"step": 106
},
{
"epoch": 0.08786696776842537,
"grad_norm": 0.08083963394165039,
"learning_rate": 9.720241360394953e-05,
"loss": 0.3558,
"step": 107
},
{
"epoch": 0.08868815438308356,
"grad_norm": 0.08340411633253098,
"learning_rate": 9.71749862863412e-05,
"loss": 0.4037,
"step": 108
},
{
"epoch": 0.08950934099774174,
"grad_norm": 0.09317754209041595,
"learning_rate": 9.714755896873287e-05,
"loss": 0.3874,
"step": 109
},
{
"epoch": 0.09033052761239992,
"grad_norm": 0.07143125683069229,
"learning_rate": 9.712013165112452e-05,
"loss": 0.3441,
"step": 110
},
{
"epoch": 0.0911517142270581,
"grad_norm": 0.0909111350774765,
"learning_rate": 9.709270433351618e-05,
"loss": 0.3552,
"step": 111
},
{
"epoch": 0.09197290084171628,
"grad_norm": 0.08041603863239288,
"learning_rate": 9.706527701590785e-05,
"loss": 0.394,
"step": 112
},
{
"epoch": 0.09279408745637446,
"grad_norm": 0.08147992193698883,
"learning_rate": 9.703784969829951e-05,
"loss": 0.3916,
"step": 113
},
{
"epoch": 0.09361527407103264,
"grad_norm": 0.08365318924188614,
"learning_rate": 9.701042238069117e-05,
"loss": 0.4075,
"step": 114
},
{
"epoch": 0.09443646068569082,
"grad_norm": 0.083246149122715,
"learning_rate": 9.698299506308284e-05,
"loss": 0.3566,
"step": 115
},
{
"epoch": 0.09525764730034901,
"grad_norm": 0.0942649245262146,
"learning_rate": 9.695556774547449e-05,
"loss": 0.3516,
"step": 116
},
{
"epoch": 0.09607883391500718,
"grad_norm": 0.08110091835260391,
"learning_rate": 9.692814042786616e-05,
"loss": 0.3902,
"step": 117
},
{
"epoch": 0.09690002052966537,
"grad_norm": 0.43124014139175415,
"learning_rate": 9.690071311025783e-05,
"loss": 0.3956,
"step": 118
},
{
"epoch": 0.09772120714432354,
"grad_norm": 0.08204706013202667,
"learning_rate": 9.687328579264948e-05,
"loss": 0.3865,
"step": 119
},
{
"epoch": 0.09854239375898173,
"grad_norm": 0.08262762427330017,
"learning_rate": 9.684585847504113e-05,
"loss": 0.3777,
"step": 120
},
{
"epoch": 0.0993635803736399,
"grad_norm": 0.08233962953090668,
"learning_rate": 9.681843115743282e-05,
"loss": 0.3976,
"step": 121
},
{
"epoch": 0.10018476698829809,
"grad_norm": 0.7428120970726013,
"learning_rate": 9.679100383982447e-05,
"loss": 0.377,
"step": 122
},
{
"epoch": 0.10100595360295628,
"grad_norm": 0.07909400761127472,
"learning_rate": 9.676357652221614e-05,
"loss": 0.3654,
"step": 123
},
{
"epoch": 0.10182714021761445,
"grad_norm": 0.08714035898447037,
"learning_rate": 9.673614920460779e-05,
"loss": 0.4181,
"step": 124
},
{
"epoch": 0.10264832683227264,
"grad_norm": 0.08017311245203018,
"learning_rate": 9.670872188699946e-05,
"loss": 0.4052,
"step": 125
},
{
"epoch": 0.10346951344693081,
"grad_norm": 0.13821078836917877,
"learning_rate": 9.668129456939113e-05,
"loss": 0.3733,
"step": 126
},
{
"epoch": 0.104290700061589,
"grad_norm": 0.1609969437122345,
"learning_rate": 9.665386725178278e-05,
"loss": 0.3686,
"step": 127
},
{
"epoch": 0.10511188667624717,
"grad_norm": 0.1280309557914734,
"learning_rate": 9.662643993417444e-05,
"loss": 0.4036,
"step": 128
},
{
"epoch": 0.10593307329090536,
"grad_norm": 0.08747898787260056,
"learning_rate": 9.65990126165661e-05,
"loss": 0.3587,
"step": 129
},
{
"epoch": 0.10675425990556354,
"grad_norm": 0.1252209097146988,
"learning_rate": 9.657158529895777e-05,
"loss": 0.3626,
"step": 130
},
{
"epoch": 0.10757544652022172,
"grad_norm": 0.09374388307332993,
"learning_rate": 9.654415798134943e-05,
"loss": 0.3824,
"step": 131
},
{
"epoch": 0.1083966331348799,
"grad_norm": 0.08730709552764893,
"learning_rate": 9.65167306637411e-05,
"loss": 0.3827,
"step": 132
},
{
"epoch": 0.10921781974953808,
"grad_norm": 0.07719024270772934,
"learning_rate": 9.648930334613275e-05,
"loss": 0.3762,
"step": 133
},
{
"epoch": 0.11003900636419627,
"grad_norm": 0.08843278139829636,
"learning_rate": 9.646187602852442e-05,
"loss": 0.3843,
"step": 134
},
{
"epoch": 0.11086019297885444,
"grad_norm": 0.08568207919597626,
"learning_rate": 9.643444871091608e-05,
"loss": 0.3594,
"step": 135
},
{
"epoch": 0.11168137959351263,
"grad_norm": 0.08556952327489853,
"learning_rate": 9.640702139330774e-05,
"loss": 0.3886,
"step": 136
},
{
"epoch": 0.11250256620817081,
"grad_norm": 0.09016801416873932,
"learning_rate": 9.63795940756994e-05,
"loss": 0.4023,
"step": 137
},
{
"epoch": 0.11332375282282899,
"grad_norm": 0.08133590966463089,
"learning_rate": 9.635216675809106e-05,
"loss": 0.3634,
"step": 138
},
{
"epoch": 0.11414493943748717,
"grad_norm": 0.2211730182170868,
"learning_rate": 9.632473944048273e-05,
"loss": 0.3518,
"step": 139
},
{
"epoch": 0.11496612605214535,
"grad_norm": 0.08816584199666977,
"learning_rate": 9.629731212287438e-05,
"loss": 0.3727,
"step": 140
},
{
"epoch": 0.11578731266680353,
"grad_norm": 0.11618969589471817,
"learning_rate": 9.626988480526605e-05,
"loss": 0.4247,
"step": 141
},
{
"epoch": 0.1166084992814617,
"grad_norm": 0.09243030101060867,
"learning_rate": 9.62424574876577e-05,
"loss": 0.396,
"step": 142
},
{
"epoch": 0.11742968589611989,
"grad_norm": 0.08566376566886902,
"learning_rate": 9.621503017004937e-05,
"loss": 0.3852,
"step": 143
},
{
"epoch": 0.11825087251077808,
"grad_norm": 0.08220973610877991,
"learning_rate": 9.618760285244104e-05,
"loss": 0.3961,
"step": 144
},
{
"epoch": 0.11907205912543625,
"grad_norm": 0.08240345865488052,
"learning_rate": 9.61601755348327e-05,
"loss": 0.3518,
"step": 145
},
{
"epoch": 0.11989324574009444,
"grad_norm": 0.08472532778978348,
"learning_rate": 9.613274821722435e-05,
"loss": 0.3586,
"step": 146
},
{
"epoch": 0.12071443235475261,
"grad_norm": 0.08407485485076904,
"learning_rate": 9.610532089961602e-05,
"loss": 0.3797,
"step": 147
},
{
"epoch": 0.1215356189694108,
"grad_norm": 0.09284385293722153,
"learning_rate": 9.607789358200769e-05,
"loss": 0.3499,
"step": 148
},
{
"epoch": 0.12235680558406897,
"grad_norm": 0.08499818295240402,
"learning_rate": 9.605046626439934e-05,
"loss": 0.3722,
"step": 149
},
{
"epoch": 0.12317799219872716,
"grad_norm": 0.080271415412426,
"learning_rate": 9.602303894679101e-05,
"loss": 0.3871,
"step": 150
},
{
"epoch": 0.12399917881338535,
"grad_norm": 0.07850060611963272,
"learning_rate": 9.599561162918266e-05,
"loss": 0.3679,
"step": 151
},
{
"epoch": 0.12482036542804352,
"grad_norm": 0.07685016840696335,
"learning_rate": 9.596818431157433e-05,
"loss": 0.4078,
"step": 152
},
{
"epoch": 0.1256415520427017,
"grad_norm": 0.09402357786893845,
"learning_rate": 9.5940756993966e-05,
"loss": 0.3996,
"step": 153
},
{
"epoch": 0.12646273865735988,
"grad_norm": 0.08445476740598679,
"learning_rate": 9.591332967635765e-05,
"loss": 0.3841,
"step": 154
},
{
"epoch": 0.12728392527201807,
"grad_norm": 0.08233911544084549,
"learning_rate": 9.588590235874932e-05,
"loss": 0.3529,
"step": 155
},
{
"epoch": 0.12810511188667625,
"grad_norm": 0.07896068692207336,
"learning_rate": 9.585847504114099e-05,
"loss": 0.3585,
"step": 156
},
{
"epoch": 0.12892629850133444,
"grad_norm": 0.0822276696562767,
"learning_rate": 9.583104772353264e-05,
"loss": 0.3962,
"step": 157
},
{
"epoch": 0.1297474851159926,
"grad_norm": 0.07977598905563354,
"learning_rate": 9.580362040592431e-05,
"loss": 0.454,
"step": 158
},
{
"epoch": 0.1305686717306508,
"grad_norm": 0.0857616737484932,
"learning_rate": 9.577619308831597e-05,
"loss": 0.3954,
"step": 159
},
{
"epoch": 0.13138985834530897,
"grad_norm": 0.0874355211853981,
"learning_rate": 9.574876577070763e-05,
"loss": 0.3591,
"step": 160
},
{
"epoch": 0.13221104495996716,
"grad_norm": 0.07877468317747116,
"learning_rate": 9.57213384530993e-05,
"loss": 0.348,
"step": 161
},
{
"epoch": 0.13303223157462532,
"grad_norm": 0.08618593961000443,
"learning_rate": 9.569391113549096e-05,
"loss": 0.3723,
"step": 162
},
{
"epoch": 0.1338534181892835,
"grad_norm": 0.08144336938858032,
"learning_rate": 9.566648381788261e-05,
"loss": 0.4146,
"step": 163
},
{
"epoch": 0.1346746048039417,
"grad_norm": 0.07322760671377182,
"learning_rate": 9.563905650027428e-05,
"loss": 0.3107,
"step": 164
},
{
"epoch": 0.13549579141859988,
"grad_norm": 0.08007095754146576,
"learning_rate": 9.561162918266595e-05,
"loss": 0.4128,
"step": 165
},
{
"epoch": 0.13631697803325807,
"grad_norm": 0.09636646509170532,
"learning_rate": 9.55842018650576e-05,
"loss": 0.4089,
"step": 166
},
{
"epoch": 0.13713816464791623,
"grad_norm": 0.08381053060293198,
"learning_rate": 9.555677454744927e-05,
"loss": 0.3624,
"step": 167
},
{
"epoch": 0.13795935126257441,
"grad_norm": 0.07476504147052765,
"learning_rate": 9.552934722984092e-05,
"loss": 0.3906,
"step": 168
},
{
"epoch": 0.1387805378772326,
"grad_norm": 0.0901239663362503,
"learning_rate": 9.550191991223259e-05,
"loss": 0.3378,
"step": 169
},
{
"epoch": 0.1396017244918908,
"grad_norm": 0.0813356265425682,
"learning_rate": 9.547449259462426e-05,
"loss": 0.3627,
"step": 170
},
{
"epoch": 0.14042291110654898,
"grad_norm": 0.14319093525409698,
"learning_rate": 9.544706527701591e-05,
"loss": 0.3512,
"step": 171
},
{
"epoch": 0.14124409772120713,
"grad_norm": 0.13329866528511047,
"learning_rate": 9.541963795940757e-05,
"loss": 0.3809,
"step": 172
},
{
"epoch": 0.14206528433586532,
"grad_norm": 0.0815596953034401,
"learning_rate": 9.539221064179923e-05,
"loss": 0.3444,
"step": 173
},
{
"epoch": 0.1428864709505235,
"grad_norm": 0.08646956831216812,
"learning_rate": 9.53647833241909e-05,
"loss": 0.3699,
"step": 174
},
{
"epoch": 0.1437076575651817,
"grad_norm": 0.09374339133501053,
"learning_rate": 9.533735600658256e-05,
"loss": 0.3569,
"step": 175
},
{
"epoch": 0.14452884417983985,
"grad_norm": 0.0834718644618988,
"learning_rate": 9.530992868897422e-05,
"loss": 0.3708,
"step": 176
},
{
"epoch": 0.14452884417983985,
"eval_runtime": 493.9539,
"eval_samples_per_second": 0.399,
"eval_steps_per_second": 0.2,
"step": 176
},
{
"epoch": 0.14535003079449804,
"grad_norm": 0.08648290485143661,
"learning_rate": 9.528250137136588e-05,
"loss": 0.379,
"step": 177
},
{
"epoch": 0.14617121740915623,
"grad_norm": 0.08577203750610352,
"learning_rate": 9.525507405375755e-05,
"loss": 0.4368,
"step": 178
},
{
"epoch": 0.14699240402381442,
"grad_norm": 0.1023576483130455,
"learning_rate": 9.522764673614921e-05,
"loss": 0.3553,
"step": 179
},
{
"epoch": 0.1478135906384726,
"grad_norm": 0.08062634617090225,
"learning_rate": 9.520021941854087e-05,
"loss": 0.3616,
"step": 180
},
{
"epoch": 0.14863477725313076,
"grad_norm": 0.07487751543521881,
"learning_rate": 9.517279210093252e-05,
"loss": 0.4413,
"step": 181
},
{
"epoch": 0.14945596386778895,
"grad_norm": 0.07332492619752884,
"learning_rate": 9.514536478332419e-05,
"loss": 0.3166,
"step": 182
},
{
"epoch": 0.15027715048244714,
"grad_norm": 0.08658608049154282,
"learning_rate": 9.511793746571586e-05,
"loss": 0.332,
"step": 183
},
{
"epoch": 0.15109833709710532,
"grad_norm": 0.10461894422769547,
"learning_rate": 9.509051014810751e-05,
"loss": 0.3653,
"step": 184
},
{
"epoch": 0.1519195237117635,
"grad_norm": 0.19619494676589966,
"learning_rate": 9.506308283049918e-05,
"loss": 0.4224,
"step": 185
},
{
"epoch": 0.15274071032642167,
"grad_norm": 0.08313202857971191,
"learning_rate": 9.503565551289084e-05,
"loss": 0.3512,
"step": 186
},
{
"epoch": 0.15356189694107986,
"grad_norm": 0.08686342090368271,
"learning_rate": 9.50082281952825e-05,
"loss": 0.3352,
"step": 187
},
{
"epoch": 0.15438308355573804,
"grad_norm": 0.09020522236824036,
"learning_rate": 9.498080087767417e-05,
"loss": 0.4138,
"step": 188
},
{
"epoch": 0.15520427017039623,
"grad_norm": 0.0799839124083519,
"learning_rate": 9.495337356006583e-05,
"loss": 0.3436,
"step": 189
},
{
"epoch": 0.15602545678505442,
"grad_norm": 0.08851379156112671,
"learning_rate": 9.49259462424575e-05,
"loss": 0.3933,
"step": 190
},
{
"epoch": 0.15684664339971258,
"grad_norm": 0.08521082252264023,
"learning_rate": 9.489851892484916e-05,
"loss": 0.3667,
"step": 191
},
{
"epoch": 0.15766783001437076,
"grad_norm": 0.09019312262535095,
"learning_rate": 9.487109160724082e-05,
"loss": 0.3556,
"step": 192
},
{
"epoch": 0.15848901662902895,
"grad_norm": 0.08162654936313629,
"learning_rate": 9.484366428963248e-05,
"loss": 0.3864,
"step": 193
},
{
"epoch": 0.15931020324368714,
"grad_norm": 0.08963490277528763,
"learning_rate": 9.481623697202414e-05,
"loss": 0.391,
"step": 194
},
{
"epoch": 0.1601313898583453,
"grad_norm": 0.10057719051837921,
"learning_rate": 9.47888096544158e-05,
"loss": 0.3599,
"step": 195
},
{
"epoch": 0.16095257647300348,
"grad_norm": 0.1694149523973465,
"learning_rate": 9.476138233680747e-05,
"loss": 0.3612,
"step": 196
},
{
"epoch": 0.16177376308766167,
"grad_norm": 0.08255323767662048,
"learning_rate": 9.473395501919913e-05,
"loss": 0.3441,
"step": 197
},
{
"epoch": 0.16259494970231986,
"grad_norm": 0.09228333085775375,
"learning_rate": 9.470652770159078e-05,
"loss": 0.3432,
"step": 198
},
{
"epoch": 0.16341613631697804,
"grad_norm": 0.11736617982387543,
"learning_rate": 9.467910038398245e-05,
"loss": 0.3408,
"step": 199
},
{
"epoch": 0.1642373229316362,
"grad_norm": 0.16636626422405243,
"learning_rate": 9.465167306637412e-05,
"loss": 0.3639,
"step": 200
},
{
"epoch": 0.1650585095462944,
"grad_norm": 0.0912085771560669,
"learning_rate": 9.462424574876577e-05,
"loss": 0.4055,
"step": 201
},
{
"epoch": 0.16587969616095258,
"grad_norm": 0.08452475070953369,
"learning_rate": 9.459681843115744e-05,
"loss": 0.3497,
"step": 202
},
{
"epoch": 0.16670088277561076,
"grad_norm": 0.09225429594516754,
"learning_rate": 9.45693911135491e-05,
"loss": 0.3765,
"step": 203
},
{
"epoch": 0.16752206939026895,
"grad_norm": 0.08500406891107559,
"learning_rate": 9.454196379594076e-05,
"loss": 0.3622,
"step": 204
},
{
"epoch": 0.1683432560049271,
"grad_norm": 0.08277002722024918,
"learning_rate": 9.451453647833243e-05,
"loss": 0.3596,
"step": 205
},
{
"epoch": 0.1691644426195853,
"grad_norm": 0.0855122059583664,
"learning_rate": 9.448710916072408e-05,
"loss": 0.3495,
"step": 206
},
{
"epoch": 0.16998562923424348,
"grad_norm": 0.11409081518650055,
"learning_rate": 9.445968184311574e-05,
"loss": 0.3123,
"step": 207
},
{
"epoch": 0.17080681584890167,
"grad_norm": 0.08898866921663284,
"learning_rate": 9.443225452550741e-05,
"loss": 0.3383,
"step": 208
},
{
"epoch": 0.17162800246355983,
"grad_norm": 0.07967101782560349,
"learning_rate": 9.440482720789908e-05,
"loss": 0.3927,
"step": 209
},
{
"epoch": 0.17244918907821802,
"grad_norm": 0.08267655968666077,
"learning_rate": 9.437739989029073e-05,
"loss": 0.3503,
"step": 210
},
{
"epoch": 0.1732703756928762,
"grad_norm": 0.1024966835975647,
"learning_rate": 9.43499725726824e-05,
"loss": 0.343,
"step": 211
},
{
"epoch": 0.1740915623075344,
"grad_norm": 0.11825034767389297,
"learning_rate": 9.432254525507405e-05,
"loss": 0.3694,
"step": 212
},
{
"epoch": 0.17491274892219258,
"grad_norm": 0.08225111663341522,
"learning_rate": 9.429511793746572e-05,
"loss": 0.3625,
"step": 213
},
{
"epoch": 0.17573393553685074,
"grad_norm": 0.082975834608078,
"learning_rate": 9.426769061985739e-05,
"loss": 0.3692,
"step": 214
},
{
"epoch": 0.17655512215150893,
"grad_norm": 0.09659875184297562,
"learning_rate": 9.424026330224904e-05,
"loss": 0.3857,
"step": 215
},
{
"epoch": 0.1773763087661671,
"grad_norm": 0.08381886035203934,
"learning_rate": 9.42128359846407e-05,
"loss": 0.3658,
"step": 216
},
{
"epoch": 0.1781974953808253,
"grad_norm": 0.08097488433122635,
"learning_rate": 9.418540866703238e-05,
"loss": 0.3556,
"step": 217
},
{
"epoch": 0.1790186819954835,
"grad_norm": 0.08661879599094391,
"learning_rate": 9.415798134942403e-05,
"loss": 0.3495,
"step": 218
},
{
"epoch": 0.17983986861014165,
"grad_norm": 0.09621778875589371,
"learning_rate": 9.413055403181569e-05,
"loss": 0.3488,
"step": 219
},
{
"epoch": 0.18066105522479983,
"grad_norm": 0.0867924615740776,
"learning_rate": 9.410312671420735e-05,
"loss": 0.3496,
"step": 220
},
{
"epoch": 0.18148224183945802,
"grad_norm": 0.09928230196237564,
"learning_rate": 9.407569939659901e-05,
"loss": 0.411,
"step": 221
},
{
"epoch": 0.1823034284541162,
"grad_norm": 0.08545473217964172,
"learning_rate": 9.404827207899069e-05,
"loss": 0.3707,
"step": 222
},
{
"epoch": 0.18312461506877437,
"grad_norm": 0.1317296177148819,
"learning_rate": 9.402084476138234e-05,
"loss": 0.4021,
"step": 223
},
{
"epoch": 0.18394580168343255,
"grad_norm": 0.09009065479040146,
"learning_rate": 9.3993417443774e-05,
"loss": 0.3451,
"step": 224
},
{
"epoch": 0.18476698829809074,
"grad_norm": 0.08332253247499466,
"learning_rate": 9.396599012616567e-05,
"loss": 0.3457,
"step": 225
},
{
"epoch": 0.18558817491274893,
"grad_norm": 0.08737312257289886,
"learning_rate": 9.393856280855733e-05,
"loss": 0.3721,
"step": 226
},
{
"epoch": 0.1864093615274071,
"grad_norm": 0.07472239434719086,
"learning_rate": 9.391113549094899e-05,
"loss": 0.3542,
"step": 227
},
{
"epoch": 0.18723054814206527,
"grad_norm": 0.08319877088069916,
"learning_rate": 9.388370817334066e-05,
"loss": 0.361,
"step": 228
},
{
"epoch": 0.18805173475672346,
"grad_norm": 0.08192326873540878,
"learning_rate": 9.385628085573231e-05,
"loss": 0.344,
"step": 229
},
{
"epoch": 0.18887292137138165,
"grad_norm": 0.11502642929553986,
"learning_rate": 9.382885353812398e-05,
"loss": 0.363,
"step": 230
},
{
"epoch": 0.18969410798603983,
"grad_norm": 0.08567750453948975,
"learning_rate": 9.380142622051565e-05,
"loss": 0.3565,
"step": 231
},
{
"epoch": 0.19051529460069802,
"grad_norm": 0.08059141039848328,
"learning_rate": 9.37739989029073e-05,
"loss": 0.3722,
"step": 232
},
{
"epoch": 0.19133648121535618,
"grad_norm": 0.07670270651578903,
"learning_rate": 9.374657158529896e-05,
"loss": 0.3493,
"step": 233
},
{
"epoch": 0.19215766783001437,
"grad_norm": 0.08022642135620117,
"learning_rate": 9.371914426769062e-05,
"loss": 0.4142,
"step": 234
},
{
"epoch": 0.19297885444467255,
"grad_norm": 0.08015397936105728,
"learning_rate": 9.369171695008229e-05,
"loss": 0.3442,
"step": 235
},
{
"epoch": 0.19380004105933074,
"grad_norm": 0.08329442143440247,
"learning_rate": 9.366428963247395e-05,
"loss": 0.3587,
"step": 236
},
{
"epoch": 0.1946212276739889,
"grad_norm": 0.08311276882886887,
"learning_rate": 9.363686231486561e-05,
"loss": 0.3792,
"step": 237
},
{
"epoch": 0.1954424142886471,
"grad_norm": 0.1332862675189972,
"learning_rate": 9.360943499725727e-05,
"loss": 0.3451,
"step": 238
},
{
"epoch": 0.19626360090330527,
"grad_norm": 0.081804558634758,
"learning_rate": 9.358200767964894e-05,
"loss": 0.4212,
"step": 239
},
{
"epoch": 0.19708478751796346,
"grad_norm": 0.08397019654512405,
"learning_rate": 9.35545803620406e-05,
"loss": 0.3482,
"step": 240
},
{
"epoch": 0.19790597413262165,
"grad_norm": 0.09495637565851212,
"learning_rate": 9.352715304443226e-05,
"loss": 0.3432,
"step": 241
},
{
"epoch": 0.1987271607472798,
"grad_norm": 0.09187504649162292,
"learning_rate": 9.349972572682391e-05,
"loss": 0.3938,
"step": 242
},
{
"epoch": 0.199548347361938,
"grad_norm": 0.100834921002388,
"learning_rate": 9.347229840921558e-05,
"loss": 0.3567,
"step": 243
},
{
"epoch": 0.20036953397659618,
"grad_norm": 0.09292273223400116,
"learning_rate": 9.344487109160725e-05,
"loss": 0.3802,
"step": 244
},
{
"epoch": 0.20119072059125437,
"grad_norm": 0.07785986363887787,
"learning_rate": 9.34174437739989e-05,
"loss": 0.3832,
"step": 245
},
{
"epoch": 0.20201190720591256,
"grad_norm": 0.081189826130867,
"learning_rate": 9.339001645639057e-05,
"loss": 0.3448,
"step": 246
},
{
"epoch": 0.20283309382057071,
"grad_norm": 0.08209879696369171,
"learning_rate": 9.336258913878222e-05,
"loss": 0.3602,
"step": 247
},
{
"epoch": 0.2036542804352289,
"grad_norm": 0.0813421905040741,
"learning_rate": 9.333516182117389e-05,
"loss": 0.3477,
"step": 248
},
{
"epoch": 0.2044754670498871,
"grad_norm": 0.08188773691654205,
"learning_rate": 9.330773450356556e-05,
"loss": 0.3466,
"step": 249
},
{
"epoch": 0.20529665366454528,
"grad_norm": 0.09966633468866348,
"learning_rate": 9.328030718595721e-05,
"loss": 0.3776,
"step": 250
},
{
"epoch": 0.20611784027920346,
"grad_norm": 0.08154954016208649,
"learning_rate": 9.325287986834887e-05,
"loss": 0.3727,
"step": 251
},
{
"epoch": 0.20693902689386162,
"grad_norm": 0.07942931354045868,
"learning_rate": 9.322545255074055e-05,
"loss": 0.3468,
"step": 252
},
{
"epoch": 0.2077602135085198,
"grad_norm": 0.07944433391094208,
"learning_rate": 9.31980252331322e-05,
"loss": 0.3512,
"step": 253
},
{
"epoch": 0.208581400123178,
"grad_norm": 0.08637971431016922,
"learning_rate": 9.317059791552387e-05,
"loss": 0.3397,
"step": 254
},
{
"epoch": 0.20940258673783618,
"grad_norm": 0.08189195394515991,
"learning_rate": 9.314317059791553e-05,
"loss": 0.4105,
"step": 255
},
{
"epoch": 0.21022377335249434,
"grad_norm": 0.08190836012363434,
"learning_rate": 9.31157432803072e-05,
"loss": 0.346,
"step": 256
},
{
"epoch": 0.21104495996715253,
"grad_norm": 0.08331865072250366,
"learning_rate": 9.308831596269886e-05,
"loss": 0.3655,
"step": 257
},
{
"epoch": 0.21186614658181072,
"grad_norm": 0.0822620540857315,
"learning_rate": 9.306088864509052e-05,
"loss": 0.372,
"step": 258
},
{
"epoch": 0.2126873331964689,
"grad_norm": 0.08011777698993683,
"learning_rate": 9.303346132748217e-05,
"loss": 0.3304,
"step": 259
},
{
"epoch": 0.2135085198111271,
"grad_norm": 0.08395062386989594,
"learning_rate": 9.300603400987384e-05,
"loss": 0.3903,
"step": 260
},
{
"epoch": 0.21432970642578525,
"grad_norm": 0.08101452887058258,
"learning_rate": 9.297860669226551e-05,
"loss": 0.3579,
"step": 261
},
{
"epoch": 0.21515089304044344,
"grad_norm": 0.11504925042390823,
"learning_rate": 9.295117937465716e-05,
"loss": 0.3723,
"step": 262
},
{
"epoch": 0.21597207965510162,
"grad_norm": 0.0704338401556015,
"learning_rate": 9.292375205704883e-05,
"loss": 0.3341,
"step": 263
},
{
"epoch": 0.2167932662697598,
"grad_norm": 0.07944470643997192,
"learning_rate": 9.289632473944048e-05,
"loss": 0.3273,
"step": 264
},
{
"epoch": 0.2167932662697598,
"eval_runtime": 507.3366,
"eval_samples_per_second": 0.388,
"eval_steps_per_second": 0.195,
"step": 264
}
],
"logging_steps": 1,
"max_steps": 3651,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 88,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.1637146798818787e+19,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}