camembertv2-base-pawsx / trainer_state.json
{
"best_metric": 0.9225352112676056,
"best_model_checkpoint": "/scratch/camembertv2/runs/results/flue-PAWS-X/camembertv2-base-bf16-p2-17000/max_seq_length-148-gradient_accumulation_steps-2-precision-fp32-learning_rate-3e-05-epochs-6-lr_scheduler-linear-warmup_steps-0/SEED-1/checkpoint-15437",
"epoch": 5.999028340080971,
"eval_steps": 500,
"global_step": 18522,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.032388663967611336,
"grad_norm": 1.3617459535598755,
"learning_rate": 2.983803045027535e-05,
"loss": 0.6917,
"step": 100
},
{
"epoch": 0.06477732793522267,
"grad_norm": 1.1722190380096436,
"learning_rate": 2.96760609005507e-05,
"loss": 0.6863,
"step": 200
},
{
"epoch": 0.09716599190283401,
"grad_norm": 5.13236665725708,
"learning_rate": 2.9514091350826045e-05,
"loss": 0.5495,
"step": 300
},
{
"epoch": 0.12955465587044535,
"grad_norm": 11.99015998840332,
"learning_rate": 2.9352121801101394e-05,
"loss": 0.4849,
"step": 400
},
{
"epoch": 0.16194331983805668,
"grad_norm": 7.261778831481934,
"learning_rate": 2.9190152251376742e-05,
"loss": 0.4368,
"step": 500
},
{
"epoch": 0.19433198380566802,
"grad_norm": 8.82419490814209,
"learning_rate": 2.9028182701652093e-05,
"loss": 0.4046,
"step": 600
},
{
"epoch": 0.22672064777327935,
"grad_norm": 7.460156440734863,
"learning_rate": 2.8866213151927438e-05,
"loss": 0.3746,
"step": 700
},
{
"epoch": 0.2591093117408907,
"grad_norm": 17.467742919921875,
"learning_rate": 2.8704243602202786e-05,
"loss": 0.3493,
"step": 800
},
{
"epoch": 0.291497975708502,
"grad_norm": 16.750350952148438,
"learning_rate": 2.8542274052478135e-05,
"loss": 0.3583,
"step": 900
},
{
"epoch": 0.32388663967611336,
"grad_norm": 8.11487102508545,
"learning_rate": 2.8380304502753486e-05,
"loss": 0.3469,
"step": 1000
},
{
"epoch": 0.3562753036437247,
"grad_norm": 13.264874458312988,
"learning_rate": 2.821833495302883e-05,
"loss": 0.3359,
"step": 1100
},
{
"epoch": 0.38866396761133604,
"grad_norm": 38.216304779052734,
"learning_rate": 2.805636540330418e-05,
"loss": 0.32,
"step": 1200
},
{
"epoch": 0.42105263157894735,
"grad_norm": 3.897590398788452,
"learning_rate": 2.7894395853579527e-05,
"loss": 0.3134,
"step": 1300
},
{
"epoch": 0.4534412955465587,
"grad_norm": 19.601438522338867,
"learning_rate": 2.773242630385488e-05,
"loss": 0.3247,
"step": 1400
},
{
"epoch": 0.48582995951417,
"grad_norm": 6.229040145874023,
"learning_rate": 2.7570456754130224e-05,
"loss": 0.3283,
"step": 1500
},
{
"epoch": 0.5182186234817814,
"grad_norm": 8.433351516723633,
"learning_rate": 2.7408487204405572e-05,
"loss": 0.2851,
"step": 1600
},
{
"epoch": 0.5506072874493927,
"grad_norm": 29.696481704711914,
"learning_rate": 2.724651765468092e-05,
"loss": 0.3067,
"step": 1700
},
{
"epoch": 0.582995951417004,
"grad_norm": 9.64140796661377,
"learning_rate": 2.7084548104956272e-05,
"loss": 0.3072,
"step": 1800
},
{
"epoch": 0.6153846153846154,
"grad_norm": 2.7816696166992188,
"learning_rate": 2.6922578555231617e-05,
"loss": 0.2838,
"step": 1900
},
{
"epoch": 0.6477732793522267,
"grad_norm": 11.457731246948242,
"learning_rate": 2.6760609005506965e-05,
"loss": 0.2692,
"step": 2000
},
{
"epoch": 0.680161943319838,
"grad_norm": 10.894031524658203,
"learning_rate": 2.6598639455782313e-05,
"loss": 0.2969,
"step": 2100
},
{
"epoch": 0.7125506072874493,
"grad_norm": 8.756068229675293,
"learning_rate": 2.6436669906057665e-05,
"loss": 0.2982,
"step": 2200
},
{
"epoch": 0.7449392712550608,
"grad_norm": 6.366727352142334,
"learning_rate": 2.627470035633301e-05,
"loss": 0.2743,
"step": 2300
},
{
"epoch": 0.7773279352226721,
"grad_norm": 3.6022021770477295,
"learning_rate": 2.6112730806608358e-05,
"loss": 0.2497,
"step": 2400
},
{
"epoch": 0.8097165991902834,
"grad_norm": 9.339791297912598,
"learning_rate": 2.5950761256883706e-05,
"loss": 0.2617,
"step": 2500
},
{
"epoch": 0.8421052631578947,
"grad_norm": 0.46193593740463257,
"learning_rate": 2.5788791707159058e-05,
"loss": 0.2591,
"step": 2600
},
{
"epoch": 0.8744939271255061,
"grad_norm": 8.35823917388916,
"learning_rate": 2.5626822157434402e-05,
"loss": 0.2619,
"step": 2700
},
{
"epoch": 0.9068825910931174,
"grad_norm": 11.017343521118164,
"learning_rate": 2.546485260770975e-05,
"loss": 0.2571,
"step": 2800
},
{
"epoch": 0.9392712550607287,
"grad_norm": 12.998709678649902,
"learning_rate": 2.53028830579851e-05,
"loss": 0.2922,
"step": 2900
},
{
"epoch": 0.97165991902834,
"grad_norm": 10.282382011413574,
"learning_rate": 2.514091350826045e-05,
"loss": 0.2421,
"step": 3000
},
{
"epoch": 0.9998380566801619,
"eval_accuracy": 0.8958752515090543,
"eval_loss": 0.3331478238105774,
"eval_runtime": 4.4897,
"eval_samples_per_second": 442.795,
"eval_steps_per_second": 55.461,
"step": 3087
},
{
"epoch": 1.0040485829959513,
"grad_norm": 1.8964722156524658,
"learning_rate": 2.4978943958535795e-05,
"loss": 0.2629,
"step": 3100
},
{
"epoch": 1.0364372469635628,
"grad_norm": 20.548351287841797,
"learning_rate": 2.4816974408811143e-05,
"loss": 0.251,
"step": 3200
},
{
"epoch": 1.0688259109311742,
"grad_norm": 9.91510009765625,
"learning_rate": 2.465500485908649e-05,
"loss": 0.205,
"step": 3300
},
{
"epoch": 1.1012145748987854,
"grad_norm": 15.848530769348145,
"learning_rate": 2.4493035309361843e-05,
"loss": 0.1904,
"step": 3400
},
{
"epoch": 1.1336032388663968,
"grad_norm": 2.94976544380188,
"learning_rate": 2.4331065759637188e-05,
"loss": 0.2101,
"step": 3500
},
{
"epoch": 1.165991902834008,
"grad_norm": 2.3269777297973633,
"learning_rate": 2.4169096209912536e-05,
"loss": 0.1924,
"step": 3600
},
{
"epoch": 1.1983805668016194,
"grad_norm": 9.416550636291504,
"learning_rate": 2.4007126660187884e-05,
"loss": 0.2243,
"step": 3700
},
{
"epoch": 1.2307692307692308,
"grad_norm": 18.740257263183594,
"learning_rate": 2.3845157110463236e-05,
"loss": 0.204,
"step": 3800
},
{
"epoch": 1.263157894736842,
"grad_norm": 11.511466026306152,
"learning_rate": 2.368318756073858e-05,
"loss": 0.2211,
"step": 3900
},
{
"epoch": 1.2955465587044535,
"grad_norm": 48.617271423339844,
"learning_rate": 2.352121801101393e-05,
"loss": 0.2006,
"step": 4000
},
{
"epoch": 1.3279352226720649,
"grad_norm": 1.9806264638900757,
"learning_rate": 2.3359248461289277e-05,
"loss": 0.2331,
"step": 4100
},
{
"epoch": 1.360323886639676,
"grad_norm": 11.272978782653809,
"learning_rate": 2.319727891156463e-05,
"loss": 0.228,
"step": 4200
},
{
"epoch": 1.3927125506072875,
"grad_norm": 13.344253540039062,
"learning_rate": 2.3035309361839974e-05,
"loss": 0.2166,
"step": 4300
},
{
"epoch": 1.425101214574899,
"grad_norm": 6.339032173156738,
"learning_rate": 2.2873339812115322e-05,
"loss": 0.1978,
"step": 4400
},
{
"epoch": 1.45748987854251,
"grad_norm": 12.384753227233887,
"learning_rate": 2.271137026239067e-05,
"loss": 0.2137,
"step": 4500
},
{
"epoch": 1.4898785425101215,
"grad_norm": 2.5586955547332764,
"learning_rate": 2.254940071266602e-05,
"loss": 0.1965,
"step": 4600
},
{
"epoch": 1.522267206477733,
"grad_norm": 30.300275802612305,
"learning_rate": 2.2387431162941366e-05,
"loss": 0.2013,
"step": 4700
},
{
"epoch": 1.5546558704453441,
"grad_norm": 0.9059699773788452,
"learning_rate": 2.2225461613216715e-05,
"loss": 0.215,
"step": 4800
},
{
"epoch": 1.5870445344129553,
"grad_norm": 24.580528259277344,
"learning_rate": 2.2063492063492063e-05,
"loss": 0.2025,
"step": 4900
},
{
"epoch": 1.6194331983805668,
"grad_norm": 46.1649169921875,
"learning_rate": 2.1901522513767414e-05,
"loss": 0.1775,
"step": 5000
},
{
"epoch": 1.6518218623481782,
"grad_norm": 9.169315338134766,
"learning_rate": 2.173955296404276e-05,
"loss": 0.2034,
"step": 5100
},
{
"epoch": 1.6842105263157894,
"grad_norm": 22.808425903320312,
"learning_rate": 2.1577583414318108e-05,
"loss": 0.2338,
"step": 5200
},
{
"epoch": 1.7165991902834008,
"grad_norm": 0.972823977470398,
"learning_rate": 2.1415613864593456e-05,
"loss": 0.2079,
"step": 5300
},
{
"epoch": 1.7489878542510122,
"grad_norm": 12.798443794250488,
"learning_rate": 2.1253644314868807e-05,
"loss": 0.2155,
"step": 5400
},
{
"epoch": 1.7813765182186234,
"grad_norm": 14.221925735473633,
"learning_rate": 2.1091674765144152e-05,
"loss": 0.2016,
"step": 5500
},
{
"epoch": 1.8137651821862348,
"grad_norm": 16.415285110473633,
"learning_rate": 2.09297052154195e-05,
"loss": 0.187,
"step": 5600
},
{
"epoch": 1.8461538461538463,
"grad_norm": 3.618945598602295,
"learning_rate": 2.0767735665694852e-05,
"loss": 0.2213,
"step": 5700
},
{
"epoch": 1.8785425101214575,
"grad_norm": 39.05452346801758,
"learning_rate": 2.06057661159702e-05,
"loss": 0.1785,
"step": 5800
},
{
"epoch": 1.9109311740890689,
"grad_norm": 19.066164016723633,
"learning_rate": 2.0443796566245545e-05,
"loss": 0.2173,
"step": 5900
},
{
"epoch": 1.9433198380566803,
"grad_norm": 48.79111862182617,
"learning_rate": 2.0281827016520893e-05,
"loss": 0.1846,
"step": 6000
},
{
"epoch": 1.9757085020242915,
"grad_norm": 0.30500343441963196,
"learning_rate": 2.0119857466796245e-05,
"loss": 0.2054,
"step": 6100
},
{
"epoch": 2.0,
"eval_accuracy": 0.9094567404426559,
"eval_loss": 0.30133694410324097,
"eval_runtime": 4.0681,
"eval_samples_per_second": 488.676,
"eval_steps_per_second": 61.207,
"step": 6175
},
{
"epoch": 2.0080971659919027,
"grad_norm": 5.170982837677002,
"learning_rate": 1.9957887917071593e-05,
"loss": 0.1975,
"step": 6200
},
{
"epoch": 2.0404858299595143,
"grad_norm": 17.76828956604004,
"learning_rate": 1.9795918367346938e-05,
"loss": 0.1424,
"step": 6300
},
{
"epoch": 2.0728744939271255,
"grad_norm": 0.5401751399040222,
"learning_rate": 1.9633948817622286e-05,
"loss": 0.1564,
"step": 6400
},
{
"epoch": 2.1052631578947367,
"grad_norm": 0.1927350014448166,
"learning_rate": 1.9471979267897638e-05,
"loss": 0.1559,
"step": 6500
},
{
"epoch": 2.1376518218623484,
"grad_norm": 6.872354984283447,
"learning_rate": 1.9310009718172986e-05,
"loss": 0.1577,
"step": 6600
},
{
"epoch": 2.1700404858299596,
"grad_norm": 0.9195345640182495,
"learning_rate": 1.914804016844833e-05,
"loss": 0.1655,
"step": 6700
},
{
"epoch": 2.2024291497975708,
"grad_norm": 0.5888263583183289,
"learning_rate": 1.898607061872368e-05,
"loss": 0.1404,
"step": 6800
},
{
"epoch": 2.234817813765182,
"grad_norm": 20.80263900756836,
"learning_rate": 1.882410106899903e-05,
"loss": 0.1828,
"step": 6900
},
{
"epoch": 2.2672064777327936,
"grad_norm": 11.595118522644043,
"learning_rate": 1.866213151927438e-05,
"loss": 0.1768,
"step": 7000
},
{
"epoch": 2.299595141700405,
"grad_norm": 4.5267333984375,
"learning_rate": 1.8500161969549723e-05,
"loss": 0.1546,
"step": 7100
},
{
"epoch": 2.331983805668016,
"grad_norm": 0.5598276257514954,
"learning_rate": 1.833819241982507e-05,
"loss": 0.1605,
"step": 7200
},
{
"epoch": 2.3643724696356276,
"grad_norm": 1.0213172435760498,
"learning_rate": 1.8176222870100423e-05,
"loss": 0.1715,
"step": 7300
},
{
"epoch": 2.396761133603239,
"grad_norm": 4.324398040771484,
"learning_rate": 1.801425332037577e-05,
"loss": 0.1597,
"step": 7400
},
{
"epoch": 2.42914979757085,
"grad_norm": 0.6324844360351562,
"learning_rate": 1.7852283770651116e-05,
"loss": 0.1659,
"step": 7500
},
{
"epoch": 2.4615384615384617,
"grad_norm": 0.5160787105560303,
"learning_rate": 1.7690314220926464e-05,
"loss": 0.1799,
"step": 7600
},
{
"epoch": 2.493927125506073,
"grad_norm": 0.26999789476394653,
"learning_rate": 1.7528344671201816e-05,
"loss": 0.1347,
"step": 7700
},
{
"epoch": 2.526315789473684,
"grad_norm": 4.179189682006836,
"learning_rate": 1.7366375121477164e-05,
"loss": 0.1737,
"step": 7800
},
{
"epoch": 2.5587044534412957,
"grad_norm": 14.371667861938477,
"learning_rate": 1.720440557175251e-05,
"loss": 0.1825,
"step": 7900
},
{
"epoch": 2.591093117408907,
"grad_norm": 0.13881312310695648,
"learning_rate": 1.7042436022027857e-05,
"loss": 0.1757,
"step": 8000
},
{
"epoch": 2.623481781376518,
"grad_norm": 10.17100715637207,
"learning_rate": 1.688046647230321e-05,
"loss": 0.1501,
"step": 8100
},
{
"epoch": 2.6558704453441297,
"grad_norm": 43.74755859375,
"learning_rate": 1.6718496922578557e-05,
"loss": 0.1845,
"step": 8200
},
{
"epoch": 2.688259109311741,
"grad_norm": 17.117637634277344,
"learning_rate": 1.6556527372853902e-05,
"loss": 0.1745,
"step": 8300
},
{
"epoch": 2.720647773279352,
"grad_norm": 31.1845645904541,
"learning_rate": 1.639455782312925e-05,
"loss": 0.1664,
"step": 8400
},
{
"epoch": 2.753036437246964,
"grad_norm": 9.80735969543457,
"learning_rate": 1.6232588273404602e-05,
"loss": 0.1632,
"step": 8500
},
{
"epoch": 2.785425101214575,
"grad_norm": 9.704376220703125,
"learning_rate": 1.607061872367995e-05,
"loss": 0.1443,
"step": 8600
},
{
"epoch": 2.817813765182186,
"grad_norm": 4.445658206939697,
"learning_rate": 1.5908649173955295e-05,
"loss": 0.1775,
"step": 8700
},
{
"epoch": 2.850202429149798,
"grad_norm": 1.7219343185424805,
"learning_rate": 1.5746679624230643e-05,
"loss": 0.1581,
"step": 8800
},
{
"epoch": 2.882591093117409,
"grad_norm": 8.307001113891602,
"learning_rate": 1.5584710074505995e-05,
"loss": 0.1536,
"step": 8900
},
{
"epoch": 2.91497975708502,
"grad_norm": 9.363420486450195,
"learning_rate": 1.5422740524781343e-05,
"loss": 0.1862,
"step": 9000
},
{
"epoch": 2.9473684210526314,
"grad_norm": 20.355863571166992,
"learning_rate": 1.5260770975056688e-05,
"loss": 0.1679,
"step": 9100
},
{
"epoch": 2.979757085020243,
"grad_norm": 16.128374099731445,
"learning_rate": 1.5098801425332037e-05,
"loss": 0.1513,
"step": 9200
},
{
"epoch": 2.999838056680162,
"eval_accuracy": 0.9054325955734407,
"eval_loss": 0.40321749448776245,
"eval_runtime": 4.0611,
"eval_samples_per_second": 489.52,
"eval_steps_per_second": 61.313,
"step": 9262
},
{
"epoch": 3.0121457489878543,
"grad_norm": 2.3643200397491455,
"learning_rate": 1.4936831875607386e-05,
"loss": 0.1294,
"step": 9300
},
{
"epoch": 3.0445344129554655,
"grad_norm": 0.2633157968521118,
"learning_rate": 1.4774862325882734e-05,
"loss": 0.1198,
"step": 9400
},
{
"epoch": 3.076923076923077,
"grad_norm": 116.33301544189453,
"learning_rate": 1.4612892776158082e-05,
"loss": 0.1199,
"step": 9500
},
{
"epoch": 3.1093117408906883,
"grad_norm": 110.17882537841797,
"learning_rate": 1.445092322643343e-05,
"loss": 0.1101,
"step": 9600
},
{
"epoch": 3.1417004048582995,
"grad_norm": 66.73577117919922,
"learning_rate": 1.4288953676708779e-05,
"loss": 0.1634,
"step": 9700
},
{
"epoch": 3.174089068825911,
"grad_norm": 0.10323189944028854,
"learning_rate": 1.4126984126984127e-05,
"loss": 0.1326,
"step": 9800
},
{
"epoch": 3.2064777327935223,
"grad_norm": 0.39017683267593384,
"learning_rate": 1.3965014577259475e-05,
"loss": 0.1305,
"step": 9900
},
{
"epoch": 3.2388663967611335,
"grad_norm": 13.028061866760254,
"learning_rate": 1.3803045027534823e-05,
"loss": 0.1254,
"step": 10000
},
{
"epoch": 3.2712550607287447,
"grad_norm": 3.545095205307007,
"learning_rate": 1.3641075477810171e-05,
"loss": 0.1227,
"step": 10100
},
{
"epoch": 3.3036437246963564,
"grad_norm": 0.2068515121936798,
"learning_rate": 1.347910592808552e-05,
"loss": 0.1107,
"step": 10200
},
{
"epoch": 3.3360323886639676,
"grad_norm": 0.7598180174827576,
"learning_rate": 1.3317136378360868e-05,
"loss": 0.1431,
"step": 10300
},
{
"epoch": 3.3684210526315788,
"grad_norm": 0.3546907603740692,
"learning_rate": 1.3155166828636216e-05,
"loss": 0.128,
"step": 10400
},
{
"epoch": 3.4008097165991904,
"grad_norm": 1.108842372894287,
"learning_rate": 1.2993197278911564e-05,
"loss": 0.1247,
"step": 10500
},
{
"epoch": 3.4331983805668016,
"grad_norm": 31.11785316467285,
"learning_rate": 1.2831227729186914e-05,
"loss": 0.1293,
"step": 10600
},
{
"epoch": 3.465587044534413,
"grad_norm": 0.4772971272468567,
"learning_rate": 1.266925817946226e-05,
"loss": 0.136,
"step": 10700
},
{
"epoch": 3.4979757085020244,
"grad_norm": 3.384209632873535,
"learning_rate": 1.250728862973761e-05,
"loss": 0.1197,
"step": 10800
},
{
"epoch": 3.5303643724696356,
"grad_norm": 13.939515113830566,
"learning_rate": 1.2345319080012957e-05,
"loss": 0.1414,
"step": 10900
},
{
"epoch": 3.562753036437247,
"grad_norm": 0.4108864963054657,
"learning_rate": 1.2183349530288307e-05,
"loss": 0.107,
"step": 11000
},
{
"epoch": 3.5951417004048585,
"grad_norm": 12.29635238647461,
"learning_rate": 1.2021379980563655e-05,
"loss": 0.1357,
"step": 11100
},
{
"epoch": 3.6275303643724697,
"grad_norm": 49.79674530029297,
"learning_rate": 1.1859410430839003e-05,
"loss": 0.1333,
"step": 11200
},
{
"epoch": 3.659919028340081,
"grad_norm": 0.1119026318192482,
"learning_rate": 1.1697440881114352e-05,
"loss": 0.1311,
"step": 11300
},
{
"epoch": 3.6923076923076925,
"grad_norm": 0.2087603062391281,
"learning_rate": 1.15354713313897e-05,
"loss": 0.145,
"step": 11400
},
{
"epoch": 3.7246963562753037,
"grad_norm": 0.3955753743648529,
"learning_rate": 1.1373501781665048e-05,
"loss": 0.1225,
"step": 11500
},
{
"epoch": 3.757085020242915,
"grad_norm": 0.05121416971087456,
"learning_rate": 1.1211532231940396e-05,
"loss": 0.0975,
"step": 11600
},
{
"epoch": 3.7894736842105265,
"grad_norm": 21.439380645751953,
"learning_rate": 1.1049562682215744e-05,
"loss": 0.1398,
"step": 11700
},
{
"epoch": 3.8218623481781377,
"grad_norm": 1.1940436363220215,
"learning_rate": 1.0887593132491093e-05,
"loss": 0.1231,
"step": 11800
},
{
"epoch": 3.854251012145749,
"grad_norm": 12.916104316711426,
"learning_rate": 1.072562358276644e-05,
"loss": 0.1392,
"step": 11900
},
{
"epoch": 3.8866396761133606,
"grad_norm": 0.13800889253616333,
"learning_rate": 1.0563654033041789e-05,
"loss": 0.142,
"step": 12000
},
{
"epoch": 3.919028340080972,
"grad_norm": 0.10773531347513199,
"learning_rate": 1.0401684483317137e-05,
"loss": 0.1444,
"step": 12100
},
{
"epoch": 3.951417004048583,
"grad_norm": 4.032077312469482,
"learning_rate": 1.0239714933592485e-05,
"loss": 0.127,
"step": 12200
},
{
"epoch": 3.983805668016194,
"grad_norm": 0.21410343050956726,
"learning_rate": 1.0077745383867834e-05,
"loss": 0.1475,
"step": 12300
},
{
"epoch": 4.0,
"eval_accuracy": 0.9079476861167002,
"eval_loss": 0.35845422744750977,
"eval_runtime": 4.0822,
"eval_samples_per_second": 486.989,
"eval_steps_per_second": 60.996,
"step": 12350
},
{
"epoch": 4.016194331983805,
"grad_norm": 108.23912048339844,
"learning_rate": 9.915775834143182e-06,
"loss": 0.1174,
"step": 12400
},
{
"epoch": 4.048582995951417,
"grad_norm": 0.086638443171978,
"learning_rate": 9.75380628441853e-06,
"loss": 0.1094,
"step": 12500
},
{
"epoch": 4.080971659919029,
"grad_norm": 31.371986389160156,
"learning_rate": 9.591836734693878e-06,
"loss": 0.1086,
"step": 12600
},
{
"epoch": 4.113360323886639,
"grad_norm": 0.3369753658771515,
"learning_rate": 9.429867184969226e-06,
"loss": 0.0997,
"step": 12700
},
{
"epoch": 4.145748987854251,
"grad_norm": 4.937185287475586,
"learning_rate": 9.267897635244575e-06,
"loss": 0.1187,
"step": 12800
},
{
"epoch": 4.178137651821863,
"grad_norm": 0.08810489624738693,
"learning_rate": 9.105928085519923e-06,
"loss": 0.0714,
"step": 12900
},
{
"epoch": 4.2105263157894735,
"grad_norm": 2.339200496673584,
"learning_rate": 8.943958535795271e-06,
"loss": 0.1018,
"step": 13000
},
{
"epoch": 4.242914979757085,
"grad_norm": 0.8731828331947327,
"learning_rate": 8.78198898607062e-06,
"loss": 0.0783,
"step": 13100
},
{
"epoch": 4.275303643724697,
"grad_norm": 0.03349796682596207,
"learning_rate": 8.620019436345967e-06,
"loss": 0.076,
"step": 13200
},
{
"epoch": 4.3076923076923075,
"grad_norm": 0.3485426902770996,
"learning_rate": 8.458049886621316e-06,
"loss": 0.0891,
"step": 13300
},
{
"epoch": 4.340080971659919,
"grad_norm": 0.18010343611240387,
"learning_rate": 8.296080336896664e-06,
"loss": 0.1094,
"step": 13400
},
{
"epoch": 4.372469635627531,
"grad_norm": 0.1857542097568512,
"learning_rate": 8.134110787172012e-06,
"loss": 0.0976,
"step": 13500
},
{
"epoch": 4.4048582995951415,
"grad_norm": 0.5226219296455383,
"learning_rate": 7.97214123744736e-06,
"loss": 0.1342,
"step": 13600
},
{
"epoch": 4.437246963562753,
"grad_norm": 0.23230472207069397,
"learning_rate": 7.810171687722709e-06,
"loss": 0.0997,
"step": 13700
},
{
"epoch": 4.469635627530364,
"grad_norm": 0.15814034640789032,
"learning_rate": 7.648202137998057e-06,
"loss": 0.0924,
"step": 13800
},
{
"epoch": 4.502024291497976,
"grad_norm": 6.45848274230957,
"learning_rate": 7.486232588273405e-06,
"loss": 0.0867,
"step": 13900
},
{
"epoch": 4.534412955465587,
"grad_norm": 0.31769734621047974,
"learning_rate": 7.324263038548753e-06,
"loss": 0.1186,
"step": 14000
},
{
"epoch": 4.566801619433198,
"grad_norm": 5.155035495758057,
"learning_rate": 7.162293488824101e-06,
"loss": 0.0912,
"step": 14100
},
{
"epoch": 4.59919028340081,
"grad_norm": 43.147640228271484,
"learning_rate": 7.0003239390994495e-06,
"loss": 0.0967,
"step": 14200
},
{
"epoch": 4.631578947368421,
"grad_norm": 0.5134268999099731,
"learning_rate": 6.838354389374798e-06,
"loss": 0.0945,
"step": 14300
},
{
"epoch": 4.663967611336032,
"grad_norm": 0.2741609811782837,
"learning_rate": 6.676384839650146e-06,
"loss": 0.1012,
"step": 14400
},
{
"epoch": 4.696356275303644,
"grad_norm": 0.4370046854019165,
"learning_rate": 6.514415289925494e-06,
"loss": 0.123,
"step": 14500
},
{
"epoch": 4.728744939271255,
"grad_norm": 4.210660457611084,
"learning_rate": 6.352445740200842e-06,
"loss": 0.1047,
"step": 14600
},
{
"epoch": 4.761133603238866,
"grad_norm": 6.13052225112915,
"learning_rate": 6.190476190476191e-06,
"loss": 0.0966,
"step": 14700
},
{
"epoch": 4.793522267206478,
"grad_norm": 0.41901007294654846,
"learning_rate": 6.028506640751539e-06,
"loss": 0.1014,
"step": 14800
},
{
"epoch": 4.825910931174089,
"grad_norm": 0.28807559609413147,
"learning_rate": 5.866537091026887e-06,
"loss": 0.1042,
"step": 14900
},
{
"epoch": 4.8582995951417,
"grad_norm": 28.3045654296875,
"learning_rate": 5.704567541302235e-06,
"loss": 0.1027,
"step": 15000
},
{
"epoch": 4.890688259109312,
"grad_norm": 148.7666473388672,
"learning_rate": 5.542597991577583e-06,
"loss": 0.0804,
"step": 15100
},
{
"epoch": 4.923076923076923,
"grad_norm": 1.260237216949463,
"learning_rate": 5.380628441852932e-06,
"loss": 0.1201,
"step": 15200
},
{
"epoch": 4.955465587044534,
"grad_norm": 0.15356577932834625,
"learning_rate": 5.21865889212828e-06,
"loss": 0.1002,
"step": 15300
},
{
"epoch": 4.987854251012146,
"grad_norm": 0.7852919697761536,
"learning_rate": 5.056689342403628e-06,
"loss": 0.1202,
"step": 15400
},
{
"epoch": 4.9998380566801615,
"eval_accuracy": 0.9225352112676056,
"eval_loss": 0.3642682433128357,
"eval_runtime": 4.0522,
"eval_samples_per_second": 490.597,
"eval_steps_per_second": 61.448,
"step": 15437
},
{
"epoch": 5.020242914979757,
"grad_norm": 0.10688459873199463,
"learning_rate": 4.894719792678976e-06,
"loss": 0.1028,
"step": 15500
},
{
"epoch": 5.052631578947368,
"grad_norm": 20.821151733398438,
"learning_rate": 4.7327502429543244e-06,
"loss": 0.0861,
"step": 15600
},
{
"epoch": 5.08502024291498,
"grad_norm": 0.06313851475715637,
"learning_rate": 4.5707806932296735e-06,
"loss": 0.0749,
"step": 15700
},
{
"epoch": 5.117408906882591,
"grad_norm": 5.4235734939575195,
"learning_rate": 4.408811143505022e-06,
"loss": 0.0756,
"step": 15800
},
{
"epoch": 5.149797570850202,
"grad_norm": 0.07610571384429932,
"learning_rate": 4.24684159378037e-06,
"loss": 0.0821,
"step": 15900
},
{
"epoch": 5.182186234817814,
"grad_norm": 189.49761962890625,
"learning_rate": 4.084872044055718e-06,
"loss": 0.0694,
"step": 16000
},
{
"epoch": 5.2145748987854255,
"grad_norm": 0.05370008572936058,
"learning_rate": 3.922902494331066e-06,
"loss": 0.0767,
"step": 16100
},
{
"epoch": 5.246963562753036,
"grad_norm": 0.05699535831809044,
"learning_rate": 3.7609329446064145e-06,
"loss": 0.0777,
"step": 16200
},
{
"epoch": 5.279352226720648,
"grad_norm": 0.04399504140019417,
"learning_rate": 3.5989633948817623e-06,
"loss": 0.0753,
"step": 16300
},
{
"epoch": 5.3117408906882595,
"grad_norm": 0.09438109397888184,
"learning_rate": 3.4369938451571105e-06,
"loss": 0.0759,
"step": 16400
},
{
"epoch": 5.34412955465587,
"grad_norm": 0.10693158209323883,
"learning_rate": 3.2750242954324587e-06,
"loss": 0.0627,
"step": 16500
},
{
"epoch": 5.376518218623482,
"grad_norm": 0.16652892529964447,
"learning_rate": 3.113054745707807e-06,
"loss": 0.0738,
"step": 16600
},
{
"epoch": 5.4089068825910935,
"grad_norm": 17.245641708374023,
"learning_rate": 2.951085195983155e-06,
"loss": 0.0806,
"step": 16700
},
{
"epoch": 5.441295546558704,
"grad_norm": 0.10547757893800735,
"learning_rate": 2.7891156462585034e-06,
"loss": 0.0671,
"step": 16800
},
{
"epoch": 5.473684210526316,
"grad_norm": 1.494895100593567,
"learning_rate": 2.6271460965338516e-06,
"loss": 0.0733,
"step": 16900
},
{
"epoch": 5.506072874493928,
"grad_norm": 0.5026708841323853,
"learning_rate": 2.4651765468091998e-06,
"loss": 0.0579,
"step": 17000
},
{
"epoch": 5.538461538461538,
"grad_norm": 0.033295173197984695,
"learning_rate": 2.303206997084548e-06,
"loss": 0.0665,
"step": 17100
},
{
"epoch": 5.57085020242915,
"grad_norm": 38.09762954711914,
"learning_rate": 2.141237447359896e-06,
"loss": 0.076,
"step": 17200
},
{
"epoch": 5.603238866396762,
"grad_norm": 6.565536022186279,
"learning_rate": 1.9792678976352444e-06,
"loss": 0.0695,
"step": 17300
},
{
"epoch": 5.635627530364372,
"grad_norm": 0.03646261617541313,
"learning_rate": 1.817298347910593e-06,
"loss": 0.0941,
"step": 17400
},
{
"epoch": 5.668016194331984,
"grad_norm": 2.2503933906555176,
"learning_rate": 1.6553287981859412e-06,
"loss": 0.0992,
"step": 17500
},
{
"epoch": 5.700404858299595,
"grad_norm": 0.26883581280708313,
"learning_rate": 1.4933592484612894e-06,
"loss": 0.0849,
"step": 17600
},
{
"epoch": 5.732793522267206,
"grad_norm": 0.1083364486694336,
"learning_rate": 1.3313896987366376e-06,
"loss": 0.0798,
"step": 17700
},
{
"epoch": 5.765182186234818,
"grad_norm": 1.5160281658172607,
"learning_rate": 1.1694201490119858e-06,
"loss": 0.0535,
"step": 17800
},
{
"epoch": 5.797570850202429,
"grad_norm": 0.054747115820646286,
"learning_rate": 1.007450599287334e-06,
"loss": 0.0828,
"step": 17900
},
{
"epoch": 5.82995951417004,
"grad_norm": 0.062284424901008606,
"learning_rate": 8.454810495626823e-07,
"loss": 0.0816,
"step": 18000
},
{
"epoch": 5.862348178137652,
"grad_norm": 0.07957690209150314,
"learning_rate": 6.835114998380305e-07,
"loss": 0.0873,
"step": 18100
},
{
"epoch": 5.894736842105263,
"grad_norm": 18.025510787963867,
"learning_rate": 5.215419501133787e-07,
"loss": 0.0786,
"step": 18200
},
{
"epoch": 5.9271255060728745,
"grad_norm": 3.286792755126953,
"learning_rate": 3.5957240038872693e-07,
"loss": 0.0761,
"step": 18300
},
{
"epoch": 5.959514170040486,
"grad_norm": 0.04595565423369408,
"learning_rate": 1.9760285066407517e-07,
"loss": 0.0551,
"step": 18400
},
{
"epoch": 5.991902834008097,
"grad_norm": 0.07384829223155975,
"learning_rate": 3.563330093942339e-08,
"loss": 0.092,
"step": 18500
},
{
"epoch": 5.999028340080971,
"eval_accuracy": 0.9164989939637826,
"eval_loss": 0.4001348912715912,
"eval_runtime": 4.0447,
"eval_samples_per_second": 491.512,
"eval_steps_per_second": 61.563,
"step": 18522
},
{
"epoch": 5.999028340080971,
"step": 18522,
"total_flos": 1.33712370278538e+16,
"train_loss": 0.17084743083006051,
"train_runtime": 2225.7449,
"train_samples_per_second": 133.166,
"train_steps_per_second": 8.322
}
],
"logging_steps": 100,
"max_steps": 18522,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.33712370278538e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
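
For reference, a minimal sketch of how one might inspect the log above: it assumes a local copy of this file saved as trainer_state.json (the filename is an assumption) and uses only the standard json module to print the best checkpoint and the per-epoch evaluation records.

import json

# Minimal sketch: load a local copy of this trainer_state.json and summarize it.
# The path below is an assumption; point it at wherever the file was downloaded.
with open("trainer_state.json", encoding="utf-8") as f:
    state = json.load(f)

print(f"best eval_accuracy: {state['best_metric']:.4f}")
print(f"best checkpoint:    {state['best_model_checkpoint']}")

# log_history mixes periodic training-loss records (every `logging_steps` steps)
# with end-of-epoch evaluation records; "eval_accuracy" tells them apart.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_accuracy" in e]

for e in eval_logs:
    print(f"epoch {e['epoch']:.2f}  step {e['step']:>6}  "
          f"eval_loss {e['eval_loss']:.4f}  eval_accuracy {e['eval_accuracy']:.4f}")

print(f"{len(train_logs)} training log entries, final train loss "
      f"{train_logs[-1]['loss']:.4f} at step {train_logs[-1]['step']}")

Run against this file, the loop would list one evaluation record per epoch (six in total), ending with the best accuracy of about 0.9225 at step 15437, which matches best_model_checkpoint above.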