{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 1266,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.023696682464454975,
"grad_norm": 30.705133295132807,
"learning_rate": 5e-06,
"loss": 0.5563,
"step": 10
},
{
"epoch": 0.04739336492890995,
"grad_norm": 2.043478075324726,
"learning_rate": 5e-06,
"loss": 0.4838,
"step": 20
},
{
"epoch": 0.07109004739336493,
"grad_norm": 1.3358743818486332,
"learning_rate": 5e-06,
"loss": 0.4503,
"step": 30
},
{
"epoch": 0.0947867298578199,
"grad_norm": 0.7538335151640748,
"learning_rate": 5e-06,
"loss": 0.429,
"step": 40
},
{
"epoch": 0.11848341232227488,
"grad_norm": 0.7811530617140079,
"learning_rate": 5e-06,
"loss": 0.4108,
"step": 50
},
{
"epoch": 0.14218009478672985,
"grad_norm": 0.9317427620124435,
"learning_rate": 5e-06,
"loss": 0.4018,
"step": 60
},
{
"epoch": 0.16587677725118483,
"grad_norm": 0.6243367032731518,
"learning_rate": 5e-06,
"loss": 0.3908,
"step": 70
},
{
"epoch": 0.1895734597156398,
"grad_norm": 0.6176921128719449,
"learning_rate": 5e-06,
"loss": 0.3833,
"step": 80
},
{
"epoch": 0.2132701421800948,
"grad_norm": 0.4314255330259147,
"learning_rate": 5e-06,
"loss": 0.3777,
"step": 90
},
{
"epoch": 0.23696682464454977,
"grad_norm": 0.5197854521683744,
"learning_rate": 5e-06,
"loss": 0.3704,
"step": 100
},
{
"epoch": 0.26066350710900477,
"grad_norm": 1.002573805206526,
"learning_rate": 5e-06,
"loss": 0.3662,
"step": 110
},
{
"epoch": 0.2843601895734597,
"grad_norm": 0.5881621358357682,
"learning_rate": 5e-06,
"loss": 0.3658,
"step": 120
},
{
"epoch": 0.3080568720379147,
"grad_norm": 0.5217282145946449,
"learning_rate": 5e-06,
"loss": 0.3629,
"step": 130
},
{
"epoch": 0.33175355450236965,
"grad_norm": 0.6091442268483355,
"learning_rate": 5e-06,
"loss": 0.3652,
"step": 140
},
{
"epoch": 0.35545023696682465,
"grad_norm": 0.8005863736511483,
"learning_rate": 5e-06,
"loss": 0.3531,
"step": 150
},
{
"epoch": 0.3791469194312796,
"grad_norm": 0.6150361006740718,
"learning_rate": 5e-06,
"loss": 0.3498,
"step": 160
},
{
"epoch": 0.4028436018957346,
"grad_norm": 0.6278490509821739,
"learning_rate": 5e-06,
"loss": 0.3507,
"step": 170
},
{
"epoch": 0.4265402843601896,
"grad_norm": 0.8241804964154705,
"learning_rate": 5e-06,
"loss": 0.348,
"step": 180
},
{
"epoch": 0.45023696682464454,
"grad_norm": 0.6555844743575341,
"learning_rate": 5e-06,
"loss": 0.3476,
"step": 190
},
{
"epoch": 0.47393364928909953,
"grad_norm": 0.5422180070121905,
"learning_rate": 5e-06,
"loss": 0.3427,
"step": 200
},
{
"epoch": 0.4976303317535545,
"grad_norm": 0.5207523671441107,
"learning_rate": 5e-06,
"loss": 0.3447,
"step": 210
},
{
"epoch": 0.5213270142180095,
"grad_norm": 0.45313363674774493,
"learning_rate": 5e-06,
"loss": 0.3408,
"step": 220
},
{
"epoch": 0.5450236966824644,
"grad_norm": 0.5162603087798662,
"learning_rate": 5e-06,
"loss": 0.341,
"step": 230
},
{
"epoch": 0.5687203791469194,
"grad_norm": 0.46580112576483534,
"learning_rate": 5e-06,
"loss": 0.3335,
"step": 240
},
{
"epoch": 0.5924170616113744,
"grad_norm": 0.608794869219161,
"learning_rate": 5e-06,
"loss": 0.3345,
"step": 250
},
{
"epoch": 0.6161137440758294,
"grad_norm": 0.523854370493108,
"learning_rate": 5e-06,
"loss": 0.3308,
"step": 260
},
{
"epoch": 0.6398104265402843,
"grad_norm": 0.5706943490018692,
"learning_rate": 5e-06,
"loss": 0.3361,
"step": 270
},
{
"epoch": 0.6635071090047393,
"grad_norm": 0.45300138137727003,
"learning_rate": 5e-06,
"loss": 0.3326,
"step": 280
},
{
"epoch": 0.6872037914691943,
"grad_norm": 0.45102429427517104,
"learning_rate": 5e-06,
"loss": 0.3302,
"step": 290
},
{
"epoch": 0.7109004739336493,
"grad_norm": 0.5137700452326517,
"learning_rate": 5e-06,
"loss": 0.3289,
"step": 300
},
{
"epoch": 0.7345971563981043,
"grad_norm": 0.6127249666946868,
"learning_rate": 5e-06,
"loss": 0.3316,
"step": 310
},
{
"epoch": 0.7582938388625592,
"grad_norm": 0.44932409996775663,
"learning_rate": 5e-06,
"loss": 0.326,
"step": 320
},
{
"epoch": 0.7819905213270142,
"grad_norm": 0.5532587149282668,
"learning_rate": 5e-06,
"loss": 0.3296,
"step": 330
},
{
"epoch": 0.8056872037914692,
"grad_norm": 0.43366234423399613,
"learning_rate": 5e-06,
"loss": 0.3292,
"step": 340
},
{
"epoch": 0.8293838862559242,
"grad_norm": 0.6322058991355771,
"learning_rate": 5e-06,
"loss": 0.3294,
"step": 350
},
{
"epoch": 0.8530805687203792,
"grad_norm": 0.46695206519297977,
"learning_rate": 5e-06,
"loss": 0.3302,
"step": 360
},
{
"epoch": 0.8767772511848341,
"grad_norm": 0.5812611546801564,
"learning_rate": 5e-06,
"loss": 0.3273,
"step": 370
},
{
"epoch": 0.9004739336492891,
"grad_norm": 0.5621804321411954,
"learning_rate": 5e-06,
"loss": 0.3192,
"step": 380
},
{
"epoch": 0.9241706161137441,
"grad_norm": 0.4679602140385812,
"learning_rate": 5e-06,
"loss": 0.3214,
"step": 390
},
{
"epoch": 0.9478672985781991,
"grad_norm": 0.5333407829184633,
"learning_rate": 5e-06,
"loss": 0.3204,
"step": 400
},
{
"epoch": 0.9715639810426541,
"grad_norm": 0.43987965909268334,
"learning_rate": 5e-06,
"loss": 0.3214,
"step": 410
},
{
"epoch": 0.995260663507109,
"grad_norm": 0.6709211874509404,
"learning_rate": 5e-06,
"loss": 0.3182,
"step": 420
},
{
"epoch": 1.0,
"eval_loss": 0.3187981843948364,
"eval_runtime": 41.0925,
"eval_samples_per_second": 276.547,
"eval_steps_per_second": 1.095,
"step": 422
},
{
"epoch": 1.018957345971564,
"grad_norm": 0.5760447436307622,
"learning_rate": 5e-06,
"loss": 0.287,
"step": 430
},
{
"epoch": 1.042654028436019,
"grad_norm": 0.6762165069988788,
"learning_rate": 5e-06,
"loss": 0.2762,
"step": 440
},
{
"epoch": 1.066350710900474,
"grad_norm": 0.47402123924703626,
"learning_rate": 5e-06,
"loss": 0.2734,
"step": 450
},
{
"epoch": 1.0900473933649288,
"grad_norm": 0.5516286942729635,
"learning_rate": 5e-06,
"loss": 0.2769,
"step": 460
},
{
"epoch": 1.113744075829384,
"grad_norm": 0.5287265469429161,
"learning_rate": 5e-06,
"loss": 0.273,
"step": 470
},
{
"epoch": 1.1374407582938388,
"grad_norm": 0.6297802215905517,
"learning_rate": 5e-06,
"loss": 0.2763,
"step": 480
},
{
"epoch": 1.161137440758294,
"grad_norm": 0.5655517444516921,
"learning_rate": 5e-06,
"loss": 0.2779,
"step": 490
},
{
"epoch": 1.1848341232227488,
"grad_norm": 0.4960690379887489,
"learning_rate": 5e-06,
"loss": 0.2745,
"step": 500
},
{
"epoch": 1.2085308056872037,
"grad_norm": 0.4549491415898102,
"learning_rate": 5e-06,
"loss": 0.2754,
"step": 510
},
{
"epoch": 1.2322274881516588,
"grad_norm": 0.4752122430144407,
"learning_rate": 5e-06,
"loss": 0.2731,
"step": 520
},
{
"epoch": 1.2559241706161137,
"grad_norm": 0.4647993470821906,
"learning_rate": 5e-06,
"loss": 0.2739,
"step": 530
},
{
"epoch": 1.2796208530805688,
"grad_norm": 0.5109279201523487,
"learning_rate": 5e-06,
"loss": 0.2714,
"step": 540
},
{
"epoch": 1.3033175355450237,
"grad_norm": 0.5840908685437068,
"learning_rate": 5e-06,
"loss": 0.2737,
"step": 550
},
{
"epoch": 1.3270142180094786,
"grad_norm": 0.5246063574530767,
"learning_rate": 5e-06,
"loss": 0.2751,
"step": 560
},
{
"epoch": 1.3507109004739337,
"grad_norm": 0.44184294378239247,
"learning_rate": 5e-06,
"loss": 0.2745,
"step": 570
},
{
"epoch": 1.3744075829383886,
"grad_norm": 0.5913058737082955,
"learning_rate": 5e-06,
"loss": 0.2734,
"step": 580
},
{
"epoch": 1.3981042654028437,
"grad_norm": 0.5008265601868412,
"learning_rate": 5e-06,
"loss": 0.2738,
"step": 590
},
{
"epoch": 1.4218009478672986,
"grad_norm": 0.49995889605138555,
"learning_rate": 5e-06,
"loss": 0.2744,
"step": 600
},
{
"epoch": 1.4454976303317535,
"grad_norm": 0.45557075455533524,
"learning_rate": 5e-06,
"loss": 0.2721,
"step": 610
},
{
"epoch": 1.4691943127962086,
"grad_norm": 0.6379507686940746,
"learning_rate": 5e-06,
"loss": 0.2732,
"step": 620
},
{
"epoch": 1.4928909952606635,
"grad_norm": 0.4583258359143169,
"learning_rate": 5e-06,
"loss": 0.2725,
"step": 630
},
{
"epoch": 1.5165876777251186,
"grad_norm": 0.5749182699938892,
"learning_rate": 5e-06,
"loss": 0.2665,
"step": 640
},
{
"epoch": 1.5402843601895735,
"grad_norm": 0.5326772009600166,
"learning_rate": 5e-06,
"loss": 0.2697,
"step": 650
},
{
"epoch": 1.5639810426540284,
"grad_norm": 0.5654654670246291,
"learning_rate": 5e-06,
"loss": 0.2673,
"step": 660
},
{
"epoch": 1.5876777251184833,
"grad_norm": 0.7750526195773435,
"learning_rate": 5e-06,
"loss": 0.274,
"step": 670
},
{
"epoch": 1.6113744075829384,
"grad_norm": 0.5315008234518865,
"learning_rate": 5e-06,
"loss": 0.2702,
"step": 680
},
{
"epoch": 1.6350710900473935,
"grad_norm": 0.5749838515161616,
"learning_rate": 5e-06,
"loss": 0.2695,
"step": 690
},
{
"epoch": 1.6587677725118484,
"grad_norm": 0.5001274305498948,
"learning_rate": 5e-06,
"loss": 0.2686,
"step": 700
},
{
"epoch": 1.6824644549763033,
"grad_norm": 0.5123878344164223,
"learning_rate": 5e-06,
"loss": 0.2707,
"step": 710
},
{
"epoch": 1.7061611374407581,
"grad_norm": 0.5614887710303913,
"learning_rate": 5e-06,
"loss": 0.2669,
"step": 720
},
{
"epoch": 1.7298578199052133,
"grad_norm": 0.549426396684614,
"learning_rate": 5e-06,
"loss": 0.2658,
"step": 730
},
{
"epoch": 1.7535545023696684,
"grad_norm": 0.6106476140869552,
"learning_rate": 5e-06,
"loss": 0.2687,
"step": 740
},
{
"epoch": 1.7772511848341233,
"grad_norm": 0.44864012484374616,
"learning_rate": 5e-06,
"loss": 0.2704,
"step": 750
},
{
"epoch": 1.8009478672985781,
"grad_norm": 0.6259517201369036,
"learning_rate": 5e-06,
"loss": 0.2673,
"step": 760
},
{
"epoch": 1.824644549763033,
"grad_norm": 0.5150026298786189,
"learning_rate": 5e-06,
"loss": 0.2693,
"step": 770
},
{
"epoch": 1.8483412322274881,
"grad_norm": 0.5457767160105244,
"learning_rate": 5e-06,
"loss": 0.2652,
"step": 780
},
{
"epoch": 1.8720379146919433,
"grad_norm": 0.4576520211880369,
"learning_rate": 5e-06,
"loss": 0.264,
"step": 790
},
{
"epoch": 1.8957345971563981,
"grad_norm": 0.42780791505078525,
"learning_rate": 5e-06,
"loss": 0.2707,
"step": 800
},
{
"epoch": 1.919431279620853,
"grad_norm": 0.48992225159350095,
"learning_rate": 5e-06,
"loss": 0.2645,
"step": 810
},
{
"epoch": 1.943127962085308,
"grad_norm": 0.5410708029490111,
"learning_rate": 5e-06,
"loss": 0.2627,
"step": 820
},
{
"epoch": 1.966824644549763,
"grad_norm": 0.4336435751671359,
"learning_rate": 5e-06,
"loss": 0.2663,
"step": 830
},
{
"epoch": 1.9905213270142181,
"grad_norm": 0.43877727937635,
"learning_rate": 5e-06,
"loss": 0.2676,
"step": 840
},
{
"epoch": 2.0,
"eval_loss": 0.30145883560180664,
"eval_runtime": 41.7379,
"eval_samples_per_second": 272.27,
"eval_steps_per_second": 1.078,
"step": 844
},
{
"epoch": 2.014218009478673,
"grad_norm": 0.6524222083837387,
"learning_rate": 5e-06,
"loss": 0.2369,
"step": 850
},
{
"epoch": 2.037914691943128,
"grad_norm": 0.5787525251465172,
"learning_rate": 5e-06,
"loss": 0.2195,
"step": 860
},
{
"epoch": 2.061611374407583,
"grad_norm": 0.5756101918178816,
"learning_rate": 5e-06,
"loss": 0.2199,
"step": 870
},
{
"epoch": 2.085308056872038,
"grad_norm": 0.6236522012957664,
"learning_rate": 5e-06,
"loss": 0.2204,
"step": 880
},
{
"epoch": 2.109004739336493,
"grad_norm": 0.6419600675102759,
"learning_rate": 5e-06,
"loss": 0.2207,
"step": 890
},
{
"epoch": 2.132701421800948,
"grad_norm": 0.6691668192668253,
"learning_rate": 5e-06,
"loss": 0.2205,
"step": 900
},
{
"epoch": 2.156398104265403,
"grad_norm": 0.5662613639218179,
"learning_rate": 5e-06,
"loss": 0.2202,
"step": 910
},
{
"epoch": 2.1800947867298577,
"grad_norm": 0.5285961633074623,
"learning_rate": 5e-06,
"loss": 0.2213,
"step": 920
},
{
"epoch": 2.2037914691943126,
"grad_norm": 0.5182873734926402,
"learning_rate": 5e-06,
"loss": 0.2237,
"step": 930
},
{
"epoch": 2.227488151658768,
"grad_norm": 0.455467292575364,
"learning_rate": 5e-06,
"loss": 0.2196,
"step": 940
},
{
"epoch": 2.251184834123223,
"grad_norm": 0.5910920449175188,
"learning_rate": 5e-06,
"loss": 0.2213,
"step": 950
},
{
"epoch": 2.2748815165876777,
"grad_norm": 0.48913238274264176,
"learning_rate": 5e-06,
"loss": 0.22,
"step": 960
},
{
"epoch": 2.2985781990521326,
"grad_norm": 0.48506465561903783,
"learning_rate": 5e-06,
"loss": 0.2209,
"step": 970
},
{
"epoch": 2.322274881516588,
"grad_norm": 0.4777608016950058,
"learning_rate": 5e-06,
"loss": 0.225,
"step": 980
},
{
"epoch": 2.345971563981043,
"grad_norm": 0.5663901860164571,
"learning_rate": 5e-06,
"loss": 0.2236,
"step": 990
},
{
"epoch": 2.3696682464454977,
"grad_norm": 0.5218376695680845,
"learning_rate": 5e-06,
"loss": 0.2216,
"step": 1000
},
{
"epoch": 2.3933649289099526,
"grad_norm": 0.5699143314581089,
"learning_rate": 5e-06,
"loss": 0.2233,
"step": 1010
},
{
"epoch": 2.4170616113744074,
"grad_norm": 0.4742020161888442,
"learning_rate": 5e-06,
"loss": 0.2228,
"step": 1020
},
{
"epoch": 2.4407582938388623,
"grad_norm": 0.5183880143591837,
"learning_rate": 5e-06,
"loss": 0.2226,
"step": 1030
},
{
"epoch": 2.4644549763033177,
"grad_norm": 0.6480574727941906,
"learning_rate": 5e-06,
"loss": 0.2211,
"step": 1040
},
{
"epoch": 2.4881516587677726,
"grad_norm": 0.5046467998532,
"learning_rate": 5e-06,
"loss": 0.2225,
"step": 1050
},
{
"epoch": 2.5118483412322274,
"grad_norm": 0.6234765711742589,
"learning_rate": 5e-06,
"loss": 0.2207,
"step": 1060
},
{
"epoch": 2.5355450236966823,
"grad_norm": 0.5463355131695685,
"learning_rate": 5e-06,
"loss": 0.2224,
"step": 1070
},
{
"epoch": 2.5592417061611377,
"grad_norm": 0.5045207519726893,
"learning_rate": 5e-06,
"loss": 0.2193,
"step": 1080
},
{
"epoch": 2.5829383886255926,
"grad_norm": 0.5385436231173875,
"learning_rate": 5e-06,
"loss": 0.2215,
"step": 1090
},
{
"epoch": 2.6066350710900474,
"grad_norm": 0.5203528739245922,
"learning_rate": 5e-06,
"loss": 0.2239,
"step": 1100
},
{
"epoch": 2.6303317535545023,
"grad_norm": 0.515758932015822,
"learning_rate": 5e-06,
"loss": 0.225,
"step": 1110
},
{
"epoch": 2.654028436018957,
"grad_norm": 0.5245283280931154,
"learning_rate": 5e-06,
"loss": 0.2217,
"step": 1120
},
{
"epoch": 2.677725118483412,
"grad_norm": 0.47961811011847694,
"learning_rate": 5e-06,
"loss": 0.2223,
"step": 1130
},
{
"epoch": 2.7014218009478674,
"grad_norm": 0.532667073838913,
"learning_rate": 5e-06,
"loss": 0.2202,
"step": 1140
},
{
"epoch": 2.7251184834123223,
"grad_norm": 0.4731691182426918,
"learning_rate": 5e-06,
"loss": 0.2203,
"step": 1150
},
{
"epoch": 2.748815165876777,
"grad_norm": 0.5085530590505501,
"learning_rate": 5e-06,
"loss": 0.2195,
"step": 1160
},
{
"epoch": 2.772511848341232,
"grad_norm": 0.5212827222451933,
"learning_rate": 5e-06,
"loss": 0.2201,
"step": 1170
},
{
"epoch": 2.7962085308056874,
"grad_norm": 0.6101309650612445,
"learning_rate": 5e-06,
"loss": 0.2241,
"step": 1180
},
{
"epoch": 2.8199052132701423,
"grad_norm": 0.524976392694722,
"learning_rate": 5e-06,
"loss": 0.2185,
"step": 1190
},
{
"epoch": 2.843601895734597,
"grad_norm": 0.5268018676437338,
"learning_rate": 5e-06,
"loss": 0.2218,
"step": 1200
},
{
"epoch": 2.867298578199052,
"grad_norm": 0.4815524564659738,
"learning_rate": 5e-06,
"loss": 0.2211,
"step": 1210
},
{
"epoch": 2.890995260663507,
"grad_norm": 0.5166341199930529,
"learning_rate": 5e-06,
"loss": 0.2194,
"step": 1220
},
{
"epoch": 2.914691943127962,
"grad_norm": 0.4522595493688504,
"learning_rate": 5e-06,
"loss": 0.2258,
"step": 1230
},
{
"epoch": 2.938388625592417,
"grad_norm": 0.5232915579904739,
"learning_rate": 5e-06,
"loss": 0.2218,
"step": 1240
},
{
"epoch": 2.962085308056872,
"grad_norm": 0.5580173773855611,
"learning_rate": 5e-06,
"loss": 0.2211,
"step": 1250
},
{
"epoch": 2.985781990521327,
"grad_norm": 0.5398857489449794,
"learning_rate": 5e-06,
"loss": 0.2241,
"step": 1260
},
{
"epoch": 3.0,
"eval_loss": 0.30088362097740173,
"eval_runtime": 40.2088,
"eval_samples_per_second": 282.624,
"eval_steps_per_second": 1.119,
"step": 1266
},
{
"epoch": 3.0,
"step": 1266,
"total_flos": 2120597152727040.0,
"train_loss": 0.2836323574444317,
"train_runtime": 7986.5453,
"train_samples_per_second": 81.101,
"train_steps_per_second": 0.159
}
],
"logging_steps": 10,
"max_steps": 1266,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2120597152727040.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}