{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 100,
"global_step": 833,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.012004801920768308,
"grad_norm": 0.49814867973327637,
"learning_rate": 9.879951980792317e-05,
"loss": 2.3071,
"step": 10
},
{
"epoch": 0.024009603841536616,
"grad_norm": 0.4931434690952301,
"learning_rate": 9.759903961584634e-05,
"loss": 2.1442,
"step": 20
},
{
"epoch": 0.03601440576230492,
"grad_norm": 0.9165758490562439,
"learning_rate": 9.639855942376951e-05,
"loss": 2.0314,
"step": 30
},
{
"epoch": 0.04801920768307323,
"grad_norm": 0.4459473192691803,
"learning_rate": 9.519807923169268e-05,
"loss": 2.0196,
"step": 40
},
{
"epoch": 0.060024009603841535,
"grad_norm": 0.7976562976837158,
"learning_rate": 9.399759903961585e-05,
"loss": 1.9785,
"step": 50
},
{
"epoch": 0.07202881152460984,
"grad_norm": 0.6203379034996033,
"learning_rate": 9.279711884753903e-05,
"loss": 1.9363,
"step": 60
},
{
"epoch": 0.08403361344537816,
"grad_norm": 0.5313595533370972,
"learning_rate": 9.159663865546218e-05,
"loss": 1.9209,
"step": 70
},
{
"epoch": 0.09603841536614646,
"grad_norm": 0.6249675750732422,
"learning_rate": 9.039615846338536e-05,
"loss": 1.986,
"step": 80
},
{
"epoch": 0.10804321728691477,
"grad_norm": 0.5185940265655518,
"learning_rate": 8.919567827130852e-05,
"loss": 1.9521,
"step": 90
},
{
"epoch": 0.12004801920768307,
"grad_norm": 0.4526331126689911,
"learning_rate": 8.79951980792317e-05,
"loss": 1.8528,
"step": 100
},
{
"epoch": 0.12004801920768307,
"eval_loss": 1.8883458375930786,
"eval_runtime": 38.5366,
"eval_samples_per_second": 25.949,
"eval_steps_per_second": 4.334,
"step": 100
},
{
"epoch": 0.13205282112845138,
"grad_norm": 0.3635704815387726,
"learning_rate": 8.679471788715487e-05,
"loss": 1.8359,
"step": 110
},
{
"epoch": 0.14405762304921968,
"grad_norm": 0.4598451554775238,
"learning_rate": 8.559423769507804e-05,
"loss": 1.9101,
"step": 120
},
{
"epoch": 0.15606242496998798,
"grad_norm": 0.43790119886398315,
"learning_rate": 8.43937575030012e-05,
"loss": 1.8116,
"step": 130
},
{
"epoch": 0.16806722689075632,
"grad_norm": 0.43906617164611816,
"learning_rate": 8.319327731092437e-05,
"loss": 1.8962,
"step": 140
},
{
"epoch": 0.18007202881152462,
"grad_norm": 0.43742913007736206,
"learning_rate": 8.199279711884754e-05,
"loss": 1.8382,
"step": 150
},
{
"epoch": 0.19207683073229292,
"grad_norm": 0.4784979224205017,
"learning_rate": 8.079231692677071e-05,
"loss": 1.8842,
"step": 160
},
{
"epoch": 0.20408163265306123,
"grad_norm": 0.4014301598072052,
"learning_rate": 7.959183673469388e-05,
"loss": 1.863,
"step": 170
},
{
"epoch": 0.21608643457382953,
"grad_norm": 0.5035635828971863,
"learning_rate": 7.839135654261706e-05,
"loss": 1.8979,
"step": 180
},
{
"epoch": 0.22809123649459784,
"grad_norm": 0.534414529800415,
"learning_rate": 7.719087635054022e-05,
"loss": 1.9105,
"step": 190
},
{
"epoch": 0.24009603841536614,
"grad_norm": 0.4861285388469696,
"learning_rate": 7.599039615846338e-05,
"loss": 1.7796,
"step": 200
},
{
"epoch": 0.24009603841536614,
"eval_loss": 1.8688795566558838,
"eval_runtime": 38.5301,
"eval_samples_per_second": 25.954,
"eval_steps_per_second": 4.334,
"step": 200
},
{
"epoch": 0.25210084033613445,
"grad_norm": 0.34365198016166687,
"learning_rate": 7.478991596638657e-05,
"loss": 1.84,
"step": 210
},
{
"epoch": 0.26410564225690275,
"grad_norm": 0.42961230874061584,
"learning_rate": 7.358943577430972e-05,
"loss": 1.9065,
"step": 220
},
{
"epoch": 0.27611044417767105,
"grad_norm": 0.389285683631897,
"learning_rate": 7.23889555822329e-05,
"loss": 1.8709,
"step": 230
},
{
"epoch": 0.28811524609843936,
"grad_norm": 0.4626094698905945,
"learning_rate": 7.118847539015606e-05,
"loss": 1.8709,
"step": 240
},
{
"epoch": 0.30012004801920766,
"grad_norm": 0.46512117981910706,
"learning_rate": 6.998799519807924e-05,
"loss": 1.8201,
"step": 250
},
{
"epoch": 0.31212484993997597,
"grad_norm": 0.5081538558006287,
"learning_rate": 6.878751500600241e-05,
"loss": 1.8761,
"step": 260
},
{
"epoch": 0.3241296518607443,
"grad_norm": 0.7665489315986633,
"learning_rate": 6.758703481392558e-05,
"loss": 1.8218,
"step": 270
},
{
"epoch": 0.33613445378151263,
"grad_norm": 0.4920073449611664,
"learning_rate": 6.638655462184874e-05,
"loss": 1.9478,
"step": 280
},
{
"epoch": 0.34813925570228094,
"grad_norm": 0.5283176302909851,
"learning_rate": 6.518607442977191e-05,
"loss": 1.8343,
"step": 290
},
{
"epoch": 0.36014405762304924,
"grad_norm": 0.5958182215690613,
"learning_rate": 6.398559423769508e-05,
"loss": 1.8715,
"step": 300
},
{
"epoch": 0.36014405762304924,
"eval_loss": 1.8636847734451294,
"eval_runtime": 38.4834,
"eval_samples_per_second": 25.985,
"eval_steps_per_second": 4.34,
"step": 300
},
{
"epoch": 0.37214885954381755,
"grad_norm": 0.4687075912952423,
"learning_rate": 6.278511404561825e-05,
"loss": 1.8762,
"step": 310
},
{
"epoch": 0.38415366146458585,
"grad_norm": 0.5822851657867432,
"learning_rate": 6.158463385354142e-05,
"loss": 1.8943,
"step": 320
},
{
"epoch": 0.39615846338535415,
"grad_norm": 0.3719615042209625,
"learning_rate": 6.038415366146459e-05,
"loss": 1.8973,
"step": 330
},
{
"epoch": 0.40816326530612246,
"grad_norm": 0.36546796560287476,
"learning_rate": 5.918367346938776e-05,
"loss": 1.894,
"step": 340
},
{
"epoch": 0.42016806722689076,
"grad_norm": 0.38791441917419434,
"learning_rate": 5.7983193277310935e-05,
"loss": 1.8221,
"step": 350
},
{
"epoch": 0.43217286914765907,
"grad_norm": 0.40212976932525635,
"learning_rate": 5.6782713085234096e-05,
"loss": 1.8472,
"step": 360
},
{
"epoch": 0.44417767106842737,
"grad_norm": 0.43383002281188965,
"learning_rate": 5.558223289315727e-05,
"loss": 1.9356,
"step": 370
},
{
"epoch": 0.4561824729891957,
"grad_norm": 0.7097159028053284,
"learning_rate": 5.438175270108043e-05,
"loss": 1.9236,
"step": 380
},
{
"epoch": 0.468187274909964,
"grad_norm": 0.9919241666793823,
"learning_rate": 5.31812725090036e-05,
"loss": 1.8988,
"step": 390
},
{
"epoch": 0.4801920768307323,
"grad_norm": 0.365247517824173,
"learning_rate": 5.1980792316926776e-05,
"loss": 1.9492,
"step": 400
},
{
"epoch": 0.4801920768307323,
"eval_loss": 1.8609040975570679,
"eval_runtime": 38.8163,
"eval_samples_per_second": 25.762,
"eval_steps_per_second": 4.302,
"step": 400
},
{
"epoch": 0.4921968787515006,
"grad_norm": 0.44502708315849304,
"learning_rate": 5.078031212484994e-05,
"loss": 1.8592,
"step": 410
},
{
"epoch": 0.5042016806722689,
"grad_norm": 0.5130265951156616,
"learning_rate": 4.957983193277311e-05,
"loss": 1.9291,
"step": 420
},
{
"epoch": 0.5162064825930373,
"grad_norm": 0.4663325250148773,
"learning_rate": 4.837935174069628e-05,
"loss": 1.858,
"step": 430
},
{
"epoch": 0.5282112845138055,
"grad_norm": 0.7875126600265503,
"learning_rate": 4.717887154861945e-05,
"loss": 1.882,
"step": 440
},
{
"epoch": 0.5402160864345739,
"grad_norm": 0.5712985396385193,
"learning_rate": 4.5978391356542624e-05,
"loss": 1.8705,
"step": 450
},
{
"epoch": 0.5522208883553421,
"grad_norm": 0.48575717210769653,
"learning_rate": 4.477791116446579e-05,
"loss": 1.9747,
"step": 460
},
{
"epoch": 0.5642256902761105,
"grad_norm": 0.521267831325531,
"learning_rate": 4.3577430972388954e-05,
"loss": 1.8128,
"step": 470
},
{
"epoch": 0.5762304921968787,
"grad_norm": 0.43215957283973694,
"learning_rate": 4.237695078031212e-05,
"loss": 1.8486,
"step": 480
},
{
"epoch": 0.5882352941176471,
"grad_norm": 0.4327848255634308,
"learning_rate": 4.11764705882353e-05,
"loss": 1.8379,
"step": 490
},
{
"epoch": 0.6002400960384153,
"grad_norm": 0.5328503847122192,
"learning_rate": 3.9975990396158466e-05,
"loss": 1.8718,
"step": 500
},
{
"epoch": 0.6002400960384153,
"eval_loss": 1.8586153984069824,
"eval_runtime": 38.4963,
"eval_samples_per_second": 25.977,
"eval_steps_per_second": 4.338,
"step": 500
},
{
"epoch": 0.6122448979591837,
"grad_norm": 0.5828935503959656,
"learning_rate": 3.8775510204081634e-05,
"loss": 1.8329,
"step": 510
},
{
"epoch": 0.6242496998799519,
"grad_norm": 0.7918308973312378,
"learning_rate": 3.75750300120048e-05,
"loss": 1.8729,
"step": 520
},
{
"epoch": 0.6362545018007203,
"grad_norm": 0.40867704153060913,
"learning_rate": 3.637454981992797e-05,
"loss": 1.8365,
"step": 530
},
{
"epoch": 0.6482593037214885,
"grad_norm": 0.426268994808197,
"learning_rate": 3.517406962785114e-05,
"loss": 1.8463,
"step": 540
},
{
"epoch": 0.6602641056422569,
"grad_norm": 0.8128913044929504,
"learning_rate": 3.3973589435774314e-05,
"loss": 1.9473,
"step": 550
},
{
"epoch": 0.6722689075630253,
"grad_norm": 0.35315391421318054,
"learning_rate": 3.277310924369748e-05,
"loss": 1.8123,
"step": 560
},
{
"epoch": 0.6842737094837935,
"grad_norm": 0.5528764724731445,
"learning_rate": 3.157262905162065e-05,
"loss": 1.8663,
"step": 570
},
{
"epoch": 0.6962785114045619,
"grad_norm": 0.4432496726512909,
"learning_rate": 3.037214885954382e-05,
"loss": 1.8923,
"step": 580
},
{
"epoch": 0.7082833133253301,
"grad_norm": 0.415711373090744,
"learning_rate": 2.917166866746699e-05,
"loss": 1.8289,
"step": 590
},
{
"epoch": 0.7202881152460985,
"grad_norm": 0.42969274520874023,
"learning_rate": 2.797118847539016e-05,
"loss": 1.8472,
"step": 600
},
{
"epoch": 0.7202881152460985,
"eval_loss": 1.8567453622817993,
"eval_runtime": 38.6325,
"eval_samples_per_second": 25.885,
"eval_steps_per_second": 4.323,
"step": 600
},
{
"epoch": 0.7322929171668667,
"grad_norm": 0.5071336030960083,
"learning_rate": 2.6770708283313327e-05,
"loss": 1.8568,
"step": 610
},
{
"epoch": 0.7442977190876351,
"grad_norm": 0.4259433150291443,
"learning_rate": 2.5570228091236498e-05,
"loss": 1.8289,
"step": 620
},
{
"epoch": 0.7563025210084033,
"grad_norm": 0.522114634513855,
"learning_rate": 2.4369747899159663e-05,
"loss": 1.9236,
"step": 630
},
{
"epoch": 0.7683073229291717,
"grad_norm": 0.4996345341205597,
"learning_rate": 2.3169267707082835e-05,
"loss": 1.8676,
"step": 640
},
{
"epoch": 0.78031212484994,
"grad_norm": 0.5358065962791443,
"learning_rate": 2.1968787515006003e-05,
"loss": 1.9048,
"step": 650
},
{
"epoch": 0.7923169267707083,
"grad_norm": 0.4080793261528015,
"learning_rate": 2.076830732292917e-05,
"loss": 1.9375,
"step": 660
},
{
"epoch": 0.8043217286914766,
"grad_norm": 0.4291868209838867,
"learning_rate": 1.9567827130852343e-05,
"loss": 1.8131,
"step": 670
},
{
"epoch": 0.8163265306122449,
"grad_norm": 0.4137159287929535,
"learning_rate": 1.836734693877551e-05,
"loss": 1.8438,
"step": 680
},
{
"epoch": 0.8283313325330132,
"grad_norm": 0.46769535541534424,
"learning_rate": 1.7166866746698683e-05,
"loss": 1.8608,
"step": 690
},
{
"epoch": 0.8403361344537815,
"grad_norm": 0.3832717537879944,
"learning_rate": 1.5966386554621848e-05,
"loss": 1.7676,
"step": 700
},
{
"epoch": 0.8403361344537815,
"eval_loss": 1.8551292419433594,
"eval_runtime": 38.7937,
"eval_samples_per_second": 25.777,
"eval_steps_per_second": 4.305,
"step": 700
},
{
"epoch": 0.8523409363745498,
"grad_norm": 0.6028046011924744,
"learning_rate": 1.4765906362545018e-05,
"loss": 1.8747,
"step": 710
},
{
"epoch": 0.8643457382953181,
"grad_norm": 0.4539136290550232,
"learning_rate": 1.3565426170468188e-05,
"loss": 1.8388,
"step": 720
},
{
"epoch": 0.8763505402160864,
"grad_norm": 0.5354043245315552,
"learning_rate": 1.2364945978391356e-05,
"loss": 1.8492,
"step": 730
},
{
"epoch": 0.8883553421368547,
"grad_norm": 0.5466801524162292,
"learning_rate": 1.1164465786314526e-05,
"loss": 1.8355,
"step": 740
},
{
"epoch": 0.9003601440576231,
"grad_norm": 0.7095188498497009,
"learning_rate": 9.963985594237696e-06,
"loss": 1.8558,
"step": 750
},
{
"epoch": 0.9123649459783914,
"grad_norm": 0.5179396867752075,
"learning_rate": 8.763505402160866e-06,
"loss": 1.9008,
"step": 760
},
{
"epoch": 0.9243697478991597,
"grad_norm": 0.41170573234558105,
"learning_rate": 7.563025210084033e-06,
"loss": 1.772,
"step": 770
},
{
"epoch": 0.936374549819928,
"grad_norm": 0.4213010370731354,
"learning_rate": 6.362545018007203e-06,
"loss": 1.8478,
"step": 780
},
{
"epoch": 0.9483793517406963,
"grad_norm": 0.42276838421821594,
"learning_rate": 5.162064825930372e-06,
"loss": 1.8858,
"step": 790
},
{
"epoch": 0.9603841536614646,
"grad_norm": 0.4341106712818146,
"learning_rate": 3.9615846338535415e-06,
"loss": 1.8898,
"step": 800
},
{
"epoch": 0.9603841536614646,
"eval_loss": 1.8548470735549927,
"eval_runtime": 38.7458,
"eval_samples_per_second": 25.809,
"eval_steps_per_second": 4.31,
"step": 800
},
{
"epoch": 0.9723889555822329,
"grad_norm": 0.42987948656082153,
"learning_rate": 2.7611044417767106e-06,
"loss": 1.8521,
"step": 810
},
{
"epoch": 0.9843937575030012,
"grad_norm": 0.5700000524520874,
"learning_rate": 1.56062424969988e-06,
"loss": 1.8504,
"step": 820
},
{
"epoch": 0.9963985594237695,
"grad_norm": 0.4523591101169586,
"learning_rate": 3.601440576230492e-07,
"loss": 1.9274,
"step": 830
}
],
"logging_steps": 10,
"max_steps": 833,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.3742818208228966e+17,
"train_batch_size": 12,
"trial_name": null,
"trial_params": null
}