{
"best_metric": 0.02727937512099743,
"best_model_checkpoint": "deberta-v3-xsmall-zyda-2-transformed-readability-new/checkpoint-40767",
"epoch": 3.0,
"eval_steps": 500,
"global_step": 40767,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03679446611229671,
"grad_norm": 1.7569845914840698,
"learning_rate": 4.9386758898128385e-05,
"loss": 0.0913,
"step": 500
},
{
"epoch": 0.07358893222459342,
"grad_norm": 1.4311065673828125,
"learning_rate": 4.877351779625678e-05,
"loss": 0.0531,
"step": 1000
},
{
"epoch": 0.11038339833689013,
"grad_norm": 0.627588152885437,
"learning_rate": 4.8160276694385164e-05,
"loss": 0.0463,
"step": 1500
},
{
"epoch": 0.14717786444918685,
"grad_norm": 1.7137391567230225,
"learning_rate": 4.754703559251355e-05,
"loss": 0.0419,
"step": 2000
},
{
"epoch": 0.18397233056148354,
"grad_norm": 0.31713053584098816,
"learning_rate": 4.693379449064194e-05,
"loss": 0.0404,
"step": 2500
},
{
"epoch": 0.22076679667378027,
"grad_norm": 0.9696204662322998,
"learning_rate": 4.632055338877033e-05,
"loss": 0.0389,
"step": 3000
},
{
"epoch": 0.257561262786077,
"grad_norm": 0.7807271480560303,
"learning_rate": 4.570731228689872e-05,
"loss": 0.0383,
"step": 3500
},
{
"epoch": 0.2943557288983737,
"grad_norm": 0.7852717638015747,
"learning_rate": 4.509407118502711e-05,
"loss": 0.0369,
"step": 4000
},
{
"epoch": 0.3311501950106704,
"grad_norm": 0.4416508674621582,
"learning_rate": 4.448083008315549e-05,
"loss": 0.0363,
"step": 4500
},
{
"epoch": 0.3679446611229671,
"grad_norm": 0.6548042297363281,
"learning_rate": 4.386758898128389e-05,
"loss": 0.0347,
"step": 5000
},
{
"epoch": 0.40473912723526384,
"grad_norm": 0.4651222825050354,
"learning_rate": 4.325434787941227e-05,
"loss": 0.0349,
"step": 5500
},
{
"epoch": 0.44153359334756054,
"grad_norm": 0.6483996510505676,
"learning_rate": 4.264110677754066e-05,
"loss": 0.0332,
"step": 6000
},
{
"epoch": 0.47832805945985724,
"grad_norm": 0.6971497535705566,
"learning_rate": 4.202786567566905e-05,
"loss": 0.0339,
"step": 6500
},
{
"epoch": 0.515122525572154,
"grad_norm": 0.6818335652351379,
"learning_rate": 4.141462457379743e-05,
"loss": 0.033,
"step": 7000
},
{
"epoch": 0.5519169916844506,
"grad_norm": 0.26788613200187683,
"learning_rate": 4.080138347192582e-05,
"loss": 0.0324,
"step": 7500
},
{
"epoch": 0.5887114577967474,
"grad_norm": 0.5651209950447083,
"learning_rate": 4.018814237005421e-05,
"loss": 0.0327,
"step": 8000
},
{
"epoch": 0.625505923909044,
"grad_norm": 0.526560366153717,
"learning_rate": 3.95749012681826e-05,
"loss": 0.0323,
"step": 8500
},
{
"epoch": 0.6623003900213408,
"grad_norm": 0.5062350630760193,
"learning_rate": 3.896166016631099e-05,
"loss": 0.0317,
"step": 9000
},
{
"epoch": 0.6990948561336375,
"grad_norm": 1.0407236814498901,
"learning_rate": 3.834841906443938e-05,
"loss": 0.0313,
"step": 9500
},
{
"epoch": 0.7358893222459342,
"grad_norm": 0.2916816473007202,
"learning_rate": 3.773517796256776e-05,
"loss": 0.0309,
"step": 10000
},
{
"epoch": 0.7726837883582309,
"grad_norm": 0.6440730690956116,
"learning_rate": 3.712193686069616e-05,
"loss": 0.031,
"step": 10500
},
{
"epoch": 0.8094782544705277,
"grad_norm": 0.3350902199745178,
"learning_rate": 3.650869575882454e-05,
"loss": 0.03,
"step": 11000
},
{
"epoch": 0.8462727205828243,
"grad_norm": 0.40470924973487854,
"learning_rate": 3.589545465695293e-05,
"loss": 0.0303,
"step": 11500
},
{
"epoch": 0.8830671866951211,
"grad_norm": 0.5961210131645203,
"learning_rate": 3.528221355508132e-05,
"loss": 0.0298,
"step": 12000
},
{
"epoch": 0.9198616528074177,
"grad_norm": 0.36755993962287903,
"learning_rate": 3.466897245320971e-05,
"loss": 0.0303,
"step": 12500
},
{
"epoch": 0.9566561189197145,
"grad_norm": 0.3088076114654541,
"learning_rate": 3.405573135133809e-05,
"loss": 0.03,
"step": 13000
},
{
"epoch": 0.9934505850320112,
"grad_norm": 0.4544014632701874,
"learning_rate": 3.344249024946648e-05,
"loss": 0.0297,
"step": 13500
},
{
"epoch": 1.0,
"eval_loss": 0.03021918050944805,
"eval_mse": 0.03021917968962894,
"eval_runtime": 50.6672,
"eval_samples_per_second": 986.831,
"eval_steps_per_second": 123.354,
"step": 13589
},
{
"epoch": 1.030245051144308,
"grad_norm": 0.4768720269203186,
"learning_rate": 3.282924914759487e-05,
"loss": 0.0272,
"step": 14000
},
{
"epoch": 1.0670395172566045,
"grad_norm": 0.4547971189022064,
"learning_rate": 3.221600804572326e-05,
"loss": 0.0268,
"step": 14500
},
{
"epoch": 1.1038339833689013,
"grad_norm": 0.3559289276599884,
"learning_rate": 3.160276694385165e-05,
"loss": 0.0259,
"step": 15000
},
{
"epoch": 1.140628449481198,
"grad_norm": 0.5827292203903198,
"learning_rate": 3.098952584198003e-05,
"loss": 0.0267,
"step": 15500
},
{
"epoch": 1.1774229155934948,
"grad_norm": 0.46800583600997925,
"learning_rate": 3.0376284740108423e-05,
"loss": 0.0265,
"step": 16000
},
{
"epoch": 1.2142173817057915,
"grad_norm": 0.39976394176483154,
"learning_rate": 2.976304363823681e-05,
"loss": 0.0267,
"step": 16500
},
{
"epoch": 1.2510118478180883,
"grad_norm": 0.3947591185569763,
"learning_rate": 2.91498025363652e-05,
"loss": 0.0272,
"step": 17000
},
{
"epoch": 1.2878063139303848,
"grad_norm": 0.2981387674808502,
"learning_rate": 2.8536561434493587e-05,
"loss": 0.0262,
"step": 17500
},
{
"epoch": 1.3246007800426816,
"grad_norm": 0.6237615346908569,
"learning_rate": 2.7923320332621977e-05,
"loss": 0.0268,
"step": 18000
},
{
"epoch": 1.3613952461549783,
"grad_norm": 0.6127618551254272,
"learning_rate": 2.7310079230750363e-05,
"loss": 0.0265,
"step": 18500
},
{
"epoch": 1.398189712267275,
"grad_norm": 0.4457475244998932,
"learning_rate": 2.6696838128878755e-05,
"loss": 0.0266,
"step": 19000
},
{
"epoch": 1.4349841783795716,
"grad_norm": 0.32558730244636536,
"learning_rate": 2.6083597027007138e-05,
"loss": 0.0265,
"step": 19500
},
{
"epoch": 1.4717786444918683,
"grad_norm": 0.321478009223938,
"learning_rate": 2.5470355925135524e-05,
"loss": 0.0262,
"step": 20000
},
{
"epoch": 1.508573110604165,
"grad_norm": 0.42566895484924316,
"learning_rate": 2.4857114823263916e-05,
"loss": 0.0256,
"step": 20500
},
{
"epoch": 1.5453675767164619,
"grad_norm": 0.382902592420578,
"learning_rate": 2.4243873721392306e-05,
"loss": 0.0264,
"step": 21000
},
{
"epoch": 1.5821620428287586,
"grad_norm": 0.45045822858810425,
"learning_rate": 2.3630632619520692e-05,
"loss": 0.0253,
"step": 21500
},
{
"epoch": 1.6189565089410554,
"grad_norm": 0.317402184009552,
"learning_rate": 2.301739151764908e-05,
"loss": 0.0252,
"step": 22000
},
{
"epoch": 1.6557509750533521,
"grad_norm": 0.43501928448677063,
"learning_rate": 2.2404150415777467e-05,
"loss": 0.0253,
"step": 22500
},
{
"epoch": 1.6925454411656486,
"grad_norm": 0.20946815609931946,
"learning_rate": 2.1790909313905856e-05,
"loss": 0.0256,
"step": 23000
},
{
"epoch": 1.7293399072779454,
"grad_norm": 0.4793068766593933,
"learning_rate": 2.1177668212034242e-05,
"loss": 0.0253,
"step": 23500
},
{
"epoch": 1.7661343733902422,
"grad_norm": 0.5443638563156128,
"learning_rate": 2.056442711016263e-05,
"loss": 0.0253,
"step": 24000
},
{
"epoch": 1.8029288395025387,
"grad_norm": 0.8820445537567139,
"learning_rate": 1.995118600829102e-05,
"loss": 0.0248,
"step": 24500
},
{
"epoch": 1.8397233056148354,
"grad_norm": 0.6084474921226501,
"learning_rate": 1.933794490641941e-05,
"loss": 0.0251,
"step": 25000
},
{
"epoch": 1.8765177717271322,
"grad_norm": 0.42228248715400696,
"learning_rate": 1.8724703804547796e-05,
"loss": 0.025,
"step": 25500
},
{
"epoch": 1.913312237839429,
"grad_norm": 0.4069421887397766,
"learning_rate": 1.8111462702676185e-05,
"loss": 0.0246,
"step": 26000
},
{
"epoch": 1.9501067039517257,
"grad_norm": 0.5044609904289246,
"learning_rate": 1.7498221600804575e-05,
"loss": 0.0249,
"step": 26500
},
{
"epoch": 1.9869011700640224,
"grad_norm": 0.25344032049179077,
"learning_rate": 1.688498049893296e-05,
"loss": 0.0249,
"step": 27000
},
{
"epoch": 2.0,
"eval_loss": 0.02789381518959999,
"eval_mse": 0.027893818228878035,
"eval_runtime": 58.0401,
"eval_samples_per_second": 861.474,
"eval_steps_per_second": 107.684,
"step": 27178
},
{
"epoch": 2.023695636176319,
"grad_norm": 0.2608506679534912,
"learning_rate": 1.627173939706135e-05,
"loss": 0.023,
"step": 27500
},
{
"epoch": 2.060490102288616,
"grad_norm": 0.25890249013900757,
"learning_rate": 1.565849829518974e-05,
"loss": 0.0226,
"step": 28000
},
{
"epoch": 2.0972845684009127,
"grad_norm": 0.577273428440094,
"learning_rate": 1.5045257193318127e-05,
"loss": 0.0225,
"step": 28500
},
{
"epoch": 2.134079034513209,
"grad_norm": 0.3473275303840637,
"learning_rate": 1.4432016091446513e-05,
"loss": 0.0228,
"step": 29000
},
{
"epoch": 2.1708735006255058,
"grad_norm": 0.2392967790365219,
"learning_rate": 1.38187749895749e-05,
"loss": 0.0223,
"step": 29500
},
{
"epoch": 2.2076679667378025,
"grad_norm": 0.38996148109436035,
"learning_rate": 1.320553388770329e-05,
"loss": 0.0225,
"step": 30000
},
{
"epoch": 2.2444624328500993,
"grad_norm": 0.8742114305496216,
"learning_rate": 1.2592292785831677e-05,
"loss": 0.0227,
"step": 30500
},
{
"epoch": 2.281256898962396,
"grad_norm": 0.2552475929260254,
"learning_rate": 1.1979051683960066e-05,
"loss": 0.0223,
"step": 31000
},
{
"epoch": 2.318051365074693,
"grad_norm": 0.3917369842529297,
"learning_rate": 1.1365810582088454e-05,
"loss": 0.0222,
"step": 31500
},
{
"epoch": 2.3548458311869895,
"grad_norm": 0.31826546788215637,
"learning_rate": 1.0752569480216842e-05,
"loss": 0.0221,
"step": 32000
},
{
"epoch": 2.3916402972992863,
"grad_norm": 0.3128163516521454,
"learning_rate": 1.0139328378345231e-05,
"loss": 0.0226,
"step": 32500
},
{
"epoch": 2.428434763411583,
"grad_norm": 0.4479590058326721,
"learning_rate": 9.526087276473619e-06,
"loss": 0.0222,
"step": 33000
},
{
"epoch": 2.46522922952388,
"grad_norm": 0.2909683883190155,
"learning_rate": 8.912846174602008e-06,
"loss": 0.0217,
"step": 33500
},
{
"epoch": 2.5020236956361765,
"grad_norm": 0.30918726325035095,
"learning_rate": 8.299605072730394e-06,
"loss": 0.0222,
"step": 34000
},
{
"epoch": 2.5388181617484733,
"grad_norm": 0.36887994408607483,
"learning_rate": 7.686363970858783e-06,
"loss": 0.0217,
"step": 34500
},
{
"epoch": 2.5756126278607696,
"grad_norm": 0.38799503445625305,
"learning_rate": 7.073122868987171e-06,
"loss": 0.0227,
"step": 35000
},
{
"epoch": 2.6124070939730664,
"grad_norm": 0.21477651596069336,
"learning_rate": 6.459881767115559e-06,
"loss": 0.0219,
"step": 35500
},
{
"epoch": 2.649201560085363,
"grad_norm": 0.367152601480484,
"learning_rate": 5.846640665243948e-06,
"loss": 0.0219,
"step": 36000
},
{
"epoch": 2.68599602619766,
"grad_norm": 0.2770203649997711,
"learning_rate": 5.233399563372335e-06,
"loss": 0.022,
"step": 36500
},
{
"epoch": 2.7227904923099566,
"grad_norm": 0.31007689237594604,
"learning_rate": 4.620158461500724e-06,
"loss": 0.022,
"step": 37000
},
{
"epoch": 2.7595849584222534,
"grad_norm": 0.27211979031562805,
"learning_rate": 4.006917359629112e-06,
"loss": 0.0217,
"step": 37500
},
{
"epoch": 2.79637942453455,
"grad_norm": 0.36633121967315674,
"learning_rate": 3.3936762577575e-06,
"loss": 0.022,
"step": 38000
},
{
"epoch": 2.8331738906468464,
"grad_norm": 0.3557955324649811,
"learning_rate": 2.7804351558858883e-06,
"loss": 0.0211,
"step": 38500
},
{
"epoch": 2.869968356759143,
"grad_norm": 0.2332288920879364,
"learning_rate": 2.1671940540142763e-06,
"loss": 0.0218,
"step": 39000
},
{
"epoch": 2.90676282287144,
"grad_norm": 0.179446280002594,
"learning_rate": 1.5539529521426646e-06,
"loss": 0.0213,
"step": 39500
},
{
"epoch": 2.9435572889837367,
"grad_norm": 0.7559341192245483,
"learning_rate": 9.407118502710525e-07,
"loss": 0.0215,
"step": 40000
},
{
"epoch": 2.9803517550960335,
"grad_norm": 0.2412876933813095,
"learning_rate": 3.2747074839944075e-07,
"loss": 0.0218,
"step": 40500
},
{
"epoch": 3.0,
"eval_loss": 0.02727937512099743,
"eval_mse": 0.027279374637490018,
"eval_runtime": 54.5217,
"eval_samples_per_second": 917.066,
"eval_steps_per_second": 114.633,
"step": 40767
},
{
"epoch": 3.0,
"step": 40767,
"total_flos": 4.296607448400461e+16,
"train_loss": 0.028245126846964373,
"train_runtime": 5158.77,
"train_samples_per_second": 505.739,
"train_steps_per_second": 7.902
}
],
"logging_steps": 500,
"max_steps": 40767,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.296607448400461e+16,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}