{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.99775617053104,
"eval_steps": 100,
"global_step": 1002,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0029917726252804786,
"grad_norm": 12.0,
"learning_rate": 4.95049504950495e-09,
"logits/chosen": -17.95128631591797,
"logits/rejected": -17.9676513671875,
"logps/chosen": -470.5423889160156,
"logps/rejected": -476.4247131347656,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.029917726252804786,
"grad_norm": 12.75,
"learning_rate": 4.950495049504951e-08,
"logits/chosen": -18.26405906677246,
"logits/rejected": -18.187000274658203,
"logps/chosen": -499.1011962890625,
"logps/rejected": -476.3055419921875,
"loss": 0.6925,
"rewards/accuracies": 0.1753472238779068,
"rewards/chosen": -0.0009479601285420358,
"rewards/margins": 0.0018414496444165707,
"rewards/rejected": -0.002789410063996911,
"step": 10
},
{
"epoch": 0.05983545250560957,
"grad_norm": 14.5,
"learning_rate": 9.900990099009901e-08,
"logits/chosen": -18.466373443603516,
"logits/rejected": -18.416664123535156,
"logps/chosen": -518.89794921875,
"logps/rejected": -501.88494873046875,
"loss": 0.6937,
"rewards/accuracies": 0.48124998807907104,
"rewards/chosen": 0.004138890188187361,
"rewards/margins": 0.0013234459329396486,
"rewards/rejected": 0.0028154447209089994,
"step": 20
},
{
"epoch": 0.08975317875841436,
"grad_norm": 12.125,
"learning_rate": 1.485148514851485e-07,
"logits/chosen": -18.632816314697266,
"logits/rejected": -18.364212036132812,
"logps/chosen": -518.1107177734375,
"logps/rejected": -474.103271484375,
"loss": 0.6978,
"rewards/accuracies": 0.44218748807907104,
"rewards/chosen": -0.0032092046458274126,
"rewards/margins": -0.005843925289809704,
"rewards/rejected": 0.0026347211096435785,
"step": 30
},
{
"epoch": 0.11967090501121914,
"grad_norm": 12.1875,
"learning_rate": 1.9801980198019803e-07,
"logits/chosen": -18.572345733642578,
"logits/rejected": -18.411455154418945,
"logps/chosen": -499.1954650878906,
"logps/rejected": -475.9703674316406,
"loss": 0.6935,
"rewards/accuracies": 0.484375,
"rewards/chosen": -0.007920954376459122,
"rewards/margins": 0.003416565014049411,
"rewards/rejected": -0.011337519623339176,
"step": 40
},
{
"epoch": 0.14958863126402394,
"grad_norm": 14.0625,
"learning_rate": 2.475247524752475e-07,
"logits/chosen": -18.346946716308594,
"logits/rejected": -18.163400650024414,
"logps/chosen": -488.15997314453125,
"logps/rejected": -466.6705017089844,
"loss": 0.6953,
"rewards/accuracies": 0.4781250059604645,
"rewards/chosen": -0.0012789719039574265,
"rewards/margins": -0.00010849386308109388,
"rewards/rejected": -0.0011704775970429182,
"step": 50
},
{
"epoch": 0.17950635751682872,
"grad_norm": 14.375,
"learning_rate": 2.97029702970297e-07,
"logits/chosen": -18.437816619873047,
"logits/rejected": -18.432828903198242,
"logps/chosen": -494.74176025390625,
"logps/rejected": -490.072021484375,
"loss": 0.6937,
"rewards/accuracies": 0.48124998807907104,
"rewards/chosen": 0.005937687586992979,
"rewards/margins": 0.003491427283734083,
"rewards/rejected": 0.0024462605360895395,
"step": 60
},
{
"epoch": 0.2094240837696335,
"grad_norm": 12.9375,
"learning_rate": 3.465346534653465e-07,
"logits/chosen": -18.613513946533203,
"logits/rejected": -18.44839859008789,
"logps/chosen": -496.1435546875,
"logps/rejected": -476.2933044433594,
"loss": 0.6983,
"rewards/accuracies": 0.45781248807907104,
"rewards/chosen": -0.006354107521474361,
"rewards/margins": -0.00639874953776598,
"rewards/rejected": 4.464127050596289e-05,
"step": 70
},
{
"epoch": 0.2393418100224383,
"grad_norm": 13.0,
"learning_rate": 3.9603960396039606e-07,
"logits/chosen": -18.565101623535156,
"logits/rejected": -18.38371467590332,
"logps/chosen": -515.5604858398438,
"logps/rejected": -464.8475646972656,
"loss": 0.6953,
"rewards/accuracies": 0.46875,
"rewards/chosen": -0.010281904600560665,
"rewards/margins": 0.0007311844383366406,
"rewards/rejected": -0.01101309061050415,
"step": 80
},
{
"epoch": 0.26925953627524307,
"grad_norm": 12.75,
"learning_rate": 4.4554455445544555e-07,
"logits/chosen": -18.506877899169922,
"logits/rejected": -18.355941772460938,
"logps/chosen": -496.4579162597656,
"logps/rejected": -481.5419921875,
"loss": 0.696,
"rewards/accuracies": 0.4765625,
"rewards/chosen": -0.008175070397555828,
"rewards/margins": -0.0013799158623442054,
"rewards/rejected": -0.006795152090489864,
"step": 90
},
{
"epoch": 0.2991772625280479,
"grad_norm": 13.25,
"learning_rate": 4.95049504950495e-07,
"logits/chosen": -18.576337814331055,
"logits/rejected": -18.462890625,
"logps/chosen": -507.07696533203125,
"logps/rejected": -471.42303466796875,
"loss": 0.6955,
"rewards/accuracies": 0.4859375059604645,
"rewards/chosen": 0.007285754196345806,
"rewards/margins": 0.0004216151428408921,
"rewards/rejected": 0.0068641407415270805,
"step": 100
},
{
"epoch": 0.2991772625280479,
"eval_logits/chosen": -18.482393264770508,
"eval_logits/rejected": -18.36368751525879,
"eval_logps/chosen": -494.2335510253906,
"eval_logps/rejected": -478.78997802734375,
"eval_loss": 0.6958388686180115,
"eval_rewards/accuracies": 0.4701046347618103,
"eval_rewards/chosen": -0.001653533661738038,
"eval_rewards/margins": -0.0008236159919761121,
"eval_rewards/rejected": -0.0008299172623082995,
"eval_runtime": 844.1668,
"eval_samples_per_second": 6.335,
"eval_steps_per_second": 0.792,
"step": 100
},
{
"epoch": 0.32909498878085264,
"grad_norm": 14.25,
"learning_rate": 4.998769137458909e-07,
"logits/chosen": -18.438594818115234,
"logits/rejected": -18.30778694152832,
"logps/chosen": -488.8753967285156,
"logps/rejected": -469.21240234375,
"loss": 0.693,
"rewards/accuracies": 0.49687498807907104,
"rewards/chosen": -0.0037526420783251524,
"rewards/margins": 0.005193626508116722,
"rewards/rejected": -0.008946266956627369,
"step": 110
},
{
"epoch": 0.35901271503365745,
"grad_norm": 13.375,
"learning_rate": 4.994515860008407e-07,
"logits/chosen": -18.361738204956055,
"logits/rejected": -18.35593032836914,
"logps/chosen": -502.665771484375,
"logps/rejected": -485.8645935058594,
"loss": 0.6939,
"rewards/accuracies": 0.4921875,
"rewards/chosen": -0.011272026225924492,
"rewards/margins": 0.0029675140976905823,
"rewards/rejected": -0.0142395393922925,
"step": 120
},
{
"epoch": 0.3889304412864622,
"grad_norm": 14.4375,
"learning_rate": 4.987230141051212e-07,
"logits/chosen": -18.523672103881836,
"logits/rejected": -18.332469940185547,
"logps/chosen": -497.9520568847656,
"logps/rejected": -494.5643615722656,
"loss": 0.6936,
"rewards/accuracies": 0.48124998807907104,
"rewards/chosen": 0.0009459417196922004,
"rewards/margins": 0.003786131041124463,
"rewards/rejected": -0.0028401894960552454,
"step": 130
},
{
"epoch": 0.418848167539267,
"grad_norm": 13.6875,
"learning_rate": 4.976920837422805e-07,
"logits/chosen": -18.39594078063965,
"logits/rejected": -18.280231475830078,
"logps/chosen": -499.1576232910156,
"logps/rejected": -480.56964111328125,
"loss": 0.692,
"rewards/accuracies": 0.504687488079071,
"rewards/chosen": -0.008311725221574306,
"rewards/margins": 0.006742014549672604,
"rewards/rejected": -0.01505373977124691,
"step": 140
},
{
"epoch": 0.4487658937920718,
"grad_norm": 13.5,
"learning_rate": 4.963600481559129e-07,
"logits/chosen": -18.300243377685547,
"logits/rejected": -18.194604873657227,
"logps/chosen": -495.46173095703125,
"logps/rejected": -483.1366271972656,
"loss": 0.6985,
"rewards/accuracies": 0.4671874940395355,
"rewards/chosen": -0.014861522242426872,
"rewards/margins": -0.005774814635515213,
"rewards/rejected": -0.009086708538234234,
"step": 150
},
{
"epoch": 0.4786836200448766,
"grad_norm": 14.9375,
"learning_rate": 4.947285266261623e-07,
"logits/chosen": -18.28691864013672,
"logits/rejected": -18.09464454650879,
"logps/chosen": -501.8270568847656,
"logps/rejected": -481.210693359375,
"loss": 0.6892,
"rewards/accuracies": 0.542187511920929,
"rewards/chosen": 0.004282023757696152,
"rewards/margins": 0.012691694311797619,
"rewards/rejected": -0.008409671485424042,
"step": 160
},
{
"epoch": 0.5086013462976814,
"grad_norm": 12.375,
"learning_rate": 4.927995025012548e-07,
"logits/chosen": -18.599102020263672,
"logits/rejected": -18.47110366821289,
"logps/chosen": -497.13409423828125,
"logps/rejected": -462.9967346191406,
"loss": 0.6976,
"rewards/accuracies": 0.47187501192092896,
"rewards/chosen": -0.015234975144267082,
"rewards/margins": -0.004649547394365072,
"rewards/rejected": -0.010585429146885872,
"step": 170
},
{
"epoch": 0.5385190725504861,
"grad_norm": 18.0,
"learning_rate": 4.905753207864547e-07,
"logits/chosen": -18.513076782226562,
"logits/rejected": -18.358755111694336,
"logps/chosen": -499.5277404785156,
"logps/rejected": -484.95782470703125,
"loss": 0.6963,
"rewards/accuracies": 0.47968751192092896,
"rewards/chosen": -0.002763888565823436,
"rewards/margins": -0.0016130201984196901,
"rewards/rejected": -0.0011508697643876076,
"step": 180
},
{
"epoch": 0.5684367988032909,
"grad_norm": 14.0625,
"learning_rate": 4.880586852933747e-07,
"logits/chosen": -18.47112464904785,
"logits/rejected": -18.298198699951172,
"logps/chosen": -504.328857421875,
"logps/rejected": -473.43359375,
"loss": 0.6939,
"rewards/accuracies": 0.512499988079071,
"rewards/chosen": -0.0022486697416752577,
"rewards/margins": 0.00311539089307189,
"rewards/rejected": -0.005364061798900366,
"step": 190
},
{
"epoch": 0.5983545250560958,
"grad_norm": 13.9375,
"learning_rate": 4.852526553531054e-07,
"logits/chosen": -18.492422103881836,
"logits/rejected": -18.396549224853516,
"logps/chosen": -498.78741455078125,
"logps/rejected": -481.09405517578125,
"loss": 0.6906,
"rewards/accuracies": 0.4781250059604645,
"rewards/chosen": -0.0009043412283062935,
"rewards/margins": 0.010298773646354675,
"rewards/rejected": -0.01120311301201582,
"step": 200
},
{
"epoch": 0.5983545250560958,
"eval_logits/chosen": -18.480627059936523,
"eval_logits/rejected": -18.362462997436523,
"eval_logps/chosen": -494.2452697753906,
"eval_logps/rejected": -478.79736328125,
"eval_loss": 0.6961907148361206,
"eval_rewards/accuracies": 0.4744020998477936,
"eval_rewards/chosen": -0.0028220494277775288,
"eval_rewards/margins": -0.001257332507520914,
"eval_rewards/rejected": -0.0015647169202566147,
"eval_runtime": 842.3412,
"eval_samples_per_second": 6.349,
"eval_steps_per_second": 0.794,
"step": 200
},
{
"epoch": 0.6282722513089005,
"grad_norm": 13.6875,
"learning_rate": 4.8216064209716e-07,
"logits/chosen": -18.475950241088867,
"logits/rejected": -18.323816299438477,
"logps/chosen": -510.2359313964844,
"logps/rejected": -491.6826171875,
"loss": 0.6954,
"rewards/accuracies": 0.4703125059604645,
"rewards/chosen": -0.00869015883654356,
"rewards/margins": 0.0011458725202828646,
"rewards/rejected": -0.009836031123995781,
"step": 210
},
{
"epoch": 0.6581899775617053,
"grad_norm": 13.75,
"learning_rate": 4.787864043107546e-07,
"logits/chosen": -18.483421325683594,
"logits/rejected": -18.315481185913086,
"logps/chosen": -505.24383544921875,
"logps/rejected": -480.8597717285156,
"loss": 0.6945,
"rewards/accuracies": 0.4984374940395355,
"rewards/chosen": -0.007081210613250732,
"rewards/margins": 0.002295692218467593,
"rewards/rejected": -0.009376903995871544,
"step": 220
},
{
"epoch": 0.6881077038145101,
"grad_norm": 12.3125,
"learning_rate": 4.751340438634669e-07,
"logits/chosen": -18.307737350463867,
"logits/rejected": -18.23043441772461,
"logps/chosen": -492.05596923828125,
"logps/rejected": -478.38238525390625,
"loss": 0.6932,
"rewards/accuracies": 0.4828124940395355,
"rewards/chosen": -0.005866049323230982,
"rewards/margins": 0.005141937639564276,
"rewards/rejected": -0.011007988825440407,
"step": 230
},
{
"epoch": 0.7180254300673149,
"grad_norm": 13.8125,
"learning_rate": 4.712080007228254e-07,
"logits/chosen": -18.248706817626953,
"logits/rejected": -18.240238189697266,
"logps/chosen": -498.770263671875,
"logps/rejected": -494.4297790527344,
"loss": 0.6899,
"rewards/accuracies": 0.5015624761581421,
"rewards/chosen": -0.0009179557673633099,
"rewards/margins": 0.01158793456852436,
"rewards/rejected": -0.012505888938903809,
"step": 240
},
{
"epoch": 0.7479431563201197,
"grad_norm": 15.5625,
"learning_rate": 4.670130475568927e-07,
"logits/chosen": -18.496627807617188,
"logits/rejected": -18.331436157226562,
"logps/chosen": -501.021728515625,
"logps/rejected": -485.9256896972656,
"loss": 0.693,
"rewards/accuracies": 0.4859375059604645,
"rewards/chosen": 0.0024902846198529005,
"rewards/margins": 0.005465887952595949,
"rewards/rejected": -0.002975603099912405,
"step": 250
},
{
"epoch": 0.7778608825729244,
"grad_norm": 12.625,
"learning_rate": 4.6255428393240354e-07,
"logits/chosen": -18.292850494384766,
"logits/rejected": -18.20071029663086,
"logps/chosen": -479.3639221191406,
"logps/rejected": -463.76776123046875,
"loss": 0.6947,
"rewards/accuracies": 0.484375,
"rewards/chosen": -0.019604574888944626,
"rewards/margins": 0.0011741012567654252,
"rewards/rejected": -0.020778674632310867,
"step": 260
},
{
"epoch": 0.8077786088257293,
"grad_norm": 13.4375,
"learning_rate": 4.578371301155106e-07,
"logits/chosen": -18.332660675048828,
"logits/rejected": -18.25689697265625,
"logps/chosen": -497.0006408691406,
"logps/rejected": -493.8580017089844,
"loss": 0.6925,
"rewards/accuracies": 0.4937500059604645,
"rewards/chosen": -0.017484821379184723,
"rewards/margins": 0.006399748381227255,
"rewards/rejected": -0.023884572088718414,
"step": 270
},
{
"epoch": 0.837696335078534,
"grad_norm": 13.625,
"learning_rate": 4.528673204826744e-07,
"logits/chosen": -18.48120880126953,
"logits/rejected": -18.390308380126953,
"logps/chosen": -513.1058959960938,
"logps/rejected": -496.99578857421875,
"loss": 0.6956,
"rewards/accuracies": 0.4515624940395355,
"rewards/chosen": -0.023203711956739426,
"rewards/margins": 0.0008115891250781715,
"rewards/rejected": -0.02401530183851719,
"step": 280
},
{
"epoch": 0.8676140613313388,
"grad_norm": 15.3125,
"learning_rate": 4.476508965497067e-07,
"logits/chosen": -18.398761749267578,
"logits/rejected": -18.283004760742188,
"logps/chosen": -498.1253967285156,
"logps/rejected": -489.4436950683594,
"loss": 0.697,
"rewards/accuracies": 0.4749999940395355,
"rewards/chosen": -0.015816329047083855,
"rewards/margins": -0.002141923177987337,
"rewards/rejected": -0.013674402609467506,
"step": 290
},
{
"epoch": 0.8975317875841436,
"grad_norm": 15.0,
"learning_rate": 4.421941996274423e-07,
"logits/chosen": -18.34426498413086,
"logits/rejected": -18.241085052490234,
"logps/chosen": -527.1797485351562,
"logps/rejected": -507.3211975097656,
"loss": 0.6985,
"rewards/accuracies": 0.4593749940395355,
"rewards/chosen": -0.020052501931786537,
"rewards/margins": -0.005515648517757654,
"rewards/rejected": -0.014536852948367596,
"step": 300
},
{
"epoch": 0.8975317875841436,
"eval_logits/chosen": -18.48088836669922,
"eval_logits/rejected": -18.36236000061035,
"eval_logps/chosen": -494.4387512207031,
"eval_logps/rejected": -478.9952392578125,
"eval_loss": 0.6959420442581177,
"eval_rewards/accuracies": 0.4738415479660034,
"eval_rewards/chosen": -0.022165408357977867,
"eval_rewards/margins": -0.000810055120382458,
"eval_rewards/rejected": -0.02135535702109337,
"eval_runtime": 842.4585,
"eval_samples_per_second": 6.348,
"eval_steps_per_second": 0.794,
"step": 300
},
{
"epoch": 0.9274495138369484,
"grad_norm": 14.0625,
"learning_rate": 4.3650386311296715e-07,
"logits/chosen": -18.639049530029297,
"logits/rejected": -18.473182678222656,
"logps/chosen": -519.571533203125,
"logps/rejected": -493.08416748046875,
"loss": 0.7007,
"rewards/accuracies": 0.44218748807907104,
"rewards/chosen": -0.020600352436304092,
"rewards/margins": -0.009581638500094414,
"rewards/rejected": -0.011018714867532253,
"step": 310
},
{
"epoch": 0.9573672400897532,
"grad_norm": 14.625,
"learning_rate": 4.305868044257734e-07,
"logits/chosen": -18.35852813720703,
"logits/rejected": -18.237619400024414,
"logps/chosen": -494.0035705566406,
"logps/rejected": -474.61175537109375,
"loss": 0.6962,
"rewards/accuracies": 0.4921875,
"rewards/chosen": -0.03291434049606323,
"rewards/margins": -0.0014624244067817926,
"rewards/rejected": -0.031451914459466934,
"step": 320
},
{
"epoch": 0.9872849663425579,
"grad_norm": 15.0,
"learning_rate": 4.244502165986448e-07,
"logits/chosen": -18.50986099243164,
"logits/rejected": -18.264389038085938,
"logps/chosen": -487.98284912109375,
"logps/rejected": -478.84002685546875,
"loss": 0.6944,
"rewards/accuracies": 0.4859375059604645,
"rewards/chosen": -0.018264159560203552,
"rewards/margins": 0.0021764684934169054,
"rewards/rejected": -0.02044062688946724,
"step": 330
},
{
"epoch": 1.0172026925953628,
"grad_norm": 13.0625,
"learning_rate": 4.1810155953349445e-07,
"logits/chosen": -18.608068466186523,
"logits/rejected": -18.53306007385254,
"logps/chosen": -499.0755310058594,
"logps/rejected": -490.98486328125,
"loss": 0.6936,
"rewards/accuracies": 0.47343748807907104,
"rewards/chosen": -0.01962715946137905,
"rewards/margins": 0.00391949201002717,
"rewards/rejected": -0.023546651005744934,
"step": 340
},
{
"epoch": 1.0471204188481675,
"grad_norm": 12.1875,
"learning_rate": 4.115485509327851e-07,
"logits/chosen": -18.467256546020508,
"logits/rejected": -18.209827423095703,
"logps/chosen": -505.059814453125,
"logps/rejected": -475.1127014160156,
"loss": 0.6934,
"rewards/accuracies": 0.4937500059604645,
"rewards/chosen": -0.009037208743393421,
"rewards/margins": 0.004168554209172726,
"rewards/rejected": -0.013205763883888721,
"step": 350
},
{
"epoch": 1.0770381451009723,
"grad_norm": 12.4375,
"learning_rate": 4.0479915691755583e-07,
"logits/chosen": -18.381351470947266,
"logits/rejected": -18.23720359802246,
"logps/chosen": -503.64044189453125,
"logps/rejected": -486.0143127441406,
"loss": 0.7002,
"rewards/accuracies": 0.4703125059604645,
"rewards/chosen": -0.015495906583964825,
"rewards/margins": -0.009308911859989166,
"rewards/rejected": -0.006186993792653084,
"step": 360
},
{
"epoch": 1.106955871353777,
"grad_norm": 13.8125,
"learning_rate": 3.9786158234346e-07,
"logits/chosen": -18.671598434448242,
"logits/rejected": -18.52102279663086,
"logps/chosen": -491.046142578125,
"logps/rejected": -473.78057861328125,
"loss": 0.692,
"rewards/accuracies": 0.48750001192092896,
"rewards/chosen": -0.0006091233226470649,
"rewards/margins": 0.007144009228795767,
"rewards/rejected": -0.007753132376819849,
"step": 370
},
{
"epoch": 1.136873597606582,
"grad_norm": 14.375,
"learning_rate": 3.90744260826588e-07,
"logits/chosen": -18.341663360595703,
"logits/rejected": -18.282123565673828,
"logps/chosen": -503.53521728515625,
"logps/rejected": -478.75726318359375,
"loss": 0.6975,
"rewards/accuracies": 0.4828124940395355,
"rewards/chosen": -0.014238146133720875,
"rewards/margins": -0.004205497447401285,
"rewards/rejected": -0.010032649151980877,
"step": 380
},
{
"epoch": 1.1667913238593868,
"grad_norm": 13.75,
"learning_rate": 3.834558444911977e-07,
"logits/chosen": -18.506235122680664,
"logits/rejected": -18.454498291015625,
"logps/chosen": -501.6888122558594,
"logps/rejected": -492.0193786621094,
"loss": 0.6937,
"rewards/accuracies": 0.4671874940395355,
"rewards/chosen": -0.013215325772762299,
"rewards/margins": 0.004076224751770496,
"rewards/rejected": -0.01729154959321022,
"step": 390
},
{
"epoch": 1.1967090501121915,
"grad_norm": 11.4375,
"learning_rate": 3.760051934518178e-07,
"logits/chosen": -18.325563430786133,
"logits/rejected": -18.207347869873047,
"logps/chosen": -490.45599365234375,
"logps/rejected": -484.768798828125,
"loss": 0.6946,
"rewards/accuracies": 0.503125011920929,
"rewards/chosen": -0.012493303045630455,
"rewards/margins": 0.0020187748596072197,
"rewards/rejected": -0.0145120769739151,
"step": 400
},
{
"epoch": 1.1967090501121915,
"eval_logits/chosen": -18.481069564819336,
"eval_logits/rejected": -18.362834930419922,
"eval_logps/chosen": -494.2017822265625,
"eval_logps/rejected": -478.7664489746094,
"eval_loss": 0.6955097913742065,
"eval_rewards/accuracies": 0.47533631324768066,
"eval_rewards/chosen": 0.0015334173804149032,
"eval_rewards/margins": 5.264274932414992e-06,
"eval_rewards/rejected": 0.0015281536616384983,
"eval_runtime": 842.6072,
"eval_samples_per_second": 6.347,
"eval_steps_per_second": 0.794,
"step": 400
},
{
"epoch": 1.2266267763649963,
"grad_norm": 12.5,
"learning_rate": 3.6840136504250895e-07,
"logits/chosen": -18.419193267822266,
"logits/rejected": -18.217782974243164,
"logps/chosen": -504.786865234375,
"logps/rejected": -477.596923828125,
"loss": 0.6965,
"rewards/accuracies": 0.4984374940395355,
"rewards/chosen": -0.006161923985928297,
"rewards/margins": -0.001232474809512496,
"rewards/rejected": -0.004929449409246445,
"step": 410
},
{
"epoch": 1.256544502617801,
"grad_norm": 13.0625,
"learning_rate": 3.6065360280637635e-07,
"logits/chosen": -18.372119903564453,
"logits/rejected": -18.320249557495117,
"logps/chosen": -484.0809020996094,
"logps/rejected": -474.786376953125,
"loss": 0.6929,
"rewards/accuracies": 0.47343748807907104,
"rewards/chosen": -0.009603636339306831,
"rewards/margins": 0.00548522686585784,
"rewards/rejected": -0.015088860876858234,
"step": 420
},
{
"epoch": 1.2864622288706058,
"grad_norm": 16.375,
"learning_rate": 3.527713252587179e-07,
"logits/chosen": -18.48366928100586,
"logits/rejected": -18.39108657836914,
"logps/chosen": -501.91314697265625,
"logps/rejected": -480.6852111816406,
"loss": 0.6968,
"rewards/accuracies": 0.4671874940395355,
"rewards/chosen": -0.004988933447748423,
"rewards/margins": -0.0021209451369941235,
"rewards/rejected": -0.002867989707738161,
"step": 430
},
{
"epoch": 1.3163799551234106,
"grad_norm": 14.0625,
"learning_rate": 3.447641144374697e-07,
"logits/chosen": -18.41563606262207,
"logits/rejected": -18.326993942260742,
"logps/chosen": -495.029541015625,
"logps/rejected": -485.95654296875,
"loss": 0.6938,
"rewards/accuracies": 0.5062500238418579,
"rewards/chosen": -0.0021522603929042816,
"rewards/margins": 0.003724109847098589,
"rewards/rejected": -0.005876368843019009,
"step": 440
},
{
"epoch": 1.3462976813762153,
"grad_norm": 12.8125,
"learning_rate": 3.3664170425486577e-07,
"logits/chosen": -18.418684005737305,
"logits/rejected": -18.22823143005371,
"logps/chosen": -508.91619873046875,
"logps/rejected": -493.2528381347656,
"loss": 0.6962,
"rewards/accuracies": 0.4859375059604645,
"rewards/chosen": -0.016366075724363327,
"rewards/margins": -0.0013246103189885616,
"rewards/rejected": -0.015041463077068329,
"step": 450
},
{
"epoch": 1.37621540762902,
"grad_norm": 12.25,
"learning_rate": 3.2841396866447216e-07,
"logits/chosen": -18.291414260864258,
"logits/rejected": -18.216777801513672,
"logps/chosen": -494.11651611328125,
"logps/rejected": -479.777587890625,
"loss": 0.6941,
"rewards/accuracies": 0.4749999940395355,
"rewards/chosen": 0.010070102289319038,
"rewards/margins": 0.002756762085482478,
"rewards/rejected": 0.007313339505344629,
"step": 460
},
{
"epoch": 1.406133133881825,
"grad_norm": 12.125,
"learning_rate": 3.200909096579822e-07,
"logits/chosen": -18.547330856323242,
"logits/rejected": -18.329965591430664,
"logps/chosen": -502.29052734375,
"logps/rejected": -481.80035400390625,
"loss": 0.6937,
"rewards/accuracies": 0.484375,
"rewards/chosen": 0.006962032523006201,
"rewards/margins": 0.004513027612119913,
"rewards/rejected": 0.0024490035139024258,
"step": 470
},
{
"epoch": 1.4360508601346298,
"grad_norm": 13.75,
"learning_rate": 3.116826451063618e-07,
"logits/chosen": -18.396873474121094,
"logits/rejected": -18.2117862701416,
"logps/chosen": -498.18048095703125,
"logps/rejected": -463.1722106933594,
"loss": 0.6994,
"rewards/accuracies": 0.4625000059604645,
"rewards/chosen": -0.007130985148251057,
"rewards/margins": -0.007668208330869675,
"rewards/rejected": 0.0005372242303565145,
"step": 480
},
{
"epoch": 1.4659685863874345,
"grad_norm": 13.4375,
"learning_rate": 3.031993964601274e-07,
"logits/chosen": -18.40250015258789,
"logits/rejected": -18.297494888305664,
"logps/chosen": -503.1748962402344,
"logps/rejected": -499.47900390625,
"loss": 0.6945,
"rewards/accuracies": 0.4937500059604645,
"rewards/chosen": -0.006336678750813007,
"rewards/margins": 0.0019012052798643708,
"rewards/rejected": -0.008237885311245918,
"step": 490
},
{
"epoch": 1.4958863126402393,
"grad_norm": 12.6875,
"learning_rate": 2.9465147632370795e-07,
"logits/chosen": -18.3398380279541,
"logits/rejected": -18.301984786987305,
"logps/chosen": -485.8975524902344,
"logps/rejected": -474.5433654785156,
"loss": 0.6946,
"rewards/accuracies": 0.4765625,
"rewards/chosen": -0.00577941257506609,
"rewards/margins": 0.0024705647956579924,
"rewards/rejected": -0.008249977603554726,
"step": 500
},
{
"epoch": 1.4958863126402393,
"eval_logits/chosen": -18.481569290161133,
"eval_logits/rejected": -18.363122940063477,
"eval_logps/chosen": -494.26336669921875,
"eval_logps/rejected": -478.822265625,
"eval_loss": 0.6959648728370667,
"eval_rewards/accuracies": 0.47907325625419617,
"eval_rewards/chosen": -0.0046310219913721085,
"eval_rewards/margins": -0.0005819797515869141,
"eval_rewards/rejected": -0.0040490408428013325,
"eval_runtime": 842.3853,
"eval_samples_per_second": 6.349,
"eval_steps_per_second": 0.794,
"step": 500
},
{
"epoch": 1.5258040388930443,
"grad_norm": 15.875,
"learning_rate": 2.86049275918997e-07,
"logits/chosen": -18.267154693603516,
"logits/rejected": -18.09113883972168,
"logps/chosen": -506.2464904785156,
"logps/rejected": -472.58721923828125,
"loss": 0.6937,
"rewards/accuracies": 0.5062500238418579,
"rewards/chosen": 0.00022939778864383698,
"rewards/margins": 0.003868215484544635,
"rewards/rejected": -0.003638815600425005,
"step": 510
},
{
"epoch": 1.555721765145849,
"grad_norm": 11.9375,
"learning_rate": 2.774032524533323e-07,
"logits/chosen": -18.302738189697266,
"logits/rejected": -18.214998245239258,
"logps/chosen": -485.1527404785156,
"logps/rejected": -474.42120361328125,
"loss": 0.6912,
"rewards/accuracies": 0.4937500059604645,
"rewards/chosen": -0.0037696107756346464,
"rewards/margins": 0.008755478076636791,
"rewards/rejected": -0.01252509094774723,
"step": 520
},
{
"epoch": 1.5856394913986538,
"grad_norm": 13.25,
"learning_rate": 2.6872391640726276e-07,
"logits/chosen": -18.349544525146484,
"logits/rejected": -18.272260665893555,
"logps/chosen": -496.89117431640625,
"logps/rejected": -471.0453186035156,
"loss": 0.696,
"rewards/accuracies": 0.46406251192092896,
"rewards/chosen": -0.016991907730698586,
"rewards/margins": -0.0011090601328760386,
"rewards/rejected": -0.01588284783065319,
"step": 530
},
{
"epoch": 1.6155572176514585,
"grad_norm": 14.5625,
"learning_rate": 2.6002181875755233e-07,
"logits/chosen": -18.489253997802734,
"logits/rejected": -18.443098068237305,
"logps/chosen": -492.5693359375,
"logps/rejected": -482.33306884765625,
"loss": 0.6967,
"rewards/accuracies": 0.48750001192092896,
"rewards/chosen": -0.0029235139954835176,
"rewards/margins": -0.0017206415068358183,
"rewards/rejected": -0.0012028723722323775,
"step": 540
},
{
"epoch": 1.6454749439042633,
"grad_norm": 12.6875,
"learning_rate": 2.5130753815095647e-07,
"logits/chosen": -18.483600616455078,
"logits/rejected": -18.2879581451416,
"logps/chosen": -510.51348876953125,
"logps/rejected": -490.70196533203125,
"loss": 0.6949,
"rewards/accuracies": 0.4921875,
"rewards/chosen": -0.0024643188808113337,
"rewards/margins": 0.001509314402937889,
"rewards/rejected": -0.003973634447902441,
"step": 550
},
{
"epoch": 1.675392670157068,
"grad_norm": 12.0,
"learning_rate": 2.4259166804436003e-07,
"logits/chosen": -18.499126434326172,
"logits/rejected": -18.366363525390625,
"logps/chosen": -499.53240966796875,
"logps/rejected": -479.3192443847656,
"loss": 0.6907,
"rewards/accuracies": 0.5171874761581421,
"rewards/chosen": -0.0017261719331145287,
"rewards/margins": 0.009601183235645294,
"rewards/rejected": -0.011327354237437248,
"step": 560
},
{
"epoch": 1.7053103964098728,
"grad_norm": 13.5,
"learning_rate": 2.338848038269135e-07,
"logits/chosen": -18.419811248779297,
"logits/rejected": -18.180389404296875,
"logps/chosen": -502.84686279296875,
"logps/rejected": -476.781494140625,
"loss": 0.692,
"rewards/accuracies": 0.49531251192092896,
"rewards/chosen": 0.001199430669657886,
"rewards/margins": 0.007095737848430872,
"rewards/rejected": -0.005896307062357664,
"step": 570
},
{
"epoch": 1.7352281226626776,
"grad_norm": 14.125,
"learning_rate": 2.2519752993981804e-07,
"logits/chosen": -18.500263214111328,
"logits/rejected": -18.394670486450195,
"logps/chosen": -514.1810913085938,
"logps/rejected": -490.6826171875,
"loss": 0.7018,
"rewards/accuracies": 0.42656248807907104,
"rewards/chosen": -0.007822760380804539,
"rewards/margins": -0.011961746029555798,
"rewards/rejected": 0.004138986114412546,
"step": 580
},
{
"epoch": 1.7651458489154823,
"grad_norm": 14.0,
"learning_rate": 2.1654040700942112e-07,
"logits/chosen": -18.530935287475586,
"logits/rejected": -18.39280891418457,
"logps/chosen": -511.75115966796875,
"logps/rejected": -486.78564453125,
"loss": 0.6949,
"rewards/accuracies": 0.48750001192092896,
"rewards/chosen": 0.006620324216783047,
"rewards/margins": 0.0015118361916393042,
"rewards/rejected": 0.005108488257974386,
"step": 590
},
{
"epoch": 1.795063575168287,
"grad_norm": 13.75,
"learning_rate": 2.0792395900926155e-07,
"logits/chosen": -18.366960525512695,
"logits/rejected": -18.27281379699707,
"logps/chosen": -495.75445556640625,
"logps/rejected": -479.6105041503906,
"loss": 0.6952,
"rewards/accuracies": 0.4703125059604645,
"rewards/chosen": -0.01034991629421711,
"rewards/margins": 0.0005709372344426811,
"rewards/rejected": -0.010920852422714233,
"step": 600
},
{
"epoch": 1.795063575168287,
"eval_logits/chosen": -18.48212432861328,
"eval_logits/rejected": -18.363605499267578,
"eval_logps/chosen": -494.263916015625,
"eval_logps/rejected": -478.8390808105469,
"eval_loss": 0.695127010345459,
"eval_rewards/accuracies": 0.4882287085056305,
"eval_rewards/chosen": -0.0046835425309836864,
"eval_rewards/margins": 0.0010564102558419108,
"eval_rewards/rejected": -0.005739952437579632,
"eval_runtime": 842.133,
"eval_samples_per_second": 6.351,
"eval_steps_per_second": 0.794,
"step": 600
},
{
"epoch": 1.824981301421092,
"grad_norm": 13.3125,
"learning_rate": 1.9935866046667134e-07,
"logits/chosen": -18.486186981201172,
"logits/rejected": -18.327022552490234,
"logps/chosen": -518.8204956054688,
"logps/rejected": -491.86737060546875,
"loss": 0.6928,
"rewards/accuracies": 0.5078125,
"rewards/chosen": -0.0042625004425644875,
"rewards/margins": 0.00539636705070734,
"rewards/rejected": -0.009658867493271828,
"step": 610
},
{
"epoch": 1.8548990276738968,
"grad_norm": 13.0625,
"learning_rate": 1.9085492372948678e-07,
"logits/chosen": -18.573169708251953,
"logits/rejected": -18.4843692779541,
"logps/chosen": -523.7990112304688,
"logps/rejected": -502.0945739746094,
"loss": 0.6918,
"rewards/accuracies": 0.504687488079071,
"rewards/chosen": -0.007377514149993658,
"rewards/margins": 0.00755069125443697,
"rewards/rejected": -0.01492820493876934,
"step": 620
},
{
"epoch": 1.8848167539267016,
"grad_norm": 13.4375,
"learning_rate": 1.824230863083477e-07,
"logits/chosen": -18.435693740844727,
"logits/rejected": -18.278911590576172,
"logps/chosen": -500.37103271484375,
"logps/rejected": -478.5856018066406,
"loss": 0.6983,
"rewards/accuracies": 0.46406251192092896,
"rewards/chosen": -0.014031234197318554,
"rewards/margins": -0.005335616413503885,
"rewards/rejected": -0.008695616386830807,
"step": 630
},
{
"epoch": 1.9147344801795063,
"grad_norm": 11.75,
"learning_rate": 1.7407339830997154e-07,
"logits/chosen": -18.601871490478516,
"logits/rejected": -18.49062156677246,
"logps/chosen": -509.70428466796875,
"logps/rejected": -490.6293029785156,
"loss": 0.6979,
"rewards/accuracies": 0.4515624940395355,
"rewards/chosen": -0.005590735003352165,
"rewards/margins": -0.00480449665337801,
"rewards/rejected": -0.0007862389320507646,
"step": 640
},
{
"epoch": 1.9446522064323113,
"grad_norm": 14.0,
"learning_rate": 1.658160099766799e-07,
"logits/chosen": -18.551902770996094,
"logits/rejected": -18.38374900817871,
"logps/chosen": -494.0267639160156,
"logps/rejected": -469.78143310546875,
"loss": 0.6993,
"rewards/accuracies": 0.4515624940395355,
"rewards/chosen": -0.014440399594604969,
"rewards/margins": -0.007908724248409271,
"rewards/rejected": -0.006531675346195698,
"step": 650
},
{
"epoch": 1.974569932685116,
"grad_norm": 13.4375,
"learning_rate": 1.576609593473246e-07,
"logits/chosen": -18.249267578125,
"logits/rejected": -18.19643783569336,
"logps/chosen": -497.38641357421875,
"logps/rejected": -477.0294494628906,
"loss": 0.695,
"rewards/accuracies": 0.44999998807907104,
"rewards/chosen": -0.010250954888761044,
"rewards/margins": 0.0014018019428476691,
"rewards/rejected": -0.011652758345007896,
"step": 660
},
{
"epoch": 2.004487658937921,
"grad_norm": 13.125,
"learning_rate": 1.4961816005461222e-07,
"logits/chosen": -18.5499324798584,
"logits/rejected": -18.45195198059082,
"logps/chosen": -503.68096923828125,
"logps/rejected": -480.076904296875,
"loss": 0.695,
"rewards/accuracies": 0.49531251192092896,
"rewards/chosen": -0.003980870358645916,
"rewards/margins": 0.0012054868275299668,
"rewards/rejected": -0.005186358001083136,
"step": 670
},
{
"epoch": 2.0344053851907256,
"grad_norm": 11.9375,
"learning_rate": 1.416973892736638e-07,
"logits/chosen": -18.451631546020508,
"logits/rejected": -18.396869659423828,
"logps/chosen": -498.55267333984375,
"logps/rejected": -484.3282165527344,
"loss": 0.6977,
"rewards/accuracies": 0.4625000059604645,
"rewards/chosen": -0.014167798683047295,
"rewards/margins": -0.00439287768676877,
"rewards/rejected": -0.009774921461939812,
"step": 680
},
{
"epoch": 2.0643231114435303,
"grad_norm": 12.875,
"learning_rate": 1.339082758364569e-07,
"logits/chosen": -18.43014907836914,
"logits/rejected": -18.314260482788086,
"logps/chosen": -495.68145751953125,
"logps/rejected": -485.66632080078125,
"loss": 0.6979,
"rewards/accuracies": 0.4625000059604645,
"rewards/chosen": -0.017589876428246498,
"rewards/margins": -0.0047332653775811195,
"rewards/rejected": -0.012856610119342804,
"step": 690
},
{
"epoch": 2.094240837696335,
"grad_norm": 12.75,
"learning_rate": 1.2626028852660026e-07,
"logits/chosen": -18.41107749938965,
"logits/rejected": -18.227169036865234,
"logps/chosen": -505.9676818847656,
"logps/rejected": -484.62548828125,
"loss": 0.6947,
"rewards/accuracies": 0.4765625,
"rewards/chosen": -0.014858139678835869,
"rewards/margins": 0.0021128151565790176,
"rewards/rejected": -0.016970956698060036,
"step": 700
},
{
"epoch": 2.094240837696335,
"eval_logits/chosen": -18.482025146484375,
"eval_logits/rejected": -18.363367080688477,
"eval_logps/chosen": -494.2701416015625,
"eval_logps/rejected": -478.8379211425781,
"eval_loss": 0.6954838633537292,
"eval_rewards/accuracies": 0.4822496175765991,
"eval_rewards/chosen": -0.005303152371197939,
"eval_rewards/margins": 0.0003158066247124225,
"eval_rewards/rejected": -0.005618959199637175,
"eval_runtime": 841.92,
"eval_samples_per_second": 6.352,
"eval_steps_per_second": 0.795,
"step": 700
},
{
"epoch": 2.12415856394914,
"grad_norm": 13.375,
"learning_rate": 1.1876272456867081e-07,
"logits/chosen": -18.531566619873047,
"logits/rejected": -18.326671600341797,
"logps/chosen": -510.41888427734375,
"logps/rejected": -488.3369140625,
"loss": 0.6989,
"rewards/accuracies": 0.48750001192092896,
"rewards/chosen": -0.008112497627735138,
"rewards/margins": -0.006051483564078808,
"rewards/rejected": -0.002061013597995043,
"step": 710
},
{
"epoch": 2.1540762902019446,
"grad_norm": 13.0,
"learning_rate": 1.114246983261046e-07,
"logits/chosen": -18.460107803344727,
"logits/rejected": -18.281509399414062,
"logps/chosen": -493.8604431152344,
"logps/rejected": -470.369140625,
"loss": 0.697,
"rewards/accuracies": 0.46406251192092896,
"rewards/chosen": -0.0026031527668237686,
"rewards/margins": -0.0029606991447508335,
"rewards/rejected": 0.0003575454466044903,
"step": 720
},
{
"epoch": 2.1839940164547493,
"grad_norm": 14.625,
"learning_rate": 1.0425513022138202e-07,
"logits/chosen": -18.540891647338867,
"logits/rejected": -18.398468017578125,
"logps/chosen": -490.9407653808594,
"logps/rejected": -473.2347717285156,
"loss": 0.693,
"rewards/accuracies": 0.4859375059604645,
"rewards/chosen": -0.0007933437591418624,
"rewards/margins": 0.005172435659915209,
"rewards/rejected": -0.005965779535472393,
"step": 730
},
{
"epoch": 2.213911742707554,
"grad_norm": 13.4375,
"learning_rate": 9.726273589197565e-08,
"logits/chosen": -18.563766479492188,
"logits/rejected": -18.471820831298828,
"logps/chosen": -508.5835876464844,
"logps/rejected": -494.5013122558594,
"loss": 0.6915,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.00014840513176750392,
"rewards/margins": 0.008764137513935566,
"rewards/rejected": -0.008912542834877968,
"step": 740
},
{
"epoch": 2.243829468960359,
"grad_norm": 16.25,
"learning_rate": 9.045601559524434e-08,
"logits/chosen": -18.197242736816406,
"logits/rejected": -18.150014877319336,
"logps/chosen": -496.7591247558594,
"logps/rejected": -475.01141357421875,
"loss": 0.6983,
"rewards/accuracies": 0.4609375,
"rewards/chosen": -0.005830574780702591,
"rewards/margins": -0.005549018736928701,
"rewards/rejected": -0.0002815568877849728,
"step": 750
},
{
"epoch": 2.273747195213164,
"grad_norm": 12.8125,
"learning_rate": 8.384324387515202e-08,
"logits/chosen": -18.513484954833984,
"logits/rejected": -18.47811508178711,
"logps/chosen": -509.2792053222656,
"logps/rejected": -504.4214782714844,
"loss": 0.6973,
"rewards/accuracies": 0.4703125059604645,
"rewards/chosen": -0.0049002873711287975,
"rewards/margins": -0.0031452118419110775,
"rewards/rejected": -0.001755074947141111,
"step": 760
},
{
"epoch": 2.303664921465969,
"grad_norm": 13.6875,
"learning_rate": 7.743245950337365e-08,
"logits/chosen": -18.391809463500977,
"logits/rejected": -18.290664672851562,
"logps/chosen": -499.37353515625,
"logps/rejected": -488.48388671875,
"loss": 0.6934,
"rewards/accuracies": 0.503125011920929,
"rewards/chosen": -0.013569754548370838,
"rewards/margins": 0.004729210399091244,
"rewards/rejected": -0.018298964947462082,
"step": 770
},
{
"epoch": 2.3335826477187736,
"grad_norm": 11.9375,
"learning_rate": 7.123145570701638e-08,
"logits/chosen": -18.322669982910156,
"logits/rejected": -18.09329605102539,
"logps/chosen": -498.12548828125,
"logps/rejected": -465.91497802734375,
"loss": 0.6925,
"rewards/accuracies": 0.4703125059604645,
"rewards/chosen": -0.011946515180170536,
"rewards/margins": 0.006278673652559519,
"rewards/rejected": -0.018225187435746193,
"step": 780
},
{
"epoch": 2.3635003739715783,
"grad_norm": 12.0,
"learning_rate": 6.524777069483525e-08,
"logits/chosen": -18.426311492919922,
"logits/rejected": -18.294950485229492,
"logps/chosen": -502.55810546875,
"logps/rejected": -476.4676818847656,
"loss": 0.7011,
"rewards/accuracies": 0.44062501192092896,
"rewards/chosen": -0.02296162210404873,
"rewards/margins": -0.01101845782250166,
"rewards/rejected": -0.01194316241890192,
"step": 790
},
{
"epoch": 2.393418100224383,
"grad_norm": 14.75,
"learning_rate": 5.948867849345979e-08,
"logits/chosen": -18.55495834350586,
"logits/rejected": -18.406208038330078,
"logps/chosen": -493.659423828125,
"logps/rejected": -478.211181640625,
"loss": 0.6995,
"rewards/accuracies": 0.45625001192092896,
"rewards/chosen": -0.009105198085308075,
"rewards/margins": -0.007736638188362122,
"rewards/rejected": -0.001368560828268528,
"step": 800
},
{
"epoch": 2.393418100224383,
"eval_logits/chosen": -18.481842041015625,
"eval_logits/rejected": -18.36322021484375,
"eval_logps/chosen": -494.27740478515625,
"eval_logps/rejected": -478.857421875,
"eval_loss": 0.6948285102844238,
"eval_rewards/accuracies": 0.49177876114845276,
"eval_rewards/chosen": -0.006035235244780779,
"eval_rewards/margins": 0.0015357759548351169,
"eval_rewards/rejected": -0.007571011781692505,
"eval_runtime": 842.3774,
"eval_samples_per_second": 6.349,
"eval_steps_per_second": 0.794,
"step": 800
},
{
"epoch": 2.423335826477188,
"grad_norm": 15.5,
"learning_rate": 5.396118010477274e-08,
"logits/chosen": -18.395492553710938,
"logits/rejected": -18.27509117126465,
"logps/chosen": -502.3111877441406,
"logps/rejected": -475.66973876953125,
"loss": 0.6989,
"rewards/accuracies": 0.4453125,
"rewards/chosen": -0.01698307693004608,
"rewards/margins": -0.006895692553371191,
"rewards/rejected": -0.010087382979691029,
"step": 810
},
{
"epoch": 2.4532535527299926,
"grad_norm": 15.0625,
"learning_rate": 4.867199499518873e-08,
"logits/chosen": -18.451091766357422,
"logits/rejected": -18.34830093383789,
"logps/chosen": -500.35595703125,
"logps/rejected": -482.3310546875,
"loss": 0.6971,
"rewards/accuracies": 0.44999998807907104,
"rewards/chosen": -0.006859651766717434,
"rewards/margins": -0.0022591417655348778,
"rewards/rejected": -0.004600510932505131,
"step": 820
},
{
"epoch": 2.4831712789827973,
"grad_norm": 13.875,
"learning_rate": 4.362755292717993e-08,
"logits/chosen": -18.62723731994629,
"logits/rejected": -18.555469512939453,
"logps/chosen": -508.4722595214844,
"logps/rejected": -480.99932861328125,
"loss": 0.6959,
"rewards/accuracies": 0.4515624940395355,
"rewards/chosen": 7.3916649853345e-05,
"rewards/margins": -0.0006395616801455617,
"rewards/rejected": 0.0007134780171327293,
"step": 830
},
{
"epoch": 2.513089005235602,
"grad_norm": 15.1875,
"learning_rate": 3.883398614297831e-08,
"logits/chosen": -18.562936782836914,
"logits/rejected": -18.407032012939453,
"logps/chosen": -518.3756103515625,
"logps/rejected": -503.34063720703125,
"loss": 0.6958,
"rewards/accuracies": 0.4671874940395355,
"rewards/chosen": -0.007723680697381496,
"rewards/margins": -2.291230521223042e-05,
"rewards/rejected": -0.007700768765062094,
"step": 840
},
{
"epoch": 2.543006731488407,
"grad_norm": 13.375,
"learning_rate": 3.4297121909956485e-08,
"logits/chosen": -18.537033081054688,
"logits/rejected": -18.43063735961914,
"logps/chosen": -490.99017333984375,
"logps/rejected": -469.899658203125,
"loss": 0.6941,
"rewards/accuracies": 0.45625001192092896,
"rewards/chosen": -0.006467898841947317,
"rewards/margins": 0.003015016671270132,
"rewards/rejected": -0.00948291551321745,
"step": 850
},
{
"epoch": 2.5729244577412116,
"grad_norm": 14.0,
"learning_rate": 3.002247543674921e-08,
"logits/chosen": -18.188644409179688,
"logits/rejected": -18.097511291503906,
"logps/chosen": -497.510986328125,
"logps/rejected": -473.9842224121094,
"loss": 0.6961,
"rewards/accuracies": 0.46875,
"rewards/chosen": -0.011478578671813011,
"rewards/margins": -0.0014710351824760437,
"rewards/rejected": -0.010007544420659542,
"step": 860
},
{
"epoch": 2.6028421839940163,
"grad_norm": 14.4375,
"learning_rate": 2.6015243168726607e-08,
"logits/chosen": -18.49826431274414,
"logits/rejected": -18.359790802001953,
"logps/chosen": -513.165771484375,
"logps/rejected": -497.18487548828125,
"loss": 0.6947,
"rewards/accuracies": 0.4906249940395355,
"rewards/chosen": -0.007126118056476116,
"rewards/margins": 0.002322929911315441,
"rewards/rejected": -0.009449047967791557,
"step": 870
},
{
"epoch": 2.632759910246821,
"grad_norm": 12.4375,
"learning_rate": 2.228029647097057e-08,
"logits/chosen": -18.447689056396484,
"logits/rejected": -18.275293350219727,
"logps/chosen": -497.46392822265625,
"logps/rejected": -472.00531005859375,
"loss": 0.6973,
"rewards/accuracies": 0.4765625,
"rewards/chosen": -0.00794267375022173,
"rewards/margins": -0.0032855805475264788,
"rewards/rejected": -0.004657094366848469,
"step": 880
},
{
"epoch": 2.662677636499626,
"grad_norm": 12.6875,
"learning_rate": 1.882217570643216e-08,
"logits/chosen": -18.6273193359375,
"logits/rejected": -18.57034683227539,
"logps/chosen": -493.40191650390625,
"logps/rejected": -481.95062255859375,
"loss": 0.6963,
"rewards/accuracies": 0.49687498807907104,
"rewards/chosen": -0.010606282390654087,
"rewards/margins": -0.0014992398209869862,
"rewards/rejected": -0.009107043035328388,
"step": 890
},
{
"epoch": 2.6925953627524306,
"grad_norm": 13.5625,
"learning_rate": 1.5645084716469776e-08,
"logits/chosen": -18.280813217163086,
"logits/rejected": -18.22847557067871,
"logps/chosen": -503.5279846191406,
"logps/rejected": -490.97320556640625,
"loss": 0.6932,
"rewards/accuracies": 0.49687498807907104,
"rewards/chosen": -0.007966242730617523,
"rewards/margins": 0.004499543458223343,
"rewards/rejected": -0.012465784326195717,
"step": 900
},
{
"epoch": 2.6925953627524306,
"eval_logits/chosen": -18.48170280456543,
"eval_logits/rejected": -18.363323211669922,
"eval_logps/chosen": -494.2970275878906,
"eval_logps/rejected": -478.86920166015625,
"eval_loss": 0.6951792240142822,
"eval_rewards/accuracies": 0.48374438285827637,
"eval_rewards/chosen": -0.007996306754648685,
"eval_rewards/margins": 0.0007518637576140463,
"eval_rewards/rejected": -0.008748171851038933,
"eval_runtime": 842.6667,
"eval_samples_per_second": 6.347,
"eval_steps_per_second": 0.794,
"step": 900
},
{
"epoch": 2.7225130890052354,
"grad_norm": 13.375,
"learning_rate": 1.2752885710477584e-08,
"logits/chosen": -18.436344146728516,
"logits/rejected": -18.28668975830078,
"logps/chosen": -502.52911376953125,
"logps/rejected": -478.9175720214844,
"loss": 0.6917,
"rewards/accuracies": 0.47968751192092896,
"rewards/chosen": -0.0051972828805446625,
"rewards/margins": 0.007670181337743998,
"rewards/rejected": -0.012867462821304798,
"step": 910
},
{
"epoch": 2.75243081525804,
"grad_norm": 14.5625,
"learning_rate": 1.014909457081664e-08,
"logits/chosen": -18.285552978515625,
"logits/rejected": -18.188400268554688,
"logps/chosen": -495.7474670410156,
"logps/rejected": -482.32086181640625,
"loss": 0.7018,
"rewards/accuracies": 0.4281249940395355,
"rewards/chosen": -0.014425873756408691,
"rewards/margins": -0.012442706152796745,
"rewards/rejected": -0.0019831680692732334,
"step": 920
},
{
"epoch": 2.7823485415108453,
"grad_norm": 13.625,
"learning_rate": 7.836876578755758e-09,
"logits/chosen": -18.36880874633789,
"logits/rejected": -18.273122787475586,
"logps/chosen": -505.2574157714844,
"logps/rejected": -478.4847717285156,
"loss": 0.6956,
"rewards/accuracies": 0.4859375059604645,
"rewards/chosen": -0.005088160280138254,
"rewards/margins": 0.00010951366130029783,
"rewards/rejected": -0.005197672639042139,
"step": 930
},
{
"epoch": 2.81226626776365,
"grad_norm": 13.125,
"learning_rate": 5.819042566618831e-09,
"logits/chosen": -18.473098754882812,
"logits/rejected": -18.300846099853516,
"logps/chosen": -503.1376953125,
"logps/rejected": -475.238037109375,
"loss": 0.6943,
"rewards/accuracies": 0.4749999940395355,
"rewards/chosen": 0.0007304133614525199,
"rewards/margins": 0.002083559287711978,
"rewards/rejected": -0.00135314604267478,
"step": 940
},
{
"epoch": 2.842183994016455,
"grad_norm": 12.625,
"learning_rate": 4.098045500815106e-09,
"logits/chosen": -18.308658599853516,
"logits/rejected": -18.19416046142578,
"logps/chosen": -495.00604248046875,
"logps/rejected": -475.799072265625,
"loss": 0.696,
"rewards/accuracies": 0.44843751192092896,
"rewards/chosen": -0.004273262806236744,
"rewards/margins": -0.0009155833977274597,
"rewards/rejected": -0.0033576800487935543,
"step": 950
},
{
"epoch": 2.8721017202692596,
"grad_norm": 14.5625,
"learning_rate": 2.6759774999071673e-09,
"logits/chosen": -18.59539794921875,
"logits/rejected": -18.472515106201172,
"logps/chosen": -514.5612182617188,
"logps/rejected": -492.31890869140625,
"loss": 0.6964,
"rewards/accuracies": 0.4515624940395355,
"rewards/chosen": -0.0067207301035523415,
"rewards/margins": -0.0017061748076230288,
"rewards/rejected": -0.005014555994421244,
"step": 960
},
{
"epoch": 2.9020194465220643,
"grad_norm": 11.625,
"learning_rate": 1.5545672913407914e-09,
"logits/chosen": -18.352832794189453,
"logits/rejected": -18.20647621154785,
"logps/chosen": -501.56317138671875,
"logps/rejected": -487.9432678222656,
"loss": 0.6959,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.013429331593215466,
"rewards/margins": -0.0001061473521986045,
"rewards/rejected": -0.013323183171451092,
"step": 970
},
{
"epoch": 2.931937172774869,
"grad_norm": 19.5,
"learning_rate": 7.351781099292353e-10,
"logits/chosen": -18.442501068115234,
"logits/rejected": -18.37118148803711,
"logps/chosen": -501.3379821777344,
"logps/rejected": -496.15924072265625,
"loss": 0.7002,
"rewards/accuracies": 0.45625001192092896,
"rewards/chosen": -0.01971256360411644,
"rewards/margins": -0.00949421338737011,
"rewards/rejected": -0.01021835207939148,
"step": 980
},
{
"epoch": 2.961854899027674,
"grad_norm": 11.5,
"learning_rate": 2.1880604064578035e-10,
"logits/chosen": -18.448497772216797,
"logits/rejected": -18.216806411743164,
"logps/chosen": -508.0194396972656,
"logps/rejected": -466.27301025390625,
"loss": 0.6949,
"rewards/accuracies": 0.484375,
"rewards/chosen": -0.0018872346263378859,
"rewards/margins": 0.0016591917956247926,
"rewards/rejected": -0.003546426072716713,
"step": 990
},
{
"epoch": 2.9917726252804786,
"grad_norm": 19.5,
"learning_rate": 6.07880773983993e-12,
"logits/chosen": -18.198387145996094,
"logits/rejected": -18.139768600463867,
"logps/chosen": -483.9991149902344,
"logps/rejected": -473.9156188964844,
"loss": 0.6964,
"rewards/accuracies": 0.45781248807907104,
"rewards/chosen": -0.009544356726109982,
"rewards/margins": -0.0014166312757879496,
"rewards/rejected": -0.00812772661447525,
"step": 1000
},
{
"epoch": 2.9917726252804786,
"eval_logits/chosen": -18.481870651245117,
"eval_logits/rejected": -18.36333465576172,
"eval_logps/chosen": -494.2957763671875,
"eval_logps/rejected": -478.8612060546875,
"eval_loss": 0.6955399513244629,
"eval_rewards/accuracies": 0.48131540417671204,
"eval_rewards/chosen": -0.007866346277296543,
"eval_rewards/margins": 8.37442566989921e-05,
"eval_rewards/rejected": -0.007950090803205967,
"eval_runtime": 843.4786,
"eval_samples_per_second": 6.34,
"eval_steps_per_second": 0.793,
"step": 1000
},
{
"epoch": 2.99775617053104,
"step": 1002,
"total_flos": 0.0,
"train_loss": 0.6953648589328377,
"train_runtime": 32693.7742,
"train_samples_per_second": 1.963,
"train_steps_per_second": 0.031
}
],
"logging_steps": 10,
"max_steps": 1002,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}