{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "eval_steps": 1000,
  "global_step": 1250,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
|
    {
      "epoch": 0.008,
      "grad_norm": 7.598788468990178,
      "learning_rate": 8e-08,
      "logits/chosen": -1.7671998739242554,
      "logits/rejected": -2.2639822959899902,
      "logps/chosen": -46.430763244628906,
      "logps/rejected": -102.85381317138672,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
|
{ |
|
"epoch": 0.08, |
|
"grad_norm": 8.744481268270988, |
|
"learning_rate": 8.000000000000001e-07, |
|
"logits/chosen": -1.6799863576889038, |
|
"logits/rejected": -2.1152544021606445, |
|
"logps/chosen": -50.29410934448242, |
|
"logps/rejected": -101.58956909179688, |
|
"loss": 0.6791, |
|
"rewards/accuracies": 0.7291666865348816, |
|
"rewards/chosen": 0.0007337681599892676, |
|
"rewards/margins": 0.02895486354827881, |
|
"rewards/rejected": -0.028221096843481064, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"grad_norm": 3.572341321811657, |
|
"learning_rate": 1.6000000000000001e-06, |
|
"logits/chosen": -1.558701992034912, |
|
"logits/rejected": -2.037860870361328, |
|
"logps/chosen": -41.180152893066406, |
|
"logps/rejected": -187.47267150878906, |
|
"loss": 0.4012, |
|
"rewards/accuracies": 0.9937499761581421, |
|
"rewards/chosen": 0.05867023020982742, |
|
"rewards/margins": 0.8922162055969238, |
|
"rewards/rejected": -0.833545982837677, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"grad_norm": 4.668066827355425, |
|
"learning_rate": 2.4000000000000003e-06, |
|
"logits/chosen": -1.24845290184021, |
|
"logits/rejected": -1.647834062576294, |
|
"logps/chosen": -57.645042419433594, |
|
"logps/rejected": -346.35516357421875, |
|
"loss": 0.2058, |
|
"rewards/accuracies": 0.925000011920929, |
|
"rewards/chosen": -0.06952311843633652, |
|
"rewards/margins": 2.4007816314697266, |
|
"rewards/rejected": -2.470304489135742, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"grad_norm": 3.5896956533073925, |
|
"learning_rate": 3.2000000000000003e-06, |
|
"logits/chosen": -0.7071009874343872, |
|
"logits/rejected": -1.1559783220291138, |
|
"logps/chosen": -38.179378509521484, |
|
"logps/rejected": -475.6770935058594, |
|
"loss": 0.158, |
|
"rewards/accuracies": 0.9750000238418579, |
|
"rewards/chosen": 0.1055324524641037, |
|
"rewards/margins": 3.844930648803711, |
|
"rewards/rejected": -3.739398241043091, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"grad_norm": 6.069124428046055, |
|
"learning_rate": 4.000000000000001e-06, |
|
"logits/chosen": -0.44346046447753906, |
|
"logits/rejected": -0.9727121591567993, |
|
"logps/chosen": -35.12229919433594, |
|
"logps/rejected": -532.2540893554688, |
|
"loss": 0.1132, |
|
"rewards/accuracies": 0.9750000238418579, |
|
"rewards/chosen": 0.13596299290657043, |
|
"rewards/margins": 4.437729358673096, |
|
"rewards/rejected": -4.301766395568848, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"grad_norm": 2.7055226938318175, |
|
"learning_rate": 4.800000000000001e-06, |
|
"logits/chosen": -0.20063920319080353, |
|
"logits/rejected": -0.6503936648368835, |
|
"logps/chosen": -47.3806037902832, |
|
"logps/rejected": -589.556396484375, |
|
"loss": 0.0932, |
|
"rewards/accuracies": 0.9375, |
|
"rewards/chosen": 0.03769879788160324, |
|
"rewards/margins": 4.943137168884277, |
|
"rewards/rejected": -4.9054388999938965, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"grad_norm": 0.25261425361308987, |
|
"learning_rate": 5.600000000000001e-06, |
|
"logits/chosen": -0.23743292689323425, |
|
"logits/rejected": -0.7351670861244202, |
|
"logps/chosen": -63.814796447753906, |
|
"logps/rejected": -670.7484741210938, |
|
"loss": 0.0621, |
|
"rewards/accuracies": 0.987500011920929, |
|
"rewards/chosen": -0.13264700770378113, |
|
"rewards/margins": 5.5608930587768555, |
|
"rewards/rejected": -5.693540573120117, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"grad_norm": 6.481510280384696, |
|
"learning_rate": 6.4000000000000006e-06, |
|
"logits/chosen": -0.03323043882846832, |
|
"logits/rejected": -0.603085994720459, |
|
"logps/chosen": -121.7635498046875, |
|
"logps/rejected": -793.174560546875, |
|
"loss": 0.0371, |
|
"rewards/accuracies": 0.9937499761581421, |
|
"rewards/chosen": -0.7093967199325562, |
|
"rewards/margins": 6.21297550201416, |
|
"rewards/rejected": -6.922372341156006, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"grad_norm": 0.9177658561115962, |
|
"learning_rate": 7.2000000000000005e-06, |
|
"logits/chosen": 0.006849220488220453, |
|
"logits/rejected": -0.6091193556785583, |
|
"logps/chosen": -155.50479125976562, |
|
"logps/rejected": -916.3829345703125, |
|
"loss": 0.0084, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.0437133312225342, |
|
"rewards/margins": 7.108320713043213, |
|
"rewards/rejected": -8.152033805847168, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"grad_norm": 0.06845804795916455, |
|
"learning_rate": 8.000000000000001e-06, |
|
"logits/chosen": 0.13967576622962952, |
|
"logits/rejected": -0.4425061345100403, |
|
"logps/chosen": -133.66134643554688, |
|
"logps/rejected": -994.0302734375, |
|
"loss": 0.0126, |
|
"rewards/accuracies": 0.9937499761581421, |
|
"rewards/chosen": -0.8357334136962891, |
|
"rewards/margins": 8.085713386535645, |
|
"rewards/rejected": -8.921446800231934, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"grad_norm": 5.6107817864989356, |
|
"learning_rate": 8.8e-06, |
|
"logits/chosen": 0.22975310683250427, |
|
"logits/rejected": -0.414044052362442, |
|
"logps/chosen": -106.53846740722656, |
|
"logps/rejected": -1022.2738037109375, |
|
"loss": 0.0208, |
|
"rewards/accuracies": 0.9937499761581421, |
|
"rewards/chosen": -0.5662684440612793, |
|
"rewards/margins": 8.641936302185059, |
|
"rewards/rejected": -9.20820426940918, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"grad_norm": 0.01682091967533478, |
|
"learning_rate": 9.600000000000001e-06, |
|
"logits/chosen": 0.3314499855041504, |
|
"logits/rejected": -0.5161079168319702, |
|
"logps/chosen": -112.84576416015625, |
|
"logps/rejected": -1047.9407958984375, |
|
"loss": 0.0003, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.6609307527542114, |
|
"rewards/margins": 8.777119636535645, |
|
"rewards/rejected": -9.438051223754883, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"grad_norm": 2.1463004224597944, |
|
"learning_rate": 9.999512620046523e-06, |
|
"logits/chosen": 0.020063649863004684, |
|
"logits/rejected": -0.6272026300430298, |
|
"logps/chosen": -129.10415649414062, |
|
"logps/rejected": -1125.7564697265625, |
|
"loss": 0.0126, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.7790005207061768, |
|
"rewards/margins": 9.478322982788086, |
|
"rewards/rejected": -10.257322311401367, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"grad_norm": 0.12425603579424757, |
|
"learning_rate": 9.995614150494293e-06, |
|
"logits/chosen": -0.6389492154121399, |
|
"logits/rejected": -1.3565341234207153, |
|
"logps/chosen": -101.65057373046875, |
|
"logps/rejected": -955.8663330078125, |
|
"loss": 0.0057, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.516974925994873, |
|
"rewards/margins": 8.029803276062012, |
|
"rewards/rejected": -8.546777725219727, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"grad_norm": 0.013089818267371892, |
|
"learning_rate": 9.987820251299121e-06, |
|
"logits/chosen": -0.40948599576950073, |
|
"logits/rejected": -1.0153663158416748, |
|
"logps/chosen": -87.0394287109375, |
|
"logps/rejected": -1100.132568359375, |
|
"loss": 0.0004, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.3547939658164978, |
|
"rewards/margins": 9.640316009521484, |
|
"rewards/rejected": -9.995109558105469, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"grad_norm": 0.01621300125862196, |
|
"learning_rate": 9.976136999909156e-06, |
|
"logits/chosen": -0.7102096080780029, |
|
"logits/rejected": -1.363110065460205, |
|
"logps/chosen": -88.75753021240234, |
|
"logps/rejected": -1060.689208984375, |
|
"loss": 0.0052, |
|
"rewards/accuracies": 0.9937499761581421, |
|
"rewards/chosen": -0.3866530954837799, |
|
"rewards/margins": 9.206262588500977, |
|
"rewards/rejected": -9.592916488647461, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 1.3599999999999999, |
|
"grad_norm": 0.0026423921093196084, |
|
"learning_rate": 9.960573506572391e-06, |
|
"logits/chosen": -0.8623284101486206, |
|
"logits/rejected": -1.6030025482177734, |
|
"logps/chosen": -77.21472930908203, |
|
"logps/rejected": -1054.2359619140625, |
|
"loss": 0.0006, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.29167506098747253, |
|
"rewards/margins": 9.230081558227539, |
|
"rewards/rejected": -9.521757125854492, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 1.44, |
|
"grad_norm": 0.003569937734506862, |
|
"learning_rate": 9.941141907232766e-06, |
|
"logits/chosen": -0.7774030566215515, |
|
"logits/rejected": -1.4183218479156494, |
|
"logps/chosen": -95.35404968261719, |
|
"logps/rejected": -1243.467529296875, |
|
"loss": 0.0003, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.45167961716651917, |
|
"rewards/margins": 10.976358413696289, |
|
"rewards/rejected": -11.428037643432617, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 1.52, |
|
"grad_norm": 0.004345604176761015, |
|
"learning_rate": 9.91785735406693e-06, |
|
"logits/chosen": -0.6248332262039185, |
|
"logits/rejected": -1.2730491161346436, |
|
"logps/chosen": -98.45368957519531, |
|
"logps/rejected": -1197.553466796875, |
|
"loss": 0.007, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.48718851804733276, |
|
"rewards/margins": 10.482004165649414, |
|
"rewards/rejected": -10.969191551208496, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 1.6, |
|
"grad_norm": 0.6594773158021805, |
|
"learning_rate": 9.890738003669029e-06, |
|
"logits/chosen": -0.5561812520027161, |
|
"logits/rejected": -1.166264533996582, |
|
"logps/chosen": -94.01605224609375, |
|
"logps/rejected": -1177.841064453125, |
|
"loss": 0.0013, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.43924322724342346, |
|
"rewards/margins": 10.321309089660645, |
|
"rewards/rejected": -10.760551452636719, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 1.6800000000000002, |
|
"grad_norm": 0.014550855783893434, |
|
"learning_rate": 9.859805002892733e-06, |
|
"logits/chosen": -0.30123084783554077, |
|
"logits/rejected": -0.9458983540534973, |
|
"logps/chosen": -84.95938110351562, |
|
"logps/rejected": -1145.023681640625, |
|
"loss": 0.001, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.35356029868125916, |
|
"rewards/margins": 10.082305908203125, |
|
"rewards/rejected": -10.435864448547363, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 1.76, |
|
"grad_norm": 0.004377641691260569, |
|
"learning_rate": 9.825082472361558e-06, |
|
"logits/chosen": -0.36692723631858826, |
|
"logits/rejected": -1.0984880924224854, |
|
"logps/chosen": -58.7005615234375, |
|
"logps/rejected": -1094.305419921875, |
|
"loss": 0.0071, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.10480108112096786, |
|
"rewards/margins": 9.808042526245117, |
|
"rewards/rejected": -9.91284465789795, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 1.8399999999999999, |
|
"grad_norm": 0.018821455289326113, |
|
"learning_rate": 9.786597487660336e-06, |
|
"logits/chosen": -0.23969046771526337, |
|
"logits/rejected": -0.9250698089599609, |
|
"logps/chosen": -67.17304992675781, |
|
"logps/rejected": -1114.4503173828125, |
|
"loss": 0.0004, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.18792255222797394, |
|
"rewards/margins": 9.926887512207031, |
|
"rewards/rejected": -10.114809036254883, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 1.92, |
|
"grad_norm": 0.017614530283405035, |
|
"learning_rate": 9.744380058222483e-06, |
|
"logits/chosen": -0.45373648405075073, |
|
"logits/rejected": -1.0718930959701538, |
|
"logps/chosen": -128.1130828857422, |
|
"logps/rejected": -1161.447509765625, |
|
"loss": 0.0039, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.774398684501648, |
|
"rewards/margins": 9.828145980834961, |
|
"rewards/rejected": -10.602544784545898, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"grad_norm": 0.020987613586006735, |
|
"learning_rate": 9.698463103929542e-06, |
|
"logits/chosen": -0.34758588671684265, |
|
"logits/rejected": -0.9034555554389954, |
|
"logps/chosen": -164.2602996826172, |
|
"logps/rejected": -1219.2265625, |
|
"loss": 0.0004, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.1342051029205322, |
|
"rewards/margins": 10.054884910583496, |
|
"rewards/rejected": -11.189090728759766, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 2.08, |
|
"grad_norm": 0.004509813266090499, |
|
"learning_rate": 9.648882429441258e-06, |
|
"logits/chosen": -0.07843346893787384, |
|
"logits/rejected": -0.7051417827606201, |
|
"logps/chosen": -154.61831665039062, |
|
"logps/rejected": -1228.2486572265625, |
|
"loss": 0.0014, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.0459339618682861, |
|
"rewards/margins": 10.211647987365723, |
|
"rewards/rejected": -11.25758171081543, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 2.16, |
|
"grad_norm": 0.10540035188708549, |
|
"learning_rate": 9.595676696276173e-06, |
|
"logits/chosen": -0.16252107918262482, |
|
"logits/rejected": -0.6854008436203003, |
|
"logps/chosen": -215.68759155273438, |
|
"logps/rejected": -1336.6497802734375, |
|
"loss": 0.0007, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.645402193069458, |
|
"rewards/margins": 10.722552299499512, |
|
"rewards/rejected": -12.367955207824707, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 2.24, |
|
"grad_norm": 0.0014480877122579167, |
|
"learning_rate": 9.538887392664544e-06, |
|
"logits/chosen": -0.04962470754981041, |
|
"logits/rejected": -0.597977340221405, |
|
"logps/chosen": -182.0589599609375, |
|
"logps/rejected": -1372.7177734375, |
|
"loss": 0.0003, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.3132940530776978, |
|
"rewards/margins": 11.408597946166992, |
|
"rewards/rejected": -12.721891403198242, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 2.32, |
|
"grad_norm": 0.0169674710944411, |
|
"learning_rate": 9.478558801197065e-06, |
|
"logits/chosen": -0.06320101767778397, |
|
"logits/rejected": -0.6581576466560364, |
|
"logps/chosen": -164.2813720703125, |
|
"logps/rejected": -1302.8653564453125, |
|
"loss": 0.0071, |
|
"rewards/accuracies": 0.9937499761581421, |
|
"rewards/chosen": -1.146195888519287, |
|
"rewards/margins": 10.875839233398438, |
|
"rewards/rejected": -12.0220365524292, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 2.4, |
|
"grad_norm": 0.0007002624784587229, |
|
"learning_rate": 9.414737964294636e-06, |
|
"logits/chosen": -0.18863362073898315, |
|
"logits/rejected": -0.7296714782714844, |
|
"logps/chosen": -155.30770874023438, |
|
"logps/rejected": -1376.25830078125, |
|
"loss": 0.0002, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.0464993715286255, |
|
"rewards/margins": 11.702775001525879, |
|
"rewards/rejected": -12.749274253845215, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 2.48, |
|
"grad_norm": 0.0005702077735596562, |
|
"learning_rate": 9.347474647526095e-06, |
|
"logits/chosen": -0.2102803885936737, |
|
"logits/rejected": -0.6938263773918152, |
|
"logps/chosen": -163.68673706054688, |
|
"logps/rejected": -1451.975830078125, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.128525972366333, |
|
"rewards/margins": 12.382759094238281, |
|
"rewards/rejected": -13.511285781860352, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 2.56, |
|
"grad_norm": 0.0011316887816189948, |
|
"learning_rate": 9.276821300802535e-06, |
|
"logits/chosen": -0.20408546924591064, |
|
"logits/rejected": -0.7608290910720825, |
|
"logps/chosen": -148.53030395507812, |
|
"logps/rejected": -1382.659423828125, |
|
"loss": 0.0002, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.995039165019989, |
|
"rewards/margins": 11.816390991210938, |
|
"rewards/rejected": -12.811429977416992, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 2.64, |
|
"grad_norm": 0.0028257198320044193, |
|
"learning_rate": 9.202833017478421e-06, |
|
"logits/chosen": -0.23039910197257996, |
|
"logits/rejected": -0.8503023386001587, |
|
"logps/chosen": -127.6865234375, |
|
"logps/rejected": -1280.506591796875, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.78924959897995, |
|
"rewards/margins": 10.988354682922363, |
|
"rewards/rejected": -11.777605056762695, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 2.7199999999999998, |
|
"grad_norm": 0.03275958999147167, |
|
"learning_rate": 9.125567491391476e-06, |
|
"logits/chosen": -0.16481304168701172, |
|
"logits/rejected": -0.6871527433395386, |
|
"logps/chosen": -165.09945678710938, |
|
"logps/rejected": -1415.8587646484375, |
|
"loss": 0.0019, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.147678017616272, |
|
"rewards/margins": 12.000423431396484, |
|
"rewards/rejected": -13.148101806640625, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 2.8, |
|
"grad_norm": 0.040681404406324485, |
|
"learning_rate": 9.045084971874738e-06, |
|
"logits/chosen": -0.2651657462120056, |
|
"logits/rejected": -0.8547052145004272, |
|
"logps/chosen": -169.6096954345703, |
|
"logps/rejected": -1442.6204833984375, |
|
"loss": 0.0005, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.185150384902954, |
|
"rewards/margins": 12.228256225585938, |
|
"rewards/rejected": -13.413406372070312, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 2.88, |
|
"grad_norm": 0.0005914776735028229, |
|
"learning_rate": 8.961448216775955e-06, |
|
"logits/chosen": -0.3672965168952942, |
|
"logits/rejected": -0.9414142370223999, |
|
"logps/chosen": -155.79690551757812, |
|
"logps/rejected": -1344.467529296875, |
|
"loss": 0.0003, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.0553600788116455, |
|
"rewards/margins": 11.368631362915039, |
|
"rewards/rejected": -12.423991203308105, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 2.96, |
|
"grad_norm": 0.0005670082430647187, |
|
"learning_rate": 8.874722443520898e-06, |
|
"logits/chosen": -0.3921353816986084, |
|
"logits/rejected": -0.8953983187675476, |
|
"logps/chosen": -158.56431579589844, |
|
"logps/rejected": -1356.921875, |
|
"loss": 0.0007, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.090203046798706, |
|
"rewards/margins": 11.472868919372559, |
|
"rewards/rejected": -12.563072204589844, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 3.04, |
|
"grad_norm": 0.005160483245130726, |
|
"learning_rate": 8.784975278258783e-06, |
|
"logits/chosen": -0.5358741879463196, |
|
"logits/rejected": -1.061417579650879, |
|
"logps/chosen": -175.1329803466797, |
|
"logps/rejected": -1406.556396484375, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.2551603317260742, |
|
"rewards/margins": 11.804767608642578, |
|
"rewards/rejected": -13.059926986694336, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 3.12, |
|
"grad_norm": 0.006657927449314328, |
|
"learning_rate": 8.692276703129421e-06, |
|
"logits/chosen": -0.6002996563911438, |
|
"logits/rejected": -1.1299346685409546, |
|
"logps/chosen": -152.69854736328125, |
|
"logps/rejected": -1418.118896484375, |
|
"loss": 0.0002, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.0260169506072998, |
|
"rewards/margins": 12.134782791137695, |
|
"rewards/rejected": -13.160799980163574, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 3.2, |
|
"grad_norm": 0.00039712494546681335, |
|
"learning_rate": 8.596699001693257e-06, |
|
"logits/chosen": -0.6303237676620483, |
|
"logits/rejected": -1.101627230644226, |
|
"logps/chosen": -196.59848022460938, |
|
"logps/rejected": -1504.538818359375, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.4475935697555542, |
|
"rewards/margins": 12.599725723266602, |
|
"rewards/rejected": -14.047317504882812, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 3.2800000000000002, |
|
"grad_norm": 0.0040631132506810195, |
|
"learning_rate": 8.498316702566828e-06, |
|
"logits/chosen": -0.5605727434158325, |
|
"logits/rejected": -1.073054313659668, |
|
"logps/chosen": -163.1142120361328, |
|
"logps/rejected": -1435.8837890625, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.1389962434768677, |
|
"rewards/margins": 12.203179359436035, |
|
"rewards/rejected": -13.342175483703613, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 3.36, |
|
"grad_norm": 0.041567571634384244, |
|
"learning_rate": 8.397206521307584e-06, |
|
"logits/chosen": -0.6039146780967712, |
|
"logits/rejected": -1.0801668167114258, |
|
"logps/chosen": -178.77394104003906, |
|
"logps/rejected": -1475.822998046875, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.293518304824829, |
|
"rewards/margins": 12.463233947753906, |
|
"rewards/rejected": -13.756753921508789, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 3.44, |
|
"grad_norm": 0.0029527470935980476, |
|
"learning_rate": 8.293447300593402e-06, |
|
"logits/chosen": -0.6083961129188538, |
|
"logits/rejected": -1.0437417030334473, |
|
"logps/chosen": -192.32000732421875, |
|
"logps/rejected": -1542.182373046875, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.4102303981781006, |
|
"rewards/margins": 13.007156372070312, |
|
"rewards/rejected": -14.417387008666992, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 3.52, |
|
"grad_norm": 0.0011462076542202355, |
|
"learning_rate": 8.18711994874345e-06, |
|
"logits/chosen": -0.5748112201690674, |
|
"logits/rejected": -0.9151349067687988, |
|
"logps/chosen": -222.9849395751953, |
|
"logps/rejected": -1637.6048583984375, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.6979871988296509, |
|
"rewards/margins": 13.692995071411133, |
|
"rewards/rejected": -15.390981674194336, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 3.6, |
|
"grad_norm": 0.0010367050759902319, |
|
"learning_rate": 8.078307376628292e-06, |
|
"logits/chosen": -0.5503496527671814, |
|
"logits/rejected": -1.0760968923568726, |
|
"logps/chosen": -169.05398559570312, |
|
"logps/rejected": -1424.4925537109375, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.197346568107605, |
|
"rewards/margins": 12.040412902832031, |
|
"rewards/rejected": -13.237760543823242, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 3.68, |
|
"grad_norm": 0.00030163796426365894, |
|
"learning_rate": 7.967094433018508e-06, |
|
"logits/chosen": -0.522539496421814, |
|
"logits/rejected": -1.063500165939331, |
|
"logps/chosen": -175.1356658935547, |
|
"logps/rejected": -1461.3223876953125, |
|
"loss": 0.0013, |
|
"rewards/accuracies": 0.9937499761581421, |
|
"rewards/chosen": -1.2569339275360107, |
|
"rewards/margins": 12.344822883605957, |
|
"rewards/rejected": -13.601758003234863, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 3.76, |
|
"grad_norm": 0.017735619925505812, |
|
"learning_rate": 7.85356783842216e-06, |
|
"logits/chosen": -0.5384105443954468, |
|
"logits/rejected": -1.0146501064300537, |
|
"logps/chosen": -158.76834106445312, |
|
"logps/rejected": -1430.92041015625, |
|
"loss": 0.004, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.0810316801071167, |
|
"rewards/margins": 12.214686393737793, |
|
"rewards/rejected": -13.2957181930542, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 3.84, |
|
"grad_norm": 0.016912085649151143, |
|
"learning_rate": 7.737816117462752e-06, |
|
"logits/chosen": -0.6946359872817993, |
|
"logits/rejected": -1.0958452224731445, |
|
"logps/chosen": -171.2017364501953, |
|
"logps/rejected": -1454.8028564453125, |
|
"loss": 0.0002, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.2047702074050903, |
|
"rewards/margins": 12.34769058227539, |
|
"rewards/rejected": -13.552459716796875, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 3.92, |
|
"grad_norm": 0.014315964975255087, |
|
"learning_rate": 7.619929529850397e-06, |
|
"logits/chosen": -0.6201928853988647, |
|
"logits/rejected": -1.0985499620437622, |
|
"logps/chosen": -137.38720703125, |
|
"logps/rejected": -1331.304443359375, |
|
"loss": 0.0151, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.8758776783943176, |
|
"rewards/margins": 11.414178848266602, |
|
"rewards/rejected": -12.290056228637695, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"grad_norm": 0.0008323648197206945, |
|
"learning_rate": 7.500000000000001e-06, |
|
"logits/chosen": -0.6446269750595093, |
|
"logits/rejected": -1.0706923007965088, |
|
"logps/chosen": -151.62704467773438, |
|
"logps/rejected": -1406.1658935546875, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.0133395195007324, |
|
"rewards/margins": 12.042734146118164, |
|
"rewards/rejected": -13.056074142456055, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 4.08, |
|
"grad_norm": 0.005439240404948527, |
|
"learning_rate": 7.378121045351378e-06, |
|
"logits/chosen": -0.6006187200546265, |
|
"logits/rejected": -0.9754472970962524, |
|
"logps/chosen": -175.7959747314453, |
|
"logps/rejected": -1473.20947265625, |
|
"loss": 0.0003, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.240095853805542, |
|
"rewards/margins": 12.511373519897461, |
|
"rewards/rejected": -13.75146770477295, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 4.16, |
|
"grad_norm": 0.005118727114675882, |
|
"learning_rate": 7.254387703447154e-06, |
|
"logits/chosen": -0.6073562502861023, |
|
"logits/rejected": -1.0599517822265625, |
|
"logps/chosen": -158.0718994140625, |
|
"logps/rejected": -1458.5751953125, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.0801844596862793, |
|
"rewards/margins": 12.502667427062988, |
|
"rewards/rejected": -13.582852363586426, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 4.24, |
|
"grad_norm": 0.0006207044531316051, |
|
"learning_rate": 7.128896457825364e-06, |
|
"logits/chosen": -0.6554626226425171, |
|
"logits/rejected": -1.1788212060928345, |
|
"logps/chosen": -147.23751831054688, |
|
"logps/rejected": -1414.2178955078125, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.9758721590042114, |
|
"rewards/margins": 12.157670974731445, |
|
"rewards/rejected": -13.133543014526367, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 4.32, |
|
"grad_norm": 0.017948964858749528, |
|
"learning_rate": 7.0017451627844765e-06, |
|
"logits/chosen": -0.6513161063194275, |
|
"logits/rejected": -1.2309751510620117, |
|
"logps/chosen": -156.2422637939453, |
|
"logps/rejected": -1466.906494140625, |
|
"loss": 0.0003, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.0636872053146362, |
|
"rewards/margins": 12.582140922546387, |
|
"rewards/rejected": -13.645828247070312, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 4.4, |
|
"grad_norm": 0.003856347784286379, |
|
"learning_rate": 6.873032967079562e-06, |
|
"logits/chosen": -0.6816533803939819, |
|
"logits/rejected": -1.114074468612671, |
|
"logps/chosen": -242.92758178710938, |
|
"logps/rejected": -1675.7265625, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.8909059762954712, |
|
"rewards/margins": 13.899398803710938, |
|
"rewards/rejected": -15.790304183959961, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 4.48, |
|
"grad_norm": 0.0019510230690865061, |
|
"learning_rate": 6.7428602366090764e-06, |
|
"logits/chosen": -0.4731020927429199, |
|
"logits/rejected": -0.9437645673751831, |
|
"logps/chosen": -153.3728790283203, |
|
"logps/rejected": -1450.95458984375, |
|
"loss": 0.0002, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.0217416286468506, |
|
"rewards/margins": 12.475053787231445, |
|
"rewards/rejected": -13.496793746948242, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 4.5600000000000005, |
|
"grad_norm": 0.0015858533428631317, |
|
"learning_rate": 6.611328476152557e-06, |
|
"logits/chosen": -0.45364946126937866, |
|
"logits/rejected": -0.953697681427002, |
|
"logps/chosen": -169.60000610351562, |
|
"logps/rejected": -1481.096435546875, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.1921452283859253, |
|
"rewards/margins": 12.605159759521484, |
|
"rewards/rejected": -13.797304153442383, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 4.64, |
|
"grad_norm": 0.004170987996850058, |
|
"learning_rate": 6.4785402502202345e-06, |
|
"logits/chosen": -0.3475385904312134, |
|
"logits/rejected": -0.9073439836502075, |
|
"logps/chosen": -140.90237426757812, |
|
"logps/rejected": -1381.1484375, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.921575665473938, |
|
"rewards/margins": 11.858769416809082, |
|
"rewards/rejected": -12.78034496307373, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 4.72, |
|
"grad_norm": 0.0013538463974769534, |
|
"learning_rate": 6.344599103076329e-06, |
|
"logits/chosen": -0.3682401776313782, |
|
"logits/rejected": -0.9007229804992676, |
|
"logps/chosen": -147.44747924804688, |
|
"logps/rejected": -1403.5577392578125, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.9861438870429993, |
|
"rewards/margins": 12.035198211669922, |
|
"rewards/rejected": -13.021342277526855, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 4.8, |
|
"grad_norm": 0.022561866804519767, |
|
"learning_rate": 6.209609477998339e-06, |
|
"logits/chosen": -0.4450947642326355, |
|
"logits/rejected": -0.9426549077033997, |
|
"logps/chosen": -155.5927276611328, |
|
"logps/rejected": -1427.051513671875, |
|
"loss": 0.0002, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.061085820198059, |
|
"rewards/margins": 12.196368217468262, |
|
"rewards/rejected": -13.257455825805664, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 4.88, |
|
"grad_norm": 0.012428458617134987, |
|
"learning_rate": 6.073676635835317e-06, |
|
"logits/chosen": -0.47192803025245667, |
|
"logits/rejected": -0.9936426281929016, |
|
"logps/chosen": -163.2184295654297, |
|
"logps/rejected": -1448.4224853515625, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.1418524980545044, |
|
"rewards/margins": 12.326138496398926, |
|
"rewards/rejected": -13.467989921569824, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 4.96, |
|
"grad_norm": 0.00027433530717749753, |
|
"learning_rate": 5.936906572928625e-06, |
|
"logits/chosen": -0.4440648555755615, |
|
"logits/rejected": -0.9577704668045044, |
|
"logps/chosen": -123.54743957519531, |
|
"logps/rejected": -1388.123779296875, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.7459378838539124, |
|
"rewards/margins": 12.11369514465332, |
|
"rewards/rejected": -12.85963249206543, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 5.04, |
|
"grad_norm": 0.0003376506922564907, |
|
"learning_rate": 5.799405938459175e-06, |
|
"logits/chosen": -0.449303537607193, |
|
"logits/rejected": -0.9946249723434448, |
|
"logps/chosen": -126.98262023925781, |
|
"logps/rejected": -1411.4127197265625, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.7872297763824463, |
|
"rewards/margins": 12.30899715423584, |
|
"rewards/rejected": -13.096226692199707, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 5.12, |
|
"grad_norm": 0.005188794996734725, |
|
"learning_rate": 5.661281951285613e-06, |
|
"logits/chosen": -0.39260971546173096, |
|
"logits/rejected": -0.8862244486808777, |
|
"logps/chosen": -139.25743103027344, |
|
"logps/rejected": -1483.836669921875, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.8930659294128418, |
|
"rewards/margins": 12.942354202270508, |
|
"rewards/rejected": -13.835420608520508, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 5.2, |
|
"grad_norm": 0.0005439385645287033, |
|
"learning_rate": 5.522642316338268e-06, |
|
"logits/chosen": -0.42691946029663086, |
|
"logits/rejected": -0.9130016565322876, |
|
"logps/chosen": -146.08859252929688, |
|
"logps/rejected": -1495.5863037109375, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.9477936625480652, |
|
"rewards/margins": 13.005441665649414, |
|
"rewards/rejected": -13.953234672546387, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 5.28, |
|
"grad_norm": 0.000583833541599897, |
|
"learning_rate": 5.383595140634093e-06, |
|
"logits/chosen": -0.34127289056777954, |
|
"logits/rejected": -0.829853892326355, |
|
"logps/chosen": -133.29141235351562, |
|
"logps/rejected": -1512.446044921875, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.8321868777275085, |
|
"rewards/margins": 13.277082443237305, |
|
"rewards/rejected": -14.109270095825195, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 5.36, |
|
"grad_norm": 0.005030134807238761, |
|
"learning_rate": 5.244248848978067e-06, |
|
"logits/chosen": -0.3165265917778015, |
|
"logits/rejected": -0.9533147811889648, |
|
"logps/chosen": -104.08113098144531, |
|
"logps/rejected": -1304.6605224609375, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.5602812170982361, |
|
"rewards/margins": 11.446382522583008, |
|
"rewards/rejected": -12.00666332244873, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 5.44, |
|
"grad_norm": 0.003070807279465456, |
|
"learning_rate": 5.1047120994167855e-06, |
|
"logits/chosen": -0.36942166090011597, |
|
"logits/rejected": -0.9378865957260132, |
|
"logps/chosen": -122.25132751464844, |
|
"logps/rejected": -1403.7523193359375, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.7320325970649719, |
|
"rewards/margins": 12.275636672973633, |
|
"rewards/rejected": -13.007669448852539, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 5.52, |
|
"grad_norm": 0.0008123370720346338, |
|
"learning_rate": 4.965093698510192e-06, |
|
"logits/chosen": -0.3969612419605255, |
|
"logits/rejected": -0.9130090475082397, |
|
"logps/chosen": -164.13131713867188, |
|
"logps/rejected": -1582.1212158203125, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.1271440982818604, |
|
"rewards/margins": 13.687349319458008, |
|
"rewards/rejected": -14.814491271972656, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 5.6, |
|
"grad_norm": 0.0008344763947590128, |
|
"learning_rate": 4.825502516487497e-06, |
|
"logits/chosen": -0.3772839903831482, |
|
"logits/rejected": -0.8978067636489868, |
|
"logps/chosen": -145.3348846435547, |
|
"logps/rejected": -1506.9521484375, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.9458595514297485, |
|
"rewards/margins": 13.112927436828613, |
|
"rewards/rejected": -14.05878734588623, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 5.68, |
|
"grad_norm": 0.0007531302584682961, |
|
"learning_rate": 4.686047402353433e-06, |
|
"logits/chosen": -0.45077744126319885, |
|
"logits/rejected": -1.0165283679962158, |
|
"logps/chosen": -132.34994506835938, |
|
"logps/rejected": -1427.802734375, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.8286944627761841, |
|
"rewards/margins": 12.429733276367188, |
|
"rewards/rejected": -13.258428573608398, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 5.76, |
|
"grad_norm": 0.0003263470597252238, |
|
"learning_rate": 4.546837099011101e-06, |
|
"logits/chosen": -0.43557509779930115, |
|
"logits/rejected": -0.9456827044487, |
|
"logps/chosen": -133.05575561523438, |
|
"logps/rejected": -1460.742431640625, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.832891583442688, |
|
"rewards/margins": 12.750478744506836, |
|
"rewards/rejected": -13.58337116241455, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 5.84, |
|
"grad_norm": 0.0003894890886611588, |
|
"learning_rate": 4.4079801584674955e-06, |
|
"logits/chosen": -0.39473429322242737, |
|
"logits/rejected": -0.9540184140205383, |
|
"logps/chosen": -120.5350341796875, |
|
"logps/rejected": -1388.319580078125, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.7102386951446533, |
|
"rewards/margins": 12.142650604248047, |
|
"rewards/rejected": -12.852888107299805, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 5.92, |
|
"grad_norm": 0.00021687658056609183, |
|
"learning_rate": 4.269584857187942e-06, |
|
"logits/chosen": -0.41277170181274414, |
|
"logits/rejected": -0.8866747617721558, |
|
"logps/chosen": -169.7379150390625, |
|
"logps/rejected": -1618.2318115234375, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.1842690706253052, |
|
"rewards/margins": 13.998774528503418, |
|
"rewards/rejected": -15.183042526245117, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"grad_norm": 0.00017363950347948243, |
|
"learning_rate": 4.131759111665349e-06, |
|
"logits/chosen": -0.380937397480011, |
|
"logits/rejected": -0.941672146320343, |
|
"logps/chosen": -138.58726501464844, |
|
"logps/rejected": -1467.87109375, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.8956032991409302, |
|
"rewards/margins": 12.76499080657959, |
|
"rewards/rejected": -13.66059398651123, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 6.08, |
|
"grad_norm": 0.0002869387785848348, |
|
"learning_rate": 3.994610394270178e-06, |
|
"logits/chosen": -0.3407558500766754, |
|
"logits/rejected": -0.9460574984550476, |
|
"logps/chosen": -120.59251403808594, |
|
"logps/rejected": -1365.903564453125, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.7153140306472778, |
|
"rewards/margins": 11.916279792785645, |
|
"rewards/rejected": -12.631593704223633, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 6.16, |
|
"grad_norm": 0.00022830076793507427, |
|
"learning_rate": 3.8582456494467214e-06, |
|
"logits/chosen": -0.3290683329105377, |
|
"logits/rejected": -0.8766037225723267, |
|
"logps/chosen": -137.58761596679688, |
|
"logps/rejected": -1491.4849853515625, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.8715416789054871, |
|
"rewards/margins": 13.01722240447998, |
|
"rewards/rejected": -13.888763427734375, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 6.24, |
|
"grad_norm": 0.00018905031588614676, |
|
"learning_rate": 3.7227712103210485e-06, |
|
"logits/chosen": -0.4009069502353668, |
|
"logits/rejected": -0.8301585912704468, |
|
"logps/chosen": -168.92832946777344, |
|
"logps/rejected": -1632.318115234375, |
|
"loss": 0.0001, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.167081594467163, |
|
"rewards/margins": 14.170913696289062, |
|
"rewards/rejected": -15.337995529174805, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 6.32, |
|
"grad_norm": 0.00044615726295310295, |
|
"learning_rate": 3.5882927157856175e-06, |
|
"logits/chosen": -0.3509620130062103, |
|
"logits/rejected": -0.8593319654464722, |
|
"logps/chosen": -129.48745727539062, |
|
"logps/rejected": -1498.2364501953125, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.7902848720550537, |
|
"rewards/margins": 13.172172546386719, |
|
"rewards/rejected": -13.962457656860352, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 6.4, |
|
"grad_norm": 0.00017684322493448067, |
|
"learning_rate": 3.4549150281252635e-06, |
|
"logits/chosen": -0.4214898645877838, |
|
"logits/rejected": -0.8738347887992859, |
|
"logps/chosen": -171.40628051757812, |
|
"logps/rejected": -1577.4674072265625, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.1943633556365967, |
|
"rewards/margins": 13.582929611206055, |
|
"rewards/rejected": -14.77729320526123, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 6.48, |
|
"grad_norm": 0.00035092578034022254, |
|
"learning_rate": 3.322742151248726e-06, |
|
"logits/chosen": -0.33817845582962036, |
|
"logits/rejected": -0.8328613042831421, |
|
"logps/chosen": -131.61691284179688, |
|
"logps/rejected": -1441.378662109375, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.8236126899719238, |
|
"rewards/margins": 12.581751823425293, |
|
"rewards/rejected": -13.405363082885742, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 6.5600000000000005, |
|
"grad_norm": 0.0016578838421130056, |
|
"learning_rate": 3.1918771495895395e-06, |
|
"logits/chosen": -0.3588394224643707, |
|
"logits/rejected": -0.890457034111023, |
|
"logps/chosen": -147.90191650390625, |
|
"logps/rejected": -1499.8687744140625, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.9654636383056641, |
|
"rewards/margins": 13.020975112915039, |
|
"rewards/rejected": -13.986437797546387, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 6.64, |
|
"grad_norm": 0.0001813290316556118, |
|
"learning_rate": 3.0624220677394854e-06, |
|
"logits/chosen": -0.4224972724914551, |
|
"logits/rejected": -0.9041132926940918, |
|
"logps/chosen": -146.0318145751953, |
|
"logps/rejected": -1597.461669921875, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.9522792100906372, |
|
"rewards/margins": 14.007960319519043, |
|
"rewards/rejected": -14.960238456726074, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 6.72, |
|
"grad_norm": 0.007917608250023045, |
|
"learning_rate": 2.934477850877292e-06, |
|
"logits/chosen": -0.36725106835365295, |
|
"logits/rejected": -0.8673862218856812, |
|
"logps/chosen": -137.09661865234375, |
|
"logps/rejected": -1493.104248046875, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.8727433085441589, |
|
"rewards/margins": 13.050328254699707, |
|
"rewards/rejected": -13.923070907592773, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 6.8, |
|
"grad_norm": 0.00018574879436063482, |
|
"learning_rate": 2.8081442660546126e-06, |
|
"logits/chosen": -0.3642883896827698, |
|
"logits/rejected": -0.9518113136291504, |
|
"logps/chosen": -126.36039733886719, |
|
"logps/rejected": -1447.3575439453125, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.7840255498886108, |
|
"rewards/margins": 12.662456512451172, |
|
"rewards/rejected": -13.446481704711914, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 6.88, |
|
"grad_norm": 0.00015909977255206564, |
|
"learning_rate": 2.683519824400693e-06, |
|
"logits/chosen": -0.292142391204834, |
|
"logits/rejected": -0.8652153015136719, |
|
"logps/chosen": -124.55448150634766, |
|
"logps/rejected": -1406.6923828125, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.7584154605865479, |
|
"rewards/margins": 12.294742584228516, |
|
"rewards/rejected": -13.0531587600708, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 6.96, |
|
"grad_norm": 0.0001625250801281625, |
|
"learning_rate": 2.560701704306336e-06, |
|
"logits/chosen": -0.34566569328308105, |
|
"logits/rejected": -0.8776302337646484, |
|
"logps/chosen": -149.78610229492188, |
|
"logps/rejected": -1521.619384765625, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.9827316999435425, |
|
"rewards/margins": 13.225728988647461, |
|
"rewards/rejected": -14.208460807800293, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 7.04, |
|
"grad_norm": 0.0001369508246975124, |
|
"learning_rate": 2.4397856756471435e-06, |
|
"logits/chosen": -0.32312315702438354, |
|
"logits/rejected": -0.8410239219665527, |
|
"logps/chosen": -129.86459350585938, |
|
"logps/rejected": -1484.922607421875, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.8025870323181152, |
|
"rewards/margins": 13.027560234069824, |
|
"rewards/rejected": -13.830146789550781, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 7.12, |
|
"grad_norm": 0.00018364985576119344, |
|
"learning_rate": 2.320866025105016e-06, |
|
"logits/chosen": -0.3720766007900238, |
|
"logits/rejected": -0.8578876256942749, |
|
"logps/chosen": -173.00662231445312, |
|
"logps/rejected": -1543.7581787109375, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.2114688158035278, |
|
"rewards/margins": 13.233538627624512, |
|
"rewards/rejected": -14.445009231567383, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 7.2, |
|
"grad_norm": 0.0008614496535749721, |
|
"learning_rate": 2.204035482646267e-06, |
|
"logits/chosen": -0.34498947858810425, |
|
"logits/rejected": -0.8200048208236694, |
|
"logps/chosen": -159.82220458984375, |
|
"logps/rejected": -1630.5267333984375, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.0811651945114136, |
|
"rewards/margins": 14.221280097961426, |
|
"rewards/rejected": -15.302446365356445, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 7.28, |
|
"grad_norm": 0.00012796605758885017, |
|
"learning_rate": 2.0893851492135536e-06, |
|
"logits/chosen": -0.4566912055015564, |
|
"logits/rejected": -0.9449372291564941, |
|
"logps/chosen": -158.0172119140625, |
|
"logps/rejected": -1570.6591796875, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.0653969049453735, |
|
"rewards/margins": 13.639493942260742, |
|
"rewards/rejected": -14.7048921585083, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 7.36, |
|
"grad_norm": 0.002657232072751586, |
|
"learning_rate": 1.977004425688126e-06, |
|
"logits/chosen": -0.4158572554588318, |
|
"logits/rejected": -0.9982155561447144, |
|
"logps/chosen": -130.85671997070312, |
|
"logps/rejected": -1492.7935791015625, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.8182401657104492, |
|
"rewards/margins": 13.083892822265625, |
|
"rewards/rejected": -13.902132987976074, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 7.44, |
|
"grad_norm": 0.00015579327036541104, |
|
"learning_rate": 1.8669809431776991e-06, |
|
"logits/chosen": -0.40051618218421936, |
|
"logits/rejected": -0.930172324180603, |
|
"logps/chosen": -153.02651977539062, |
|
"logps/rejected": -1536.75732421875, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.0270575284957886, |
|
"rewards/margins": 13.345794677734375, |
|
"rewards/rejected": -14.372851371765137, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 7.52, |
|
"grad_norm": 0.006509186534878427, |
|
"learning_rate": 1.7594004946843458e-06, |
|
"logits/chosen": -0.3410387635231018, |
|
"logits/rejected": -0.8968143463134766, |
|
"logps/chosen": -131.83535766601562, |
|
"logps/rejected": -1495.5213623046875, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.8293743133544922, |
|
"rewards/margins": 13.11143970489502, |
|
"rewards/rejected": -13.940814018249512, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 7.6, |
|
"grad_norm": 0.0001596335948222842, |
|
"learning_rate": 1.6543469682057105e-06, |
|
"logits/chosen": -0.3258531987667084, |
|
"logits/rejected": -0.8879348635673523, |
|
"logps/chosen": -116.85892486572266, |
|
"logps/rejected": -1435.84912109375, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.6833716034889221, |
|
"rewards/margins": 12.654440879821777, |
|
"rewards/rejected": -13.337811470031738, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 7.68, |
|
"grad_norm": 0.00016669337554250593, |
|
"learning_rate": 1.551902281321651e-06, |
|
"logits/chosen": -0.3701092600822449, |
|
"logits/rejected": -0.9417698979377747, |
|
"logps/chosen": -125.5888671875, |
|
"logps/rejected": -1435.673095703125, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.7691789865493774, |
|
"rewards/margins": 12.566195487976074, |
|
"rewards/rejected": -13.33537483215332, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 7.76, |
|
"grad_norm": 0.00013859610902316376, |
|
"learning_rate": 1.4521463173173966e-06, |
|
"logits/chosen": -0.3638702929019928, |
|
"logits/rejected": -0.8297930955886841, |
|
"logps/chosen": -154.0557403564453, |
|
"logps/rejected": -1617.8785400390625, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.033390998840332, |
|
"rewards/margins": 14.146600723266602, |
|
"rewards/rejected": -15.179990768432617, |
|
"step": 970 |
|
}, |
|
{ |
|
"epoch": 7.84, |
|
"grad_norm": 0.00012978293834875766, |
|
"learning_rate": 1.3551568628929434e-06, |
|
"logits/chosen": -0.3917458951473236, |
|
"logits/rejected": -0.9406350255012512, |
|
"logps/chosen": -130.6827392578125, |
|
"logps/rejected": -1494.780029296875, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.8124713897705078, |
|
"rewards/margins": 13.109392166137695, |
|
"rewards/rejected": -13.921862602233887, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 7.92, |
|
"grad_norm": 0.00016484979693604264, |
|
"learning_rate": 1.2610095475073415e-06, |
|
"logits/chosen": -0.39324721693992615, |
|
"logits/rejected": -0.9108613729476929, |
|
"logps/chosen": -125.08223724365234, |
|
"logps/rejected": -1461.036865234375, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.75953209400177, |
|
"rewards/margins": 12.83247184753418, |
|
"rewards/rejected": -13.592002868652344, |
|
"step": 990 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"grad_norm": 0.00016513337681309213, |
|
"learning_rate": 1.1697777844051105e-06, |
|
"logits/chosen": -0.47117894887924194, |
|
"logits/rejected": -0.973685085773468, |
|
"logps/chosen": -137.49221801757812, |
|
"logps/rejected": -1485.5107421875, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.8799915313720703, |
|
"rewards/margins": 12.966386795043945, |
|
"rewards/rejected": -13.8463773727417, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 8.08, |
|
"grad_norm": 0.00014151799507930023, |
|
"learning_rate": 1.0815327133708015e-06, |
|
"logits/chosen": -0.32878726720809937, |
|
"logits/rejected": -0.8332622647285461, |
|
"logps/chosen": -141.76785278320312, |
|
"logps/rejected": -1504.282958984375, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.9092656373977661, |
|
"rewards/margins": 13.11542797088623, |
|
"rewards/rejected": -14.024694442749023, |
|
"step": 1010 |
|
}, |
|
{ |
|
"epoch": 8.16, |
|
"grad_norm": 0.00022245344369295513, |
|
"learning_rate": 9.963431452563331e-07, |
|
"logits/chosen": -0.39487534761428833, |
|
"logits/rejected": -0.8839765787124634, |
|
"logps/chosen": -171.04611206054688, |
|
"logps/rejected": -1563.74365234375, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.1952791213989258, |
|
"rewards/margins": 13.446779251098633, |
|
"rewards/rejected": -14.642059326171875, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 8.24, |
|
"grad_norm": 0.0001694321641851975, |
|
"learning_rate": 9.142755083243577e-07, |
|
"logits/chosen": -0.3724760413169861, |
|
"logits/rejected": -0.9137827754020691, |
|
"logps/chosen": -128.69775390625, |
|
"logps/rejected": -1472.91357421875, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.7906900644302368, |
|
"rewards/margins": 12.917367935180664, |
|
"rewards/rejected": -13.70805835723877, |
|
"step": 1030 |
|
}, |
|
{ |
|
"epoch": 8.32, |
|
"grad_norm": 0.0004678365231010692, |
|
"learning_rate": 8.353937964495029e-07, |
|
"logits/chosen": -0.3941499590873718, |
|
"logits/rejected": -0.8401497006416321, |
|
"logps/chosen": -165.19224548339844, |
|
"logps/rejected": -1618.8040771484375, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.135845422744751, |
|
"rewards/margins": 14.052388191223145, |
|
"rewards/rejected": -15.188232421875, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 8.4, |
|
"grad_norm": 0.00019961493455925282, |
|
"learning_rate": 7.597595192178702e-07, |
|
"logits/chosen": -0.3931668996810913, |
|
"logits/rejected": -0.8902953863143921, |
|
"logps/chosen": -154.61544799804688, |
|
"logps/rejected": -1578.909423828125, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.042776346206665, |
|
"rewards/margins": 13.743990898132324, |
|
"rewards/rejected": -14.786767959594727, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 8.48, |
|
"grad_norm": 0.00015962162148062636, |
|
"learning_rate": 6.874316539637127e-07, |
|
"logits/chosen": -0.4204545021057129, |
|
"logits/rejected": -0.9560686349868774, |
|
"logps/chosen": -128.07919311523438, |
|
"logps/rejected": -1500.8055419921875, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.7846135497093201, |
|
"rewards/margins": 13.211763381958008, |
|
"rewards/rejected": -13.996376037597656, |
|
"step": 1060 |
|
}, |
|
{ |
|
"epoch": 8.56, |
|
"grad_norm": 0.001197256519458604, |
|
"learning_rate": 6.184665997806832e-07, |
|
"logits/chosen": -0.42808738350868225, |
|
"logits/rejected": -0.8699554204940796, |
|
"logps/chosen": -164.53189086914062, |
|
"logps/rejected": -1619.1337890625, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.1256366968154907, |
|
"rewards/margins": 14.069290161132812, |
|
"rewards/rejected": -15.194926261901855, |
|
"step": 1070 |
|
}, |
|
{ |
|
"epoch": 8.64, |
|
"grad_norm": 0.0014134395460349601, |
|
"learning_rate": 5.529181335435124e-07, |
|
"logits/chosen": -0.39539769291877747, |
|
"logits/rejected": -0.9618415832519531, |
|
"logps/chosen": -124.12815856933594, |
|
"logps/rejected": -1469.0845947265625, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.7464931607246399, |
|
"rewards/margins": 12.919212341308594, |
|
"rewards/rejected": -13.665704727172852, |
|
"step": 1080 |
|
}, |
|
{ |
|
"epoch": 8.72, |
|
"grad_norm": 0.00013913379217651929, |
|
"learning_rate": 4.908373679744316e-07, |
|
"logits/chosen": -0.4164504408836365, |
|
"logits/rejected": -1.0009126663208008, |
|
"logps/chosen": -139.15652465820312, |
|
"logps/rejected": -1507.514404296875, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.8938719034194946, |
|
"rewards/margins": 13.158498764038086, |
|
"rewards/rejected": -14.05237102508545, |
|
"step": 1090 |
|
}, |
|
{ |
|
"epoch": 8.8, |
|
"grad_norm": 0.001742282508212273, |
|
"learning_rate": 4.322727117869951e-07, |
|
"logits/chosen": -0.45068103075027466, |
|
"logits/rejected": -0.9950752258300781, |
|
"logps/chosen": -129.97946166992188, |
|
"logps/rejected": -1477.4703369140625, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.8063904643058777, |
|
"rewards/margins": 12.941418647766113, |
|
"rewards/rejected": -13.747808456420898, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 8.88, |
|
"grad_norm": 0.00014054830993016855, |
|
"learning_rate": 3.772698319384349e-07, |
|
"logits/chosen": -0.3605000376701355, |
|
"logits/rejected": -0.9571415185928345, |
|
"logps/chosen": -102.00578308105469, |
|
"logps/rejected": -1384.301513671875, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.5351399183273315, |
|
"rewards/margins": 12.273322105407715, |
|
"rewards/rejected": -12.80846118927002, |
|
"step": 1110 |
|
}, |
|
{ |
|
"epoch": 8.96, |
|
"grad_norm": 0.00029001415653845576, |
|
"learning_rate": 3.258716180199278e-07, |
|
"logits/chosen": -0.35858815908432007, |
|
"logits/rejected": -0.9055237770080566, |
|
"logps/chosen": -139.0391387939453, |
|
"logps/rejected": -1482.545654296875, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.8927180171012878, |
|
"rewards/margins": 12.917207717895508, |
|
"rewards/rejected": -13.80992603302002, |
|
"step": 1120 |
|
}, |
|
{ |
|
"epoch": 9.04, |
|
"grad_norm": 0.0005895794083238161, |
|
"learning_rate": 2.7811814881259503e-07, |
|
"logits/chosen": -0.344729483127594, |
|
"logits/rejected": -0.8606122136116028, |
|
"logps/chosen": -132.51242065429688, |
|
"logps/rejected": -1505.0494384765625, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.8304397463798523, |
|
"rewards/margins": 13.201517105102539, |
|
"rewards/rejected": -14.031954765319824, |
|
"step": 1130 |
|
}, |
|
{ |
|
"epoch": 9.12, |
|
"grad_norm": 0.00013310639980010605, |
|
"learning_rate": 2.3404666103526542e-07, |
|
"logits/chosen": -0.3512091338634491, |
|
"logits/rejected": -0.8659976124763489, |
|
"logps/chosen": -145.8941192626953, |
|
"logps/rejected": -1529.08447265625, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.9467619061470032, |
|
"rewards/margins": 13.3263578414917, |
|
"rewards/rejected": -14.27312183380127, |
|
"step": 1140 |
|
}, |
|
{ |
|
"epoch": 9.2, |
|
"grad_norm": 0.00012364944710080487, |
|
"learning_rate": 1.9369152030840553e-07, |
|
"logits/chosen": -0.35208699107170105, |
|
"logits/rejected": -0.899590790271759, |
|
"logps/chosen": -143.52272033691406, |
|
"logps/rejected": -1492.267578125, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.9294937252998352, |
|
"rewards/margins": 12.974908828735352, |
|
"rewards/rejected": -13.904401779174805, |
|
"step": 1150 |
|
}, |
|
{ |
|
"epoch": 9.28, |
|
"grad_norm": 0.00013560146353849627, |
|
"learning_rate": 1.5708419435684463e-07, |
|
"logits/chosen": -0.380057156085968, |
|
"logits/rejected": -0.9084013104438782, |
|
"logps/chosen": -157.8520050048828, |
|
"logps/rejected": -1592.5137939453125, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.075063943862915, |
|
"rewards/margins": 13.840289115905762, |
|
"rewards/rejected": -14.915351867675781, |
|
"step": 1160 |
|
}, |
|
{ |
|
"epoch": 9.36, |
|
"grad_norm": 0.00013265989294205703, |
|
"learning_rate": 1.2425322847218368e-07, |
|
"logits/chosen": -0.33621734380722046, |
|
"logits/rejected": -0.800961971282959, |
|
"logps/chosen": -154.0393524169922, |
|
"logps/rejected": -1592.49462890625, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.023474931716919, |
|
"rewards/margins": 13.9025297164917, |
|
"rewards/rejected": -14.926007270812988, |
|
"step": 1170 |
|
}, |
|
{ |
|
"epoch": 9.44, |
|
"grad_norm": 0.003418968557861204, |
|
"learning_rate": 9.522422325404234e-08, |
|
"logits/chosen": -0.42315536737442017, |
|
"logits/rejected": -0.896791934967041, |
|
"logps/chosen": -162.86721801757812, |
|
"logps/rejected": -1653.62890625, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.1122061014175415, |
|
"rewards/margins": 14.423039436340332, |
|
"rewards/rejected": -15.535245895385742, |
|
"step": 1180 |
|
}, |
|
{ |
|
"epoch": 9.52, |
|
"grad_norm": 0.0004120904531551459, |
|
"learning_rate": 7.001981464747565e-08, |
|
"logits/chosen": -0.4035482406616211, |
|
"logits/rejected": -0.9819211959838867, |
|
"logps/chosen": -137.95887756347656, |
|
"logps/rejected": -1535.572021484375, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.8891565203666687, |
|
"rewards/margins": 13.448022842407227, |
|
"rewards/rejected": -14.337178230285645, |
|
"step": 1190 |
|
}, |
|
{ |
|
"epoch": 9.6, |
|
"grad_norm": 0.00017994417316440225, |
|
"learning_rate": 4.865965629214819e-08, |
|
"logits/chosen": -0.35764050483703613, |
|
"logits/rejected": -0.8284955024719238, |
|
"logps/chosen": -176.431396484375, |
|
"logps/rejected": -1656.4488525390625, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.2497293949127197, |
|
"rewards/margins": 14.32325267791748, |
|
"rewards/rejected": -15.572982788085938, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 9.68, |
|
"grad_norm": 0.00015125200057797107, |
|
"learning_rate": 3.1160404197018155e-08, |
|
"logits/chosen": -0.3409956097602844, |
|
"logits/rejected": -0.8691213726997375, |
|
"logps/chosen": -136.07290649414062, |
|
"logps/rejected": -1491.394775390625, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.8631143569946289, |
|
"rewards/margins": 13.04126262664795, |
|
"rewards/rejected": -13.904377937316895, |
|
"step": 1210 |
|
}, |
|
{ |
|
"epoch": 9.76, |
|
"grad_norm": 0.006288398835461867, |
|
"learning_rate": 1.753570375247815e-08, |
|
"logits/chosen": -0.4766923785209656, |
|
"logits/rejected": -0.9798077344894409, |
|
"logps/chosen": -154.1293487548828, |
|
"logps/rejected": -1594.1053466796875, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.0420167446136475, |
|
"rewards/margins": 13.897071838378906, |
|
"rewards/rejected": -14.939088821411133, |
|
"step": 1220 |
|
}, |
|
{ |
|
"epoch": 9.84, |
|
"grad_norm": 0.00015912389084664545, |
|
"learning_rate": 7.796179090094891e-09, |
|
"logits/chosen": -0.3149864971637726, |
|
"logits/rejected": -0.8940708041191101, |
|
"logps/chosen": -117.2905044555664, |
|
"logps/rejected": -1411.2535400390625, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.6836379766464233, |
|
"rewards/margins": 12.397500991821289, |
|
"rewards/rejected": -13.081140518188477, |
|
"step": 1230 |
|
}, |
|
{ |
|
"epoch": 9.92, |
|
"grad_norm": 0.0002417004626132451, |
|
"learning_rate": 1.9494247982282386e-09, |
|
"logits/chosen": -0.42949041724205017, |
|
"logits/rejected": -0.9556988477706909, |
|
"logps/chosen": -133.76651000976562, |
|
"logps/rejected": -1445.5709228515625, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.8304960131645203, |
|
"rewards/margins": 12.607949256896973, |
|
"rewards/rejected": -13.438446044921875, |
|
"step": 1240 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"grad_norm": 0.0008051537929778158, |
|
"learning_rate": 0.0, |
|
"logits/chosen": -0.3784290850162506, |
|
"logits/rejected": -0.879416823387146, |
|
"logps/chosen": -144.29238891601562, |
|
"logps/rejected": -1541.6170654296875, |
|
"loss": 0.0, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.9475828409194946, |
|
"rewards/margins": 13.463716506958008, |
|
"rewards/rejected": -14.411298751831055, |
|
"step": 1250 |
|
}, |
|
    {
      "epoch": 10.0,
      "step": 1250,
      "total_flos": 0.0,
      "train_loss": 0.015015173686749768,
      "train_runtime": 115157.0828,
      "train_samples_per_second": 0.695,
      "train_steps_per_second": 0.011
    }
  ],
  "logging_steps": 10,
  "max_steps": 1250,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}