{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 3.0, |
|
"eval_steps": 1000, |
|
"global_step": 1545, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.0, |
|
"grad_norm": 238.7771549579817, |
|
"learning_rate": 3.225806451612903e-09, |
|
"logits/chosen": -3.3442530632019043, |
|
"logits/rejected": -3.478727102279663, |
|
"logps/chosen": -194.86203002929688, |
|
"logps/rejected": -182.01235961914062, |
|
"loss": 0.6931, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"grad_norm": 210.0539707320501, |
|
"learning_rate": 3.225806451612903e-08, |
|
"logits/chosen": -3.15413236618042, |
|
"logits/rejected": -3.161997079849243, |
|
"logps/chosen": -265.6070556640625, |
|
"logps/rejected": -222.88095092773438, |
|
"loss": 0.6949, |
|
"rewards/accuracies": 0.4444444477558136, |
|
"rewards/chosen": 0.011013220064342022, |
|
"rewards/margins": 0.011229809373617172, |
|
"rewards/rejected": -0.00021659024059772491, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"grad_norm": 176.7933427886698, |
|
"learning_rate": 6.451612903225806e-08, |
|
"logits/chosen": -3.0983855724334717, |
|
"logits/rejected": -3.1877849102020264, |
|
"logps/chosen": -166.89926147460938, |
|
"logps/rejected": -166.43698120117188, |
|
"loss": 0.6918, |
|
"rewards/accuracies": 0.44999998807907104, |
|
"rewards/chosen": 0.009564275853335857, |
|
"rewards/margins": 0.0018194920849055052, |
|
"rewards/rejected": 0.007744784001260996, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"grad_norm": 203.6488787858732, |
|
"learning_rate": 9.677419354838709e-08, |
|
"logits/chosen": -3.0741190910339355, |
|
"logits/rejected": -3.1300857067108154, |
|
"logps/chosen": -228.1751251220703, |
|
"logps/rejected": -237.1351776123047, |
|
"loss": 0.6817, |
|
"rewards/accuracies": 0.6000000238418579, |
|
"rewards/chosen": 0.030515898019075394, |
|
"rewards/margins": 0.031470950692892075, |
|
"rewards/rejected": -0.000955055293161422, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"grad_norm": 193.58974634035104, |
|
"learning_rate": 1.2903225806451611e-07, |
|
"logits/chosen": -3.169820785522461, |
|
"logits/rejected": -3.0861575603485107, |
|
"logps/chosen": -221.1776885986328, |
|
"logps/rejected": -159.6044464111328, |
|
"loss": 0.6714, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": 0.06581683456897736, |
|
"rewards/margins": 0.07549744844436646, |
|
"rewards/rejected": -0.009680609218776226, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"grad_norm": 204.80430949892167, |
|
"learning_rate": 1.6129032258064515e-07, |
|
"logits/chosen": -3.061490535736084, |
|
"logits/rejected": -3.091951847076416, |
|
"logps/chosen": -189.7487335205078, |
|
"logps/rejected": -243.40603637695312, |
|
"loss": 0.6635, |
|
"rewards/accuracies": 0.5, |
|
"rewards/chosen": 0.1369192898273468, |
|
"rewards/margins": 0.06373085081577301, |
|
"rewards/rejected": 0.07318843901157379, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"grad_norm": 184.5945707092153, |
|
"learning_rate": 1.9354838709677418e-07, |
|
"logits/chosen": -3.197416305541992, |
|
"logits/rejected": -3.229931592941284, |
|
"logps/chosen": -258.2798156738281, |
|
"logps/rejected": -230.84716796875, |
|
"loss": 0.6548, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": 0.1522468477487564, |
|
"rewards/margins": 0.250114768743515, |
|
"rewards/rejected": -0.0978679284453392, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"grad_norm": 186.27179515709204, |
|
"learning_rate": 2.2580645161290322e-07, |
|
"logits/chosen": -2.967573404312134, |
|
"logits/rejected": -3.0102295875549316, |
|
"logps/chosen": -224.0232391357422, |
|
"logps/rejected": -250.5163116455078, |
|
"loss": 0.6435, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": 0.29443851113319397, |
|
"rewards/margins": 0.16650478541851044, |
|
"rewards/rejected": 0.12793374061584473, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"grad_norm": 270.5970909334128, |
|
"learning_rate": 2.5806451612903223e-07, |
|
"logits/chosen": -3.2193164825439453, |
|
"logits/rejected": -3.1845672130584717, |
|
"logps/chosen": -215.4364776611328, |
|
"logps/rejected": -197.53713989257812, |
|
"loss": 0.6466, |
|
"rewards/accuracies": 0.550000011920929, |
|
"rewards/chosen": 0.18258078396320343, |
|
"rewards/margins": 0.08748456835746765, |
|
"rewards/rejected": 0.09509620815515518, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"grad_norm": 205.49332887939408, |
|
"learning_rate": 2.903225806451613e-07, |
|
"logits/chosen": -3.09409499168396, |
|
"logits/rejected": -3.0983214378356934, |
|
"logps/chosen": -103.38987731933594, |
|
"logps/rejected": -143.33724975585938, |
|
"loss": 0.6189, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": 0.08013658225536346, |
|
"rewards/margins": 0.021646564826369286, |
|
"rewards/rejected": 0.05849001556634903, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"grad_norm": 125.49552783164246, |
|
"learning_rate": 3.225806451612903e-07, |
|
"logits/chosen": -3.0359697341918945, |
|
"logits/rejected": -3.104940891265869, |
|
"logps/chosen": -159.39813232421875, |
|
"logps/rejected": -180.9935302734375, |
|
"loss": 0.6091, |
|
"rewards/accuracies": 0.5, |
|
"rewards/chosen": 0.13538669049739838, |
|
"rewards/margins": 0.23133841156959534, |
|
"rewards/rejected": -0.09595172107219696, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"grad_norm": 220.0417527459765, |
|
"learning_rate": 3.5483870967741936e-07, |
|
"logits/chosen": -3.001208782196045, |
|
"logits/rejected": -2.9704670906066895, |
|
"logps/chosen": -183.01312255859375, |
|
"logps/rejected": -209.7218475341797, |
|
"loss": 0.5823, |
|
"rewards/accuracies": 0.550000011920929, |
|
"rewards/chosen": -0.03632747381925583, |
|
"rewards/margins": 0.1413092315196991, |
|
"rewards/rejected": -0.17763671278953552, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"grad_norm": 168.84413823172952, |
|
"learning_rate": 3.8709677419354837e-07, |
|
"logits/chosen": -3.1182332038879395, |
|
"logits/rejected": -3.0886902809143066, |
|
"logps/chosen": -189.5988311767578, |
|
"logps/rejected": -178.51364135742188, |
|
"loss": 0.6219, |
|
"rewards/accuracies": 0.5, |
|
"rewards/chosen": -0.06012638285756111, |
|
"rewards/margins": 0.12087175995111465, |
|
"rewards/rejected": -0.18099813163280487, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"grad_norm": 208.2575863637529, |
|
"learning_rate": 4.1935483870967743e-07, |
|
"logits/chosen": -3.1611361503601074, |
|
"logits/rejected": -3.1497890949249268, |
|
"logps/chosen": -157.99807739257812, |
|
"logps/rejected": -189.0376739501953, |
|
"loss": 0.532, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": 0.09179370105266571, |
|
"rewards/margins": 0.6432317495346069, |
|
"rewards/rejected": -0.55143803358078, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"grad_norm": 150.85083560407247, |
|
"learning_rate": 4.5161290322580644e-07, |
|
"logits/chosen": -3.091956377029419, |
|
"logits/rejected": -3.1225342750549316, |
|
"logps/chosen": -185.01499938964844, |
|
"logps/rejected": -240.418212890625, |
|
"loss": 0.567, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": 0.088681660592556, |
|
"rewards/margins": 0.8317887187004089, |
|
"rewards/rejected": -0.7431070804595947, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"grad_norm": 208.61538011383496, |
|
"learning_rate": 4.838709677419355e-07, |
|
"logits/chosen": -3.1025145053863525, |
|
"logits/rejected": -3.107062578201294, |
|
"logps/chosen": -242.4476318359375, |
|
"logps/rejected": -186.19691467285156, |
|
"loss": 0.5551, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": 0.16192469000816345, |
|
"rewards/margins": 0.43396979570388794, |
|
"rewards/rejected": -0.2720451056957245, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"grad_norm": 227.4405540805906, |
|
"learning_rate": 4.982014388489209e-07, |
|
"logits/chosen": -3.05570650100708, |
|
"logits/rejected": -3.053194761276245, |
|
"logps/chosen": -243.532958984375, |
|
"logps/rejected": -238.6374053955078, |
|
"loss": 0.5381, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": -0.14193885028362274, |
|
"rewards/margins": 1.0523029565811157, |
|
"rewards/rejected": -1.1942418813705444, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"grad_norm": 185.31205838070358, |
|
"learning_rate": 4.946043165467625e-07, |
|
"logits/chosen": -3.031973361968994, |
|
"logits/rejected": -2.987246036529541, |
|
"logps/chosen": -148.59695434570312, |
|
"logps/rejected": -111.39070892333984, |
|
"loss": 0.5769, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": -0.12481436878442764, |
|
"rewards/margins": 0.8159207105636597, |
|
"rewards/rejected": -0.9407350420951843, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"grad_norm": 228.1100532242365, |
|
"learning_rate": 4.910071942446043e-07, |
|
"logits/chosen": -3.133244276046753, |
|
"logits/rejected": -3.033322334289551, |
|
"logps/chosen": -329.6561279296875, |
|
"logps/rejected": -191.4559783935547, |
|
"loss": 0.5142, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": 0.49908939003944397, |
|
"rewards/margins": 1.1547267436981201, |
|
"rewards/rejected": -0.6556375026702881, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"grad_norm": 215.88646054369426, |
|
"learning_rate": 4.87410071942446e-07, |
|
"logits/chosen": -3.1060004234313965, |
|
"logits/rejected": -3.1299846172332764, |
|
"logps/chosen": -234.3701629638672, |
|
"logps/rejected": -184.80455017089844, |
|
"loss": 0.5214, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -0.3190203309059143, |
|
"rewards/margins": 0.7846688032150269, |
|
"rewards/rejected": -1.103689193725586, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"grad_norm": 96.46619144056267, |
|
"learning_rate": 4.838129496402878e-07, |
|
"logits/chosen": -3.0408408641815186, |
|
"logits/rejected": -3.1078829765319824, |
|
"logps/chosen": -197.76657104492188, |
|
"logps/rejected": -256.5730285644531, |
|
"loss": 0.5628, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": 0.9596657752990723, |
|
"rewards/margins": 1.5147722959518433, |
|
"rewards/rejected": -0.555106520652771, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"grad_norm": 176.7140571201679, |
|
"learning_rate": 4.802158273381295e-07, |
|
"logits/chosen": -3.130012035369873, |
|
"logits/rejected": -2.9814813137054443, |
|
"logps/chosen": -302.2297058105469, |
|
"logps/rejected": -213.53564453125, |
|
"loss": 0.541, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -0.21759366989135742, |
|
"rewards/margins": 1.1542640924453735, |
|
"rewards/rejected": -1.371857762336731, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"grad_norm": 205.46982898914413, |
|
"learning_rate": 4.7661870503597116e-07, |
|
"logits/chosen": -3.0621910095214844, |
|
"logits/rejected": -3.102663040161133, |
|
"logps/chosen": -170.4927978515625, |
|
"logps/rejected": -152.03933715820312, |
|
"loss": 0.5619, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": -0.28674641251564026, |
|
"rewards/margins": 0.7877071499824524, |
|
"rewards/rejected": -1.074453592300415, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"grad_norm": 302.7432924096287, |
|
"learning_rate": 4.730215827338129e-07, |
|
"logits/chosen": -2.86230731010437, |
|
"logits/rejected": -2.8046908378601074, |
|
"logps/chosen": -277.9963684082031, |
|
"logps/rejected": -311.2679748535156, |
|
"loss": 0.5988, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": -0.9725619554519653, |
|
"rewards/margins": 2.1599535942077637, |
|
"rewards/rejected": -3.1325161457061768, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"grad_norm": 192.16860695693003, |
|
"learning_rate": 4.6942446043165467e-07, |
|
"logits/chosen": -2.9580187797546387, |
|
"logits/rejected": -2.9902093410491943, |
|
"logps/chosen": -244.5732421875, |
|
"logps/rejected": -234.186767578125, |
|
"loss": 0.5837, |
|
"rewards/accuracies": 0.6000000238418579, |
|
"rewards/chosen": -0.015125697478652, |
|
"rewards/margins": 0.9667981863021851, |
|
"rewards/rejected": -0.9819238781929016, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"grad_norm": 193.43307883430833, |
|
"learning_rate": 4.6582733812949637e-07, |
|
"logits/chosen": -3.113363742828369, |
|
"logits/rejected": -3.1423449516296387, |
|
"logps/chosen": -240.1184539794922, |
|
"logps/rejected": -243.276123046875, |
|
"loss": 0.5109, |
|
"rewards/accuracies": 0.6000000238418579, |
|
"rewards/chosen": -0.8149322271347046, |
|
"rewards/margins": 0.7386936545372009, |
|
"rewards/rejected": -1.5536259412765503, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"grad_norm": 183.86586127077544, |
|
"learning_rate": 4.622302158273381e-07, |
|
"logits/chosen": -3.1412100791931152, |
|
"logits/rejected": -3.0982837677001953, |
|
"logps/chosen": -221.32955932617188, |
|
"logps/rejected": -194.5541534423828, |
|
"loss": 0.5086, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -0.8559683561325073, |
|
"rewards/margins": 0.9293031692504883, |
|
"rewards/rejected": -1.785271406173706, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"grad_norm": 252.90755166196357, |
|
"learning_rate": 4.586330935251798e-07, |
|
"logits/chosen": -3.0562283992767334, |
|
"logits/rejected": -3.016035556793213, |
|
"logps/chosen": -288.05792236328125, |
|
"logps/rejected": -246.1804656982422, |
|
"loss": 0.6534, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": -0.19852256774902344, |
|
"rewards/margins": 0.7442554235458374, |
|
"rewards/rejected": -0.9427779912948608, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"grad_norm": 211.83556122462332, |
|
"learning_rate": 4.550359712230216e-07, |
|
"logits/chosen": -2.89155650138855, |
|
"logits/rejected": -2.8994014263153076, |
|
"logps/chosen": -286.43341064453125, |
|
"logps/rejected": -301.67144775390625, |
|
"loss": 0.6047, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": -0.3442370295524597, |
|
"rewards/margins": 1.805853247642517, |
|
"rewards/rejected": -2.1500906944274902, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"grad_norm": 418.7444892216886, |
|
"learning_rate": 4.5143884892086333e-07, |
|
"logits/chosen": -2.7451279163360596, |
|
"logits/rejected": -2.687638759613037, |
|
"logps/chosen": -267.42816162109375, |
|
"logps/rejected": -184.01612854003906, |
|
"loss": 0.5466, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": 0.1652831733226776, |
|
"rewards/margins": 1.0949604511260986, |
|
"rewards/rejected": -0.9296773076057434, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"grad_norm": 257.51596842504944, |
|
"learning_rate": 4.4784172661870503e-07, |
|
"logits/chosen": -2.8880581855773926, |
|
"logits/rejected": -2.859224796295166, |
|
"logps/chosen": -228.3848114013672, |
|
"logps/rejected": -243.755859375, |
|
"loss": 0.644, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": -0.26425519585609436, |
|
"rewards/margins": 0.545816957950592, |
|
"rewards/rejected": -0.810072124004364, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"grad_norm": 146.22803074809437, |
|
"learning_rate": 4.4424460431654673e-07, |
|
"logits/chosen": -2.8631234169006348, |
|
"logits/rejected": -2.929395914077759, |
|
"logps/chosen": -157.17486572265625, |
|
"logps/rejected": -203.16934204101562, |
|
"loss": 0.5586, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": -1.0279247760772705, |
|
"rewards/margins": 2.014747142791748, |
|
"rewards/rejected": -3.0426723957061768, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"grad_norm": 255.32693322709616, |
|
"learning_rate": 4.4064748201438843e-07, |
|
"logits/chosen": -2.983949661254883, |
|
"logits/rejected": -3.011338233947754, |
|
"logps/chosen": -186.72018432617188, |
|
"logps/rejected": -193.33999633789062, |
|
"loss": 0.4988, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": -0.3878747522830963, |
|
"rewards/margins": 1.1526678800582886, |
|
"rewards/rejected": -1.5405426025390625, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"grad_norm": 238.21102592762895, |
|
"learning_rate": 4.370503597122302e-07, |
|
"logits/chosen": -2.962764263153076, |
|
"logits/rejected": -2.9538509845733643, |
|
"logps/chosen": -277.8987121582031, |
|
"logps/rejected": -255.3301544189453, |
|
"loss": 0.4998, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -0.7843912839889526, |
|
"rewards/margins": 1.1306078433990479, |
|
"rewards/rejected": -1.914999008178711, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"grad_norm": 198.9409546988356, |
|
"learning_rate": 4.3345323741007194e-07, |
|
"logits/chosen": -2.998401403427124, |
|
"logits/rejected": -3.0707414150238037, |
|
"logps/chosen": -188.1188201904297, |
|
"logps/rejected": -207.1416778564453, |
|
"loss": 0.5624, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": -0.6548808813095093, |
|
"rewards/margins": 1.881704330444336, |
|
"rewards/rejected": -2.5365853309631348, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"grad_norm": 105.06843294784386, |
|
"learning_rate": 4.2985611510791364e-07, |
|
"logits/chosen": -2.783904552459717, |
|
"logits/rejected": -2.7255444526672363, |
|
"logps/chosen": -198.8332061767578, |
|
"logps/rejected": -173.35971069335938, |
|
"loss": 0.4945, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": -0.7126100659370422, |
|
"rewards/margins": 1.1439498662948608, |
|
"rewards/rejected": -1.8565599918365479, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"grad_norm": 263.3174694783378, |
|
"learning_rate": 4.262589928057554e-07, |
|
"logits/chosen": -2.9126217365264893, |
|
"logits/rejected": -2.9537577629089355, |
|
"logps/chosen": -175.30380249023438, |
|
"logps/rejected": -158.21139526367188, |
|
"loss": 0.4754, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": -1.1357567310333252, |
|
"rewards/margins": 2.3206634521484375, |
|
"rewards/rejected": -3.4564201831817627, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"grad_norm": 177.41732636758604, |
|
"learning_rate": 4.226618705035971e-07, |
|
"logits/chosen": -3.006334066390991, |
|
"logits/rejected": -3.057971477508545, |
|
"logps/chosen": -319.56353759765625, |
|
"logps/rejected": -296.5467529296875, |
|
"loss": 0.5596, |
|
"rewards/accuracies": 0.8500000238418579, |
|
"rewards/chosen": -1.418001651763916, |
|
"rewards/margins": 1.0575675964355469, |
|
"rewards/rejected": -2.475569248199463, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"grad_norm": 218.4833326220091, |
|
"learning_rate": 4.1906474820143885e-07, |
|
"logits/chosen": -2.894559383392334, |
|
"logits/rejected": -2.959474802017212, |
|
"logps/chosen": -183.92538452148438, |
|
"logps/rejected": -209.09469604492188, |
|
"loss": 0.5208, |
|
"rewards/accuracies": 0.6000000238418579, |
|
"rewards/chosen": -1.1497817039489746, |
|
"rewards/margins": 0.8618785738945007, |
|
"rewards/rejected": -2.01166033744812, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"grad_norm": 202.84052669573208, |
|
"learning_rate": 4.154676258992806e-07, |
|
"logits/chosen": -2.7759809494018555, |
|
"logits/rejected": -2.7760024070739746, |
|
"logps/chosen": -226.66012573242188, |
|
"logps/rejected": -175.62423706054688, |
|
"loss": 0.5323, |
|
"rewards/accuracies": 0.8500000238418579, |
|
"rewards/chosen": -1.1147387027740479, |
|
"rewards/margins": 1.6622241735458374, |
|
"rewards/rejected": -2.776962995529175, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"grad_norm": 182.17649058100815, |
|
"learning_rate": 4.118705035971223e-07, |
|
"logits/chosen": -3.002817153930664, |
|
"logits/rejected": -2.912900686264038, |
|
"logps/chosen": -181.59104919433594, |
|
"logps/rejected": -180.56857299804688, |
|
"loss": 0.4953, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -1.058836579322815, |
|
"rewards/margins": 1.3610327243804932, |
|
"rewards/rejected": -2.4198696613311768, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"grad_norm": 161.96603302480173, |
|
"learning_rate": 4.08273381294964e-07, |
|
"logits/chosen": -2.995290517807007, |
|
"logits/rejected": -2.921790361404419, |
|
"logps/chosen": -279.0407409667969, |
|
"logps/rejected": -241.841796875, |
|
"loss": 0.5213, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": -0.12093071639537811, |
|
"rewards/margins": 1.5623955726623535, |
|
"rewards/rejected": -1.6833263635635376, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"grad_norm": 201.93847203217072, |
|
"learning_rate": 4.046762589928057e-07, |
|
"logits/chosen": -2.744863510131836, |
|
"logits/rejected": -2.775908946990967, |
|
"logps/chosen": -235.76181030273438, |
|
"logps/rejected": -256.9134826660156, |
|
"loss": 0.6025, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -0.520436704158783, |
|
"rewards/margins": 1.4860963821411133, |
|
"rewards/rejected": -2.006533145904541, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"grad_norm": 394.9065983141447, |
|
"learning_rate": 4.0107913669064746e-07, |
|
"logits/chosen": -2.8987364768981934, |
|
"logits/rejected": -3.0120251178741455, |
|
"logps/chosen": -199.3771514892578, |
|
"logps/rejected": -276.7981872558594, |
|
"loss": 0.4675, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": -0.3013836443424225, |
|
"rewards/margins": 2.102478504180908, |
|
"rewards/rejected": -2.4038619995117188, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"grad_norm": 154.86934618345205, |
|
"learning_rate": 3.974820143884892e-07, |
|
"logits/chosen": -2.8435750007629395, |
|
"logits/rejected": -2.9437308311462402, |
|
"logps/chosen": -166.1263885498047, |
|
"logps/rejected": -191.59690856933594, |
|
"loss": 0.4899, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": -1.5653941631317139, |
|
"rewards/margins": 1.7190685272216797, |
|
"rewards/rejected": -3.2844626903533936, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"grad_norm": 140.28100702480398, |
|
"learning_rate": 3.938848920863309e-07, |
|
"logits/chosen": -2.9444668292999268, |
|
"logits/rejected": -2.960024118423462, |
|
"logps/chosen": -230.9547119140625, |
|
"logps/rejected": -249.8640899658203, |
|
"loss": 0.5214, |
|
"rewards/accuracies": 0.6000000238418579, |
|
"rewards/chosen": -1.988498330116272, |
|
"rewards/margins": 0.8448504209518433, |
|
"rewards/rejected": -2.833348512649536, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"grad_norm": 251.261088110334, |
|
"learning_rate": 3.9028776978417266e-07, |
|
"logits/chosen": -2.956639289855957, |
|
"logits/rejected": -2.9189281463623047, |
|
"logps/chosen": -248.42074584960938, |
|
"logps/rejected": -259.29547119140625, |
|
"loss": 0.5548, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -1.4849539995193481, |
|
"rewards/margins": 1.1761761903762817, |
|
"rewards/rejected": -2.66113018989563, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"grad_norm": 157.1165843220562, |
|
"learning_rate": 3.8669064748201436e-07, |
|
"logits/chosen": -2.9550623893737793, |
|
"logits/rejected": -2.995311975479126, |
|
"logps/chosen": -175.0579071044922, |
|
"logps/rejected": -205.593505859375, |
|
"loss": 0.4984, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": -1.3374639749526978, |
|
"rewards/margins": 2.801967144012451, |
|
"rewards/rejected": -4.139430999755859, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"grad_norm": 239.1056515400595, |
|
"learning_rate": 3.830935251798561e-07, |
|
"logits/chosen": -2.902780771255493, |
|
"logits/rejected": -2.9937572479248047, |
|
"logps/chosen": -168.61141967773438, |
|
"logps/rejected": -209.18344116210938, |
|
"loss": 0.5985, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": -2.3175175189971924, |
|
"rewards/margins": 1.5122530460357666, |
|
"rewards/rejected": -3.82977032661438, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"grad_norm": 148.09456662862073, |
|
"learning_rate": 3.7949640287769787e-07, |
|
"logits/chosen": -2.844252347946167, |
|
"logits/rejected": -2.7700600624084473, |
|
"logps/chosen": -273.8468933105469, |
|
"logps/rejected": -166.2052764892578, |
|
"loss": 0.5295, |
|
"rewards/accuracies": 0.6000000238418579, |
|
"rewards/chosen": -1.9907764196395874, |
|
"rewards/margins": 0.7116690874099731, |
|
"rewards/rejected": -2.7024455070495605, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"grad_norm": 212.94009211428397, |
|
"learning_rate": 3.7589928057553957e-07, |
|
"logits/chosen": -2.781285285949707, |
|
"logits/rejected": -2.8481528759002686, |
|
"logps/chosen": -287.84381103515625, |
|
"logps/rejected": -233.2913818359375, |
|
"loss": 0.4471, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": -1.6348762512207031, |
|
"rewards/margins": 2.143733501434326, |
|
"rewards/rejected": -3.7786097526550293, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"grad_norm": 347.65521206077733, |
|
"learning_rate": 3.7230215827338127e-07, |
|
"logits/chosen": -2.8734822273254395, |
|
"logits/rejected": -2.871880531311035, |
|
"logps/chosen": -236.97799682617188, |
|
"logps/rejected": -269.2080078125, |
|
"loss": 0.5097, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": -2.1686577796936035, |
|
"rewards/margins": 2.460658550262451, |
|
"rewards/rejected": -4.629316806793213, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"grad_norm": 17.093431174075842, |
|
"learning_rate": 3.6870503597122297e-07, |
|
"logits/chosen": -2.881934404373169, |
|
"logits/rejected": -2.821448802947998, |
|
"logps/chosen": -159.77235412597656, |
|
"logps/rejected": -186.3824920654297, |
|
"loss": 0.3832, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -0.8075105547904968, |
|
"rewards/margins": 3.561521053314209, |
|
"rewards/rejected": -4.36903190612793, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 1.03, |
|
"grad_norm": 10.962962944310295, |
|
"learning_rate": 3.651079136690647e-07, |
|
"logits/chosen": -2.906359910964966, |
|
"logits/rejected": -2.8581392765045166, |
|
"logps/chosen": -248.25723266601562, |
|
"logps/rejected": -273.17218017578125, |
|
"loss": 0.1333, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": 0.4104425013065338, |
|
"rewards/margins": 6.661971092224121, |
|
"rewards/rejected": -6.251528739929199, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"grad_norm": 19.397740520521022, |
|
"learning_rate": 3.615107913669064e-07, |
|
"logits/chosen": -2.8592209815979004, |
|
"logits/rejected": -2.821002721786499, |
|
"logps/chosen": -153.77841186523438, |
|
"logps/rejected": -253.50192260742188, |
|
"loss": 0.1241, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": -0.922268271446228, |
|
"rewards/margins": 4.912806510925293, |
|
"rewards/rejected": -5.8350749015808105, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 1.07, |
|
"grad_norm": 31.58327548524221, |
|
"learning_rate": 3.579136690647482e-07, |
|
"logits/chosen": -2.772770881652832, |
|
"logits/rejected": -2.7921929359436035, |
|
"logps/chosen": -147.0404052734375, |
|
"logps/rejected": -281.2487487792969, |
|
"loss": 0.1004, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": 0.5385946035385132, |
|
"rewards/margins": 6.8628716468811035, |
|
"rewards/rejected": -6.324276924133301, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"grad_norm": 10.777257966533346, |
|
"learning_rate": 3.5431654676258993e-07, |
|
"logits/chosen": -2.779730796813965, |
|
"logits/rejected": -2.801069498062134, |
|
"logps/chosen": -153.9626922607422, |
|
"logps/rejected": -171.65768432617188, |
|
"loss": 0.1374, |
|
"rewards/accuracies": 0.8500000238418579, |
|
"rewards/chosen": -1.910001516342163, |
|
"rewards/margins": 4.283631324768066, |
|
"rewards/rejected": -6.193632125854492, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"grad_norm": 23.32063703058829, |
|
"learning_rate": 3.5071942446043163e-07, |
|
"logits/chosen": -2.872498035430908, |
|
"logits/rejected": -2.884324789047241, |
|
"logps/chosen": -267.8298034667969, |
|
"logps/rejected": -381.44049072265625, |
|
"loss": 0.106, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": 2.8878796100616455, |
|
"rewards/margins": 10.999748229980469, |
|
"rewards/rejected": -8.111869812011719, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 1.13, |
|
"grad_norm": 31.976606028398866, |
|
"learning_rate": 3.471223021582734e-07, |
|
"logits/chosen": -2.8648922443389893, |
|
"logits/rejected": -2.8326311111450195, |
|
"logps/chosen": -198.3783721923828, |
|
"logps/rejected": -266.2000427246094, |
|
"loss": 0.1014, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": 0.19176311790943146, |
|
"rewards/margins": 7.178011894226074, |
|
"rewards/rejected": -6.986248970031738, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 1.15, |
|
"grad_norm": 15.736933231868846, |
|
"learning_rate": 3.435251798561151e-07, |
|
"logits/chosen": -2.8333282470703125, |
|
"logits/rejected": -2.9636716842651367, |
|
"logps/chosen": -206.75283813476562, |
|
"logps/rejected": -282.7793273925781, |
|
"loss": 0.0985, |
|
"rewards/accuracies": 0.8500000238418579, |
|
"rewards/chosen": -0.6388500332832336, |
|
"rewards/margins": 4.777499198913574, |
|
"rewards/rejected": -5.416348934173584, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 1.17, |
|
"grad_norm": 17.063403322364767, |
|
"learning_rate": 3.3992805755395684e-07, |
|
"logits/chosen": -2.919651508331299, |
|
"logits/rejected": -2.996105670928955, |
|
"logps/chosen": -192.1748504638672, |
|
"logps/rejected": -310.4754333496094, |
|
"loss": 0.1476, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": 0.5340520143508911, |
|
"rewards/margins": 7.349068641662598, |
|
"rewards/rejected": -6.815016269683838, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"grad_norm": 20.21744560573151, |
|
"learning_rate": 3.3633093525179854e-07, |
|
"logits/chosen": -2.9296090602874756, |
|
"logits/rejected": -2.7481861114501953, |
|
"logps/chosen": -203.38052368164062, |
|
"logps/rejected": -244.37014770507812, |
|
"loss": 0.1342, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": 1.0323004722595215, |
|
"rewards/margins": 8.135452270507812, |
|
"rewards/rejected": -7.103150844573975, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"grad_norm": 16.433861008142927, |
|
"learning_rate": 3.3273381294964024e-07, |
|
"logits/chosen": -2.9079694747924805, |
|
"logits/rejected": -2.8130078315734863, |
|
"logps/chosen": -186.45281982421875, |
|
"logps/rejected": -252.8771514892578, |
|
"loss": 0.0962, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": 1.2089195251464844, |
|
"rewards/margins": 5.797079563140869, |
|
"rewards/rejected": -4.588160037994385, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 1.22, |
|
"grad_norm": 29.799329342774556, |
|
"learning_rate": 3.29136690647482e-07, |
|
"logits/chosen": -2.971191883087158, |
|
"logits/rejected": -2.8457350730895996, |
|
"logps/chosen": -229.301513671875, |
|
"logps/rejected": -390.90899658203125, |
|
"loss": 0.1191, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": 3.2872531414031982, |
|
"rewards/margins": 14.088516235351562, |
|
"rewards/rejected": -10.801262855529785, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 1.24, |
|
"grad_norm": 13.229591095093818, |
|
"learning_rate": 3.255395683453237e-07, |
|
"logits/chosen": -2.7274880409240723, |
|
"logits/rejected": -2.754389524459839, |
|
"logps/chosen": -139.60235595703125, |
|
"logps/rejected": -234.2040557861328, |
|
"loss": 0.1683, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": 0.6343626976013184, |
|
"rewards/margins": 7.628392696380615, |
|
"rewards/rejected": -6.994029998779297, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 1.26, |
|
"grad_norm": 8.348959533199398, |
|
"learning_rate": 3.2194244604316545e-07, |
|
"logits/chosen": -2.8291614055633545, |
|
"logits/rejected": -2.768712282180786, |
|
"logps/chosen": -156.22329711914062, |
|
"logps/rejected": -280.3912658691406, |
|
"loss": 0.1206, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": -0.6103225946426392, |
|
"rewards/margins": 5.600022315979004, |
|
"rewards/rejected": -6.2103447914123535, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"grad_norm": 6.080339854051856, |
|
"learning_rate": 3.183453237410072e-07, |
|
"logits/chosen": -2.8922338485717773, |
|
"logits/rejected": -3.0048136711120605, |
|
"logps/chosen": -185.24578857421875, |
|
"logps/rejected": -273.0670166015625, |
|
"loss": 0.1864, |
|
"rewards/accuracies": 0.8500000238418579, |
|
"rewards/chosen": -1.4969745874404907, |
|
"rewards/margins": 6.30242395401001, |
|
"rewards/rejected": -7.799398899078369, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"grad_norm": 8.711710883557748, |
|
"learning_rate": 3.147482014388489e-07, |
|
"logits/chosen": -2.827256679534912, |
|
"logits/rejected": -2.9615511894226074, |
|
"logps/chosen": -190.73292541503906, |
|
"logps/rejected": -271.886962890625, |
|
"loss": 0.1163, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -0.6087120771408081, |
|
"rewards/margins": 7.128113746643066, |
|
"rewards/rejected": -7.7368268966674805, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 1.32, |
|
"grad_norm": 23.330004354397758, |
|
"learning_rate": 3.1115107913669066e-07, |
|
"logits/chosen": -3.0536856651306152, |
|
"logits/rejected": -3.0247254371643066, |
|
"logps/chosen": -250.87240600585938, |
|
"logps/rejected": -255.5919952392578, |
|
"loss": 0.1319, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": 0.45528966188430786, |
|
"rewards/margins": 6.966174125671387, |
|
"rewards/rejected": -6.510884761810303, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 1.34, |
|
"grad_norm": 23.99609139468775, |
|
"learning_rate": 3.0755395683453236e-07, |
|
"logits/chosen": -2.9475927352905273, |
|
"logits/rejected": -3.0371546745300293, |
|
"logps/chosen": -187.01266479492188, |
|
"logps/rejected": -247.2963409423828, |
|
"loss": 0.1305, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": -0.9167653918266296, |
|
"rewards/margins": 7.090806007385254, |
|
"rewards/rejected": -8.007570266723633, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 1.36, |
|
"grad_norm": 28.810275406507504, |
|
"learning_rate": 3.039568345323741e-07, |
|
"logits/chosen": -2.9736738204956055, |
|
"logits/rejected": -3.023808240890503, |
|
"logps/chosen": -155.4862060546875, |
|
"logps/rejected": -202.7901153564453, |
|
"loss": 0.1, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": -2.3716559410095215, |
|
"rewards/margins": 6.325069427490234, |
|
"rewards/rejected": -8.696724891662598, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 1.38, |
|
"grad_norm": 28.433296481008185, |
|
"learning_rate": 3.003597122302158e-07, |
|
"logits/chosen": -2.9116153717041016, |
|
"logits/rejected": -2.9804043769836426, |
|
"logps/chosen": -283.181396484375, |
|
"logps/rejected": -294.97491455078125, |
|
"loss": 0.1238, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": 0.19354362785816193, |
|
"rewards/margins": 8.582574844360352, |
|
"rewards/rejected": -8.389032363891602, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"grad_norm": 8.876311353316893, |
|
"learning_rate": 2.967625899280575e-07, |
|
"logits/chosen": -2.9362125396728516, |
|
"logits/rejected": -3.0337347984313965, |
|
"logps/chosen": -181.9010009765625, |
|
"logps/rejected": -255.519775390625, |
|
"loss": 0.1098, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": -0.3271546959877014, |
|
"rewards/margins": 7.4435715675354, |
|
"rewards/rejected": -7.770726203918457, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 1.42, |
|
"grad_norm": 9.661909624634754, |
|
"learning_rate": 2.9316546762589927e-07, |
|
"logits/chosen": -3.0636532306671143, |
|
"logits/rejected": -3.1014134883880615, |
|
"logps/chosen": -215.9973602294922, |
|
"logps/rejected": -313.1678161621094, |
|
"loss": 0.0814, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -0.42110705375671387, |
|
"rewards/margins": 10.026618957519531, |
|
"rewards/rejected": -10.447726249694824, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 1.44, |
|
"grad_norm": 16.74604259022382, |
|
"learning_rate": 2.8956834532374097e-07, |
|
"logits/chosen": -3.016714334487915, |
|
"logits/rejected": -2.938993453979492, |
|
"logps/chosen": -242.8096160888672, |
|
"logps/rejected": -330.51348876953125, |
|
"loss": 0.0879, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": -0.959947407245636, |
|
"rewards/margins": 7.937321662902832, |
|
"rewards/rejected": -8.897270202636719, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 1.46, |
|
"grad_norm": 21.06683152485559, |
|
"learning_rate": 2.859712230215827e-07, |
|
"logits/chosen": -2.8868894577026367, |
|
"logits/rejected": -2.8016741275787354, |
|
"logps/chosen": -141.5897674560547, |
|
"logps/rejected": -192.32943725585938, |
|
"loss": 0.0998, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -0.15523338317871094, |
|
"rewards/margins": 7.094580173492432, |
|
"rewards/rejected": -7.249814033508301, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 1.48, |
|
"grad_norm": 31.082677373086035, |
|
"learning_rate": 2.8237410071942447e-07, |
|
"logits/chosen": -3.1299731731414795, |
|
"logits/rejected": -3.0648064613342285, |
|
"logps/chosen": -195.8338165283203, |
|
"logps/rejected": -223.024169921875, |
|
"loss": 0.1339, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": -0.07549004256725311, |
|
"rewards/margins": 8.427389144897461, |
|
"rewards/rejected": -8.502878189086914, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"grad_norm": 43.39594697344513, |
|
"learning_rate": 2.7877697841726617e-07, |
|
"logits/chosen": -2.9652228355407715, |
|
"logits/rejected": -3.046382427215576, |
|
"logps/chosen": -220.59426879882812, |
|
"logps/rejected": -365.4892578125, |
|
"loss": 0.0799, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": 0.5180127620697021, |
|
"rewards/margins": 8.326019287109375, |
|
"rewards/rejected": -7.80800724029541, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 1.51, |
|
"grad_norm": 19.539022726952826, |
|
"learning_rate": 2.751798561151079e-07, |
|
"logits/chosen": -3.0099740028381348, |
|
"logits/rejected": -3.0176591873168945, |
|
"logps/chosen": -228.35977172851562, |
|
"logps/rejected": -307.06292724609375, |
|
"loss": 0.0939, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": -0.7477496862411499, |
|
"rewards/margins": 6.7176995277404785, |
|
"rewards/rejected": -7.465450286865234, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 1.53, |
|
"grad_norm": 28.726157406769754, |
|
"learning_rate": 2.7158273381294963e-07, |
|
"logits/chosen": -3.113996982574463, |
|
"logits/rejected": -3.1443333625793457, |
|
"logps/chosen": -273.08770751953125, |
|
"logps/rejected": -364.9424133300781, |
|
"loss": 0.203, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": 1.5305331945419312, |
|
"rewards/margins": 10.640276908874512, |
|
"rewards/rejected": -9.10974407196045, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 1.55, |
|
"grad_norm": 47.13980834678601, |
|
"learning_rate": 2.679856115107914e-07, |
|
"logits/chosen": -3.059725761413574, |
|
"logits/rejected": -3.009997844696045, |
|
"logps/chosen": -298.9270935058594, |
|
"logps/rejected": -321.82427978515625, |
|
"loss": 0.0951, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": 0.40496888756752014, |
|
"rewards/margins": 8.552021026611328, |
|
"rewards/rejected": -8.147050857543945, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 1.57, |
|
"grad_norm": 40.67896985913433, |
|
"learning_rate": 2.643884892086331e-07, |
|
"logits/chosen": -2.9675583839416504, |
|
"logits/rejected": -2.9391565322875977, |
|
"logps/chosen": -193.52569580078125, |
|
"logps/rejected": -270.5869140625, |
|
"loss": 0.1291, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": 1.095348596572876, |
|
"rewards/margins": 9.404745101928711, |
|
"rewards/rejected": -8.309396743774414, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 1.59, |
|
"grad_norm": 0.7304638838059418, |
|
"learning_rate": 2.607913669064748e-07, |
|
"logits/chosen": -3.051098346710205, |
|
"logits/rejected": -3.031184673309326, |
|
"logps/chosen": -276.66949462890625, |
|
"logps/rejected": -325.5706481933594, |
|
"loss": 0.2483, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": 2.204752206802368, |
|
"rewards/margins": 8.472108840942383, |
|
"rewards/rejected": -6.26735782623291, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 1.61, |
|
"grad_norm": 51.38852311030539, |
|
"learning_rate": 2.5719424460431653e-07, |
|
"logits/chosen": -2.971363067626953, |
|
"logits/rejected": -2.7650504112243652, |
|
"logps/chosen": -227.90182495117188, |
|
"logps/rejected": -277.134765625, |
|
"loss": 0.0864, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": 1.8440443277359009, |
|
"rewards/margins": 8.721940994262695, |
|
"rewards/rejected": -6.877896308898926, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 1.63, |
|
"grad_norm": 27.97321903508496, |
|
"learning_rate": 2.5359712230215824e-07, |
|
"logits/chosen": -2.748213052749634, |
|
"logits/rejected": -2.9322519302368164, |
|
"logps/chosen": -214.3332977294922, |
|
"logps/rejected": -324.88092041015625, |
|
"loss": 0.1023, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": 0.5567120313644409, |
|
"rewards/margins": 10.445902824401855, |
|
"rewards/rejected": -9.889190673828125, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 1.65, |
|
"grad_norm": 25.914111546778514, |
|
"learning_rate": 2.5e-07, |
|
"logits/chosen": -2.8877971172332764, |
|
"logits/rejected": -2.816826581954956, |
|
"logps/chosen": -204.908203125, |
|
"logps/rejected": -430.1329650878906, |
|
"loss": 0.1047, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": 0.22576239705085754, |
|
"rewards/margins": 20.19011878967285, |
|
"rewards/rejected": -19.96435546875, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 1.67, |
|
"grad_norm": 1.1980659785434056, |
|
"learning_rate": 2.464028776978417e-07, |
|
"logits/chosen": -2.936771869659424, |
|
"logits/rejected": -2.96122407913208, |
|
"logps/chosen": -255.11215209960938, |
|
"logps/rejected": -308.09161376953125, |
|
"loss": 0.0862, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": 0.9314340353012085, |
|
"rewards/margins": 9.529711723327637, |
|
"rewards/rejected": -8.598278045654297, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 1.69, |
|
"grad_norm": 27.693072917563903, |
|
"learning_rate": 2.4280575539568344e-07, |
|
"logits/chosen": -2.6308648586273193, |
|
"logits/rejected": -2.745168447494507, |
|
"logps/chosen": -194.28121948242188, |
|
"logps/rejected": -305.8986511230469, |
|
"loss": 0.0968, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": -2.185044288635254, |
|
"rewards/margins": 8.040155410766602, |
|
"rewards/rejected": -10.225198745727539, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 1.71, |
|
"grad_norm": 40.41147122052846, |
|
"learning_rate": 2.392086330935252e-07, |
|
"logits/chosen": -2.8742213249206543, |
|
"logits/rejected": -2.8451921939849854, |
|
"logps/chosen": -241.3814697265625, |
|
"logps/rejected": -313.4134521484375, |
|
"loss": 0.2677, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": 1.6791108846664429, |
|
"rewards/margins": 9.191535949707031, |
|
"rewards/rejected": -7.512425899505615, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 1.73, |
|
"grad_norm": 71.6126200211681, |
|
"learning_rate": 2.356115107913669e-07, |
|
"logits/chosen": -2.9680380821228027, |
|
"logits/rejected": -2.8588905334472656, |
|
"logps/chosen": -178.02896118164062, |
|
"logps/rejected": -293.750244140625, |
|
"loss": 0.0765, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": -0.8559761047363281, |
|
"rewards/margins": 9.371949195861816, |
|
"rewards/rejected": -10.227925300598145, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 1.75, |
|
"grad_norm": 61.57298874023214, |
|
"learning_rate": 2.3201438848920862e-07, |
|
"logits/chosen": -2.919063091278076, |
|
"logits/rejected": -2.9276976585388184, |
|
"logps/chosen": -173.81768798828125, |
|
"logps/rejected": -284.75054931640625, |
|
"loss": 0.0982, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": 1.0116955041885376, |
|
"rewards/margins": 8.733976364135742, |
|
"rewards/rejected": -7.722282409667969, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 1.77, |
|
"grad_norm": 29.6755083957111, |
|
"learning_rate": 2.2841726618705035e-07, |
|
"logits/chosen": -2.9070231914520264, |
|
"logits/rejected": -2.838956356048584, |
|
"logps/chosen": -219.24081420898438, |
|
"logps/rejected": -275.2736511230469, |
|
"loss": 0.1104, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -1.1092593669891357, |
|
"rewards/margins": 6.717539310455322, |
|
"rewards/rejected": -7.826798915863037, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 1.79, |
|
"grad_norm": 88.06965761995421, |
|
"learning_rate": 2.2482014388489208e-07, |
|
"logits/chosen": -2.7650623321533203, |
|
"logits/rejected": -2.985419750213623, |
|
"logps/chosen": -209.66958618164062, |
|
"logps/rejected": -307.84808349609375, |
|
"loss": 0.1387, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": 1.090811848640442, |
|
"rewards/margins": 8.861490249633789, |
|
"rewards/rejected": -7.770678520202637, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 1.81, |
|
"grad_norm": 14.973419395885822, |
|
"learning_rate": 2.212230215827338e-07, |
|
"logits/chosen": -2.8417439460754395, |
|
"logits/rejected": -2.875882863998413, |
|
"logps/chosen": -176.11224365234375, |
|
"logps/rejected": -296.80194091796875, |
|
"loss": 0.1462, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": 0.6913576126098633, |
|
"rewards/margins": 9.900197982788086, |
|
"rewards/rejected": -9.208839416503906, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 1.83, |
|
"grad_norm": 19.94743902740955, |
|
"learning_rate": 2.1762589928057553e-07, |
|
"logits/chosen": -2.825326442718506, |
|
"logits/rejected": -2.7393977642059326, |
|
"logps/chosen": -183.79501342773438, |
|
"logps/rejected": -237.4878387451172, |
|
"loss": 0.0987, |
|
"rewards/accuracies": 0.8500000238418579, |
|
"rewards/chosen": -0.1172792911529541, |
|
"rewards/margins": 5.7345991134643555, |
|
"rewards/rejected": -5.851877689361572, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 1.84, |
|
"grad_norm": 24.84797812605884, |
|
"learning_rate": 2.1402877697841726e-07, |
|
"logits/chosen": -2.966536283493042, |
|
"logits/rejected": -2.9578917026519775, |
|
"logps/chosen": -203.7595672607422, |
|
"logps/rejected": -262.04974365234375, |
|
"loss": 0.1062, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": 0.500684380531311, |
|
"rewards/margins": 6.407332420349121, |
|
"rewards/rejected": -5.906649112701416, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 1.86, |
|
"grad_norm": 10.723678972332182, |
|
"learning_rate": 2.1043165467625899e-07, |
|
"logits/chosen": -3.024386405944824, |
|
"logits/rejected": -2.9682140350341797, |
|
"logps/chosen": -290.12860107421875, |
|
"logps/rejected": -395.7617492675781, |
|
"loss": 0.0888, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": 1.9799671173095703, |
|
"rewards/margins": 10.694775581359863, |
|
"rewards/rejected": -8.71480655670166, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 1.88, |
|
"grad_norm": 107.78338392646336, |
|
"learning_rate": 2.068345323741007e-07, |
|
"logits/chosen": -2.7938714027404785, |
|
"logits/rejected": -2.8441925048828125, |
|
"logps/chosen": -113.68382263183594, |
|
"logps/rejected": -215.2233428955078, |
|
"loss": 0.1476, |
|
"rewards/accuracies": 0.8500000238418579, |
|
"rewards/chosen": -1.5707006454467773, |
|
"rewards/margins": 6.20760440826416, |
|
"rewards/rejected": -7.7783050537109375, |
|
"step": 970 |
|
}, |
|
{ |
|
"epoch": 1.9, |
|
"grad_norm": 12.51020681306747, |
|
"learning_rate": 2.0323741007194244e-07, |
|
"logits/chosen": -2.961688995361328, |
|
"logits/rejected": -2.7563016414642334, |
|
"logps/chosen": -224.689453125, |
|
"logps/rejected": -250.39193725585938, |
|
"loss": 0.1186, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -1.786786675453186, |
|
"rewards/margins": 5.6869611740112305, |
|
"rewards/rejected": -7.473748207092285, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 1.92, |
|
"grad_norm": 22.530969988611286, |
|
"learning_rate": 1.9964028776978417e-07, |
|
"logits/chosen": -2.937544345855713, |
|
"logits/rejected": -2.9909420013427734, |
|
"logps/chosen": -186.51730346679688, |
|
"logps/rejected": -228.6781463623047, |
|
"loss": 0.09, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -0.5189081430435181, |
|
"rewards/margins": 6.764664649963379, |
|
"rewards/rejected": -7.283572196960449, |
|
"step": 990 |
|
}, |
|
{ |
|
"epoch": 1.94, |
|
"grad_norm": 13.752486135555781, |
|
"learning_rate": 1.960431654676259e-07, |
|
"logits/chosen": -3.005913019180298, |
|
"logits/rejected": -2.9097745418548584, |
|
"logps/chosen": -204.80996704101562, |
|
"logps/rejected": -216.8582763671875, |
|
"loss": 0.0884, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -1.1967257261276245, |
|
"rewards/margins": 7.3372955322265625, |
|
"rewards/rejected": -8.534021377563477, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 1.94, |
|
"eval_logits/chosen": -2.976330518722534, |
|
"eval_logits/rejected": -2.9599409103393555, |
|
"eval_logps/chosen": -225.5250244140625, |
|
"eval_logps/rejected": -230.1306610107422, |
|
"eval_loss": 0.5467228293418884, |
|
"eval_rewards/accuracies": 0.7024999856948853, |
|
"eval_rewards/chosen": -3.4828414916992188, |
|
"eval_rewards/margins": 2.553647041320801, |
|
"eval_rewards/rejected": -6.036489486694336, |
|
"eval_runtime": 169.6259, |
|
"eval_samples_per_second": 18.606, |
|
"eval_steps_per_second": 0.295, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 1.96, |
|
"grad_norm": 84.31680232274636, |
|
"learning_rate": 1.9244604316546762e-07, |
|
"logits/chosen": -3.030494451522827, |
|
"logits/rejected": -3.0824122428894043, |
|
"logps/chosen": -208.97262573242188, |
|
"logps/rejected": -380.12371826171875, |
|
"loss": 0.131, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": 0.10666930675506592, |
|
"rewards/margins": 10.088615417480469, |
|
"rewards/rejected": -9.98194694519043, |
|
"step": 1010 |
|
}, |
|
{ |
|
"epoch": 1.98, |
|
"grad_norm": 16.33806610185454, |
|
"learning_rate": 1.8884892086330935e-07, |
|
"logits/chosen": -2.9731550216674805, |
|
"logits/rejected": -2.98013973236084, |
|
"logps/chosen": -201.14505004882812, |
|
"logps/rejected": -301.49871826171875, |
|
"loss": 0.0921, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": 0.5621670484542847, |
|
"rewards/margins": 10.991891860961914, |
|
"rewards/rejected": -10.429725646972656, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"grad_norm": 5.953845816794541, |
|
"learning_rate": 1.8525179856115105e-07, |
|
"logits/chosen": -2.810067653656006, |
|
"logits/rejected": -2.896484851837158, |
|
"logps/chosen": -170.7087860107422, |
|
"logps/rejected": -305.812744140625, |
|
"loss": 0.2128, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -0.03446235507726669, |
|
"rewards/margins": 8.075230598449707, |
|
"rewards/rejected": -8.109692573547363, |
|
"step": 1030 |
|
}, |
|
{ |
|
"epoch": 2.02, |
|
"grad_norm": 25.008245531662734, |
|
"learning_rate": 1.816546762589928e-07, |
|
"logits/chosen": -2.9846489429473877, |
|
"logits/rejected": -2.7843635082244873, |
|
"logps/chosen": -199.68411254882812, |
|
"logps/rejected": -286.22967529296875, |
|
"loss": 0.0662, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": 1.3490269184112549, |
|
"rewards/margins": 9.136482238769531, |
|
"rewards/rejected": -7.787455081939697, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 2.04, |
|
"grad_norm": 5.633840150274412, |
|
"learning_rate": 1.7805755395683453e-07, |
|
"logits/chosen": -2.7847142219543457, |
|
"logits/rejected": -2.885359287261963, |
|
"logps/chosen": -219.5730438232422, |
|
"logps/rejected": -254.0897674560547, |
|
"loss": 0.0558, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -1.2533214092254639, |
|
"rewards/margins": 8.608622550964355, |
|
"rewards/rejected": -9.861943244934082, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 2.06, |
|
"grad_norm": 7.078559278301402, |
|
"learning_rate": 1.7446043165467626e-07, |
|
"logits/chosen": -2.852372407913208, |
|
"logits/rejected": -2.7808871269226074, |
|
"logps/chosen": -215.5196075439453, |
|
"logps/rejected": -262.0018005371094, |
|
"loss": 0.0832, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": -0.6660928130149841, |
|
"rewards/margins": 8.312829971313477, |
|
"rewards/rejected": -8.978921890258789, |
|
"step": 1060 |
|
}, |
|
{ |
|
"epoch": 2.08, |
|
"grad_norm": 59.08802888748966, |
|
"learning_rate": 1.7086330935251798e-07, |
|
"logits/chosen": -2.7517917156219482, |
|
"logits/rejected": -2.806405544281006, |
|
"logps/chosen": -172.39793395996094, |
|
"logps/rejected": -225.39071655273438, |
|
"loss": 0.0657, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -0.16179580986499786, |
|
"rewards/margins": 9.866128921508789, |
|
"rewards/rejected": -10.027925491333008, |
|
"step": 1070 |
|
}, |
|
{ |
|
"epoch": 2.1, |
|
"grad_norm": 17.218454632279617, |
|
"learning_rate": 1.6726618705035968e-07, |
|
"logits/chosen": -2.901048183441162, |
|
"logits/rejected": -2.8675291538238525, |
|
"logps/chosen": -256.65814208984375, |
|
"logps/rejected": -278.4682312011719, |
|
"loss": 0.0809, |
|
"rewards/accuracies": 0.8500000238418579, |
|
"rewards/chosen": -1.1122452020645142, |
|
"rewards/margins": 8.697083473205566, |
|
"rewards/rejected": -9.80932903289795, |
|
"step": 1080 |
|
}, |
|
{ |
|
"epoch": 2.12, |
|
"grad_norm": 5.5094848478152585, |
|
"learning_rate": 1.6366906474820144e-07, |
|
"logits/chosen": -2.7242445945739746, |
|
"logits/rejected": -2.590859889984131, |
|
"logps/chosen": -196.37344360351562, |
|
"logps/rejected": -369.8520812988281, |
|
"loss": 0.0932, |
|
"rewards/accuracies": 0.8500000238418579, |
|
"rewards/chosen": -1.2810817956924438, |
|
"rewards/margins": 14.394078254699707, |
|
"rewards/rejected": -15.67516040802002, |
|
"step": 1090 |
|
}, |
|
{ |
|
"epoch": 2.14, |
|
"grad_norm": 4.35723362011521, |
|
"learning_rate": 1.6007194244604316e-07, |
|
"logits/chosen": -2.9155492782592773, |
|
"logits/rejected": -2.9282360076904297, |
|
"logps/chosen": -253.76278686523438, |
|
"logps/rejected": -349.87811279296875, |
|
"loss": 0.0685, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": 0.497481107711792, |
|
"rewards/margins": 10.071554183959961, |
|
"rewards/rejected": -9.574074745178223, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 2.16, |
|
"grad_norm": 27.13694624173697, |
|
"learning_rate": 1.564748201438849e-07, |
|
"logits/chosen": -2.9795854091644287, |
|
"logits/rejected": -2.912909746170044, |
|
"logps/chosen": -204.10121154785156, |
|
"logps/rejected": -311.55316162109375, |
|
"loss": 0.0655, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": 1.9577583074569702, |
|
"rewards/margins": 11.63161563873291, |
|
"rewards/rejected": -9.673857688903809, |
|
"step": 1110 |
|
}, |
|
{ |
|
"epoch": 2.17, |
|
"grad_norm": 9.412304352633278, |
|
"learning_rate": 1.5287769784172662e-07, |
|
"logits/chosen": -2.699101686477661, |
|
"logits/rejected": -2.817841053009033, |
|
"logps/chosen": -154.8762664794922, |
|
"logps/rejected": -330.6363525390625, |
|
"loss": 0.0521, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": -1.620474100112915, |
|
"rewards/margins": 9.67332649230957, |
|
"rewards/rejected": -11.29379940032959, |
|
"step": 1120 |
|
}, |
|
{ |
|
"epoch": 2.19, |
|
"grad_norm": 3.6214126849238792, |
|
"learning_rate": 1.4928057553956832e-07, |
|
"logits/chosen": -2.9571373462677, |
|
"logits/rejected": -2.856677532196045, |
|
"logps/chosen": -171.8690643310547, |
|
"logps/rejected": -235.8432159423828, |
|
"loss": 0.0925, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -6.249312877655029, |
|
"rewards/margins": 6.034473419189453, |
|
"rewards/rejected": -12.283784866333008, |
|
"step": 1130 |
|
}, |
|
{ |
|
"epoch": 2.21, |
|
"grad_norm": 34.86087199990997, |
|
"learning_rate": 1.4568345323741007e-07, |
|
"logits/chosen": -2.827342987060547, |
|
"logits/rejected": -2.9063026905059814, |
|
"logps/chosen": -211.0481719970703, |
|
"logps/rejected": -349.0590515136719, |
|
"loss": 0.0662, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -2.3218135833740234, |
|
"rewards/margins": 9.982672691345215, |
|
"rewards/rejected": -12.304486274719238, |
|
"step": 1140 |
|
}, |
|
{ |
|
"epoch": 2.23, |
|
"grad_norm": 4.714978056289899, |
|
"learning_rate": 1.420863309352518e-07, |
|
"logits/chosen": -2.8489651679992676, |
|
"logits/rejected": -2.886852741241455, |
|
"logps/chosen": -204.08822631835938, |
|
"logps/rejected": -322.1864929199219, |
|
"loss": 0.0854, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -0.09814932197332382, |
|
"rewards/margins": 9.744218826293945, |
|
"rewards/rejected": -9.842368125915527, |
|
"step": 1150 |
|
}, |
|
{ |
|
"epoch": 2.25, |
|
"grad_norm": 2.9947273748809358, |
|
"learning_rate": 1.3848920863309352e-07, |
|
"logits/chosen": -2.791313886642456, |
|
"logits/rejected": -2.819204807281494, |
|
"logps/chosen": -269.57891845703125, |
|
"logps/rejected": -348.280517578125, |
|
"loss": 0.095, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": -2.2503886222839355, |
|
"rewards/margins": 8.809748649597168, |
|
"rewards/rejected": -11.060136795043945, |
|
"step": 1160 |
|
}, |
|
{ |
|
"epoch": 2.27, |
|
"grad_norm": 3.0283319227532206, |
|
"learning_rate": 1.3489208633093525e-07, |
|
"logits/chosen": -2.9087767601013184, |
|
"logits/rejected": -2.84749174118042, |
|
"logps/chosen": -266.15948486328125, |
|
"logps/rejected": -327.80059814453125, |
|
"loss": 0.0629, |
|
"rewards/accuracies": 0.8500000238418579, |
|
"rewards/chosen": -1.6806023120880127, |
|
"rewards/margins": 9.073920249938965, |
|
"rewards/rejected": -10.754522323608398, |
|
"step": 1170 |
|
}, |
|
{ |
|
"epoch": 2.29, |
|
"grad_norm": 23.204528474486622, |
|
"learning_rate": 1.3129496402877695e-07, |
|
"logits/chosen": -2.733978748321533, |
|
"logits/rejected": -2.7428078651428223, |
|
"logps/chosen": -224.3394317626953, |
|
"logps/rejected": -264.8204345703125, |
|
"loss": 0.0768, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": -2.331441879272461, |
|
"rewards/margins": 8.235403060913086, |
|
"rewards/rejected": -10.56684398651123, |
|
"step": 1180 |
|
}, |
|
{ |
|
"epoch": 2.31, |
|
"grad_norm": 2.386208084053275, |
|
"learning_rate": 1.276978417266187e-07, |
|
"logits/chosen": -2.870147943496704, |
|
"logits/rejected": -2.857393980026245, |
|
"logps/chosen": -246.6834716796875, |
|
"logps/rejected": -351.1631774902344, |
|
"loss": 0.0737, |
|
"rewards/accuracies": 0.8500000238418579, |
|
"rewards/chosen": -1.3186081647872925, |
|
"rewards/margins": 11.430757522583008, |
|
"rewards/rejected": -12.749364852905273, |
|
"step": 1190 |
|
}, |
|
{ |
|
"epoch": 2.33, |
|
"grad_norm": 5.886073369587131, |
|
"learning_rate": 1.2410071942446043e-07, |
|
"logits/chosen": -2.635704517364502, |
|
"logits/rejected": -2.534574270248413, |
|
"logps/chosen": -153.0242462158203, |
|
"logps/rejected": -228.0364990234375, |
|
"loss": 0.0531, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": -0.8014551997184753, |
|
"rewards/margins": 9.71955680847168, |
|
"rewards/rejected": -10.521011352539062, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 2.35, |
|
"grad_norm": 1.8895213300439382, |
|
"learning_rate": 1.2050359712230216e-07, |
|
"logits/chosen": -2.8585143089294434, |
|
"logits/rejected": -2.8710408210754395, |
|
"logps/chosen": -193.92520141601562, |
|
"logps/rejected": -308.4482727050781, |
|
"loss": 0.0702, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.551455020904541, |
|
"rewards/margins": 10.751824378967285, |
|
"rewards/rejected": -12.303278923034668, |
|
"step": 1210 |
|
}, |
|
{ |
|
"epoch": 2.37, |
|
"grad_norm": 6.1150716672083645, |
|
"learning_rate": 1.1690647482014387e-07, |
|
"logits/chosen": -2.977564811706543, |
|
"logits/rejected": -2.8283185958862305, |
|
"logps/chosen": -313.2995300292969, |
|
"logps/rejected": -312.1686096191406, |
|
"loss": 0.0969, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -1.5176019668579102, |
|
"rewards/margins": 10.63847541809082, |
|
"rewards/rejected": -12.156076431274414, |
|
"step": 1220 |
|
}, |
|
{ |
|
"epoch": 2.39, |
|
"grad_norm": 9.938851512731329, |
|
"learning_rate": 1.1330935251798561e-07, |
|
"logits/chosen": -2.9206390380859375, |
|
"logits/rejected": -2.8288142681121826, |
|
"logps/chosen": -225.1664276123047, |
|
"logps/rejected": -310.8753662109375, |
|
"loss": 0.0297, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": 0.9396001696586609, |
|
"rewards/margins": 11.567548751831055, |
|
"rewards/rejected": -10.627946853637695, |
|
"step": 1230 |
|
}, |
|
{ |
|
"epoch": 2.41, |
|
"grad_norm": 2.069636358363744, |
|
"learning_rate": 1.0971223021582733e-07, |
|
"logits/chosen": -2.859739065170288, |
|
"logits/rejected": -2.7655975818634033, |
|
"logps/chosen": -255.2501678466797, |
|
"logps/rejected": -281.08489990234375, |
|
"loss": 0.0568, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": -2.5483479499816895, |
|
"rewards/margins": 9.106160163879395, |
|
"rewards/rejected": -11.654507637023926, |
|
"step": 1240 |
|
}, |
|
{ |
|
"epoch": 2.43, |
|
"grad_norm": 10.342520372077749, |
|
"learning_rate": 1.0611510791366907e-07, |
|
"logits/chosen": -3.0828137397766113, |
|
"logits/rejected": -2.9020261764526367, |
|
"logps/chosen": -222.02584838867188, |
|
"logps/rejected": -289.7030334472656, |
|
"loss": 0.0554, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": 0.2204093188047409, |
|
"rewards/margins": 11.187307357788086, |
|
"rewards/rejected": -10.966898918151855, |
|
"step": 1250 |
|
}, |
|
{ |
|
"epoch": 2.45, |
|
"grad_norm": 1.6091661711871676, |
|
"learning_rate": 1.0251798561151078e-07, |
|
"logits/chosen": -2.819439649581909, |
|
"logits/rejected": -2.7268171310424805, |
|
"logps/chosen": -206.8452606201172, |
|
"logps/rejected": -289.83172607421875, |
|
"loss": 0.0735, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -5.2341508865356445, |
|
"rewards/margins": 8.223525047302246, |
|
"rewards/rejected": -13.457676887512207, |
|
"step": 1260 |
|
}, |
|
{ |
|
"epoch": 2.47, |
|
"grad_norm": 2.9406146592333395, |
|
"learning_rate": 9.892086330935251e-08, |
|
"logits/chosen": -2.7542366981506348, |
|
"logits/rejected": -2.938502073287964, |
|
"logps/chosen": -203.71774291992188, |
|
"logps/rejected": -298.8545227050781, |
|
"loss": 0.0786, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -4.396937370300293, |
|
"rewards/margins": 8.35850715637207, |
|
"rewards/rejected": -12.755444526672363, |
|
"step": 1270 |
|
}, |
|
{ |
|
"epoch": 2.49, |
|
"grad_norm": 5.476946083131152, |
|
"learning_rate": 9.532374100719425e-08, |
|
"logits/chosen": -2.734649658203125, |
|
"logits/rejected": -2.8041725158691406, |
|
"logps/chosen": -243.1942596435547, |
|
"logps/rejected": -244.3195037841797, |
|
"loss": 0.0801, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": 0.2553873658180237, |
|
"rewards/margins": 9.097474098205566, |
|
"rewards/rejected": -8.842086791992188, |
|
"step": 1280 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"grad_norm": 3.926063875507809, |
|
"learning_rate": 9.172661870503596e-08, |
|
"logits/chosen": -2.8663649559020996, |
|
"logits/rejected": -2.8047869205474854, |
|
"logps/chosen": -220.23318481445312, |
|
"logps/rejected": -269.9891052246094, |
|
"loss": 0.0798, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -2.644606828689575, |
|
"rewards/margins": 8.667104721069336, |
|
"rewards/rejected": -11.311712265014648, |
|
"step": 1290 |
|
}, |
|
{ |
|
"epoch": 2.52, |
|
"grad_norm": 28.267660173886174, |
|
"learning_rate": 8.812949640287769e-08, |
|
"logits/chosen": -2.8638269901275635, |
|
"logits/rejected": -2.9266953468322754, |
|
"logps/chosen": -217.5587615966797, |
|
"logps/rejected": -212.8009033203125, |
|
"loss": 0.1024, |
|
"rewards/accuracies": 0.8500000238418579, |
|
"rewards/chosen": -2.6887857913970947, |
|
"rewards/margins": 6.493393898010254, |
|
"rewards/rejected": -9.18217945098877, |
|
"step": 1300 |
|
}, |
|
{ |
|
"epoch": 2.54, |
|
"grad_norm": 12.557325080488198, |
|
"learning_rate": 8.453237410071942e-08, |
|
"logits/chosen": -2.8576910495758057, |
|
"logits/rejected": -2.9612555503845215, |
|
"logps/chosen": -258.0310363769531, |
|
"logps/rejected": -358.3188171386719, |
|
"loss": 0.0534, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -2.497694730758667, |
|
"rewards/margins": 11.422506332397461, |
|
"rewards/rejected": -13.920202255249023, |
|
"step": 1310 |
|
}, |
|
{ |
|
"epoch": 2.56, |
|
"grad_norm": 2.097907737579444, |
|
"learning_rate": 8.093525179856114e-08, |
|
"logits/chosen": -2.9389355182647705, |
|
"logits/rejected": -2.9012742042541504, |
|
"logps/chosen": -263.598876953125, |
|
"logps/rejected": -348.38128662109375, |
|
"loss": 0.0666, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": 0.6279190182685852, |
|
"rewards/margins": 10.751107215881348, |
|
"rewards/rejected": -10.123188972473145, |
|
"step": 1320 |
|
}, |
|
{ |
|
"epoch": 2.58, |
|
"grad_norm": 3.2585508984060305, |
|
"learning_rate": 7.733812949640288e-08, |
|
"logits/chosen": -2.897578716278076, |
|
"logits/rejected": -2.814047336578369, |
|
"logps/chosen": -205.3290252685547, |
|
"logps/rejected": -290.0876159667969, |
|
"loss": 0.0725, |
|
"rewards/accuracies": 0.8500000238418579, |
|
"rewards/chosen": -5.120051383972168, |
|
"rewards/margins": 8.90400505065918, |
|
"rewards/rejected": -14.024057388305664, |
|
"step": 1330 |
|
}, |
|
{ |
|
"epoch": 2.6, |
|
"grad_norm": 28.5204767402595, |
|
"learning_rate": 7.37410071942446e-08, |
|
"logits/chosen": -2.6974236965179443, |
|
"logits/rejected": -2.763514280319214, |
|
"logps/chosen": -230.7462158203125, |
|
"logps/rejected": -315.193603515625, |
|
"loss": 0.0837, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": -2.1283421516418457, |
|
"rewards/margins": 8.708791732788086, |
|
"rewards/rejected": -10.837133407592773, |
|
"step": 1340 |
|
}, |
|
{ |
|
"epoch": 2.62, |
|
"grad_norm": 13.988032625962447, |
|
"learning_rate": 7.014388489208632e-08, |
|
"logits/chosen": -2.9279181957244873, |
|
"logits/rejected": -2.898128032684326, |
|
"logps/chosen": -178.9357147216797, |
|
"logps/rejected": -326.0805969238281, |
|
"loss": 0.0547, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": 0.4182695746421814, |
|
"rewards/margins": 9.742707252502441, |
|
"rewards/rejected": -9.324437141418457, |
|
"step": 1350 |
|
}, |
|
{ |
|
"epoch": 2.64, |
|
"grad_norm": 1.9784130840431484, |
|
"learning_rate": 6.654676258992805e-08, |
|
"logits/chosen": -2.5293657779693604, |
|
"logits/rejected": -2.56721830368042, |
|
"logps/chosen": -215.4763946533203, |
|
"logps/rejected": -347.84918212890625, |
|
"loss": 0.0672, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -0.11363963782787323, |
|
"rewards/margins": 13.453043937683105, |
|
"rewards/rejected": -13.566683769226074, |
|
"step": 1360 |
|
}, |
|
{ |
|
"epoch": 2.66, |
|
"grad_norm": 9.356111730892692, |
|
"learning_rate": 6.294964028776978e-08, |
|
"logits/chosen": -2.9187986850738525, |
|
"logits/rejected": -2.9558041095733643, |
|
"logps/chosen": -185.33265686035156, |
|
"logps/rejected": -348.08343505859375, |
|
"loss": 0.0603, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": -1.900217056274414, |
|
"rewards/margins": 11.395307540893555, |
|
"rewards/rejected": -13.295524597167969, |
|
"step": 1370 |
|
}, |
|
{ |
|
"epoch": 2.68, |
|
"grad_norm": 1.7310543650629324, |
|
"learning_rate": 5.9352517985611505e-08, |
|
"logits/chosen": -2.9439430236816406, |
|
"logits/rejected": -2.846768856048584, |
|
"logps/chosen": -295.23602294921875, |
|
"logps/rejected": -313.1105651855469, |
|
"loss": 0.0848, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": 0.23460149765014648, |
|
"rewards/margins": 8.801981925964355, |
|
"rewards/rejected": -8.56737995147705, |
|
"step": 1380 |
|
}, |
|
{ |
|
"epoch": 2.7, |
|
"grad_norm": 5.698972392066655, |
|
"learning_rate": 5.575539568345323e-08, |
|
"logits/chosen": -2.932072162628174, |
|
"logits/rejected": -2.9518001079559326, |
|
"logps/chosen": -236.2991943359375, |
|
"logps/rejected": -389.4167175292969, |
|
"loss": 0.0435, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -1.4790451526641846, |
|
"rewards/margins": 12.147565841674805, |
|
"rewards/rejected": -13.626611709594727, |
|
"step": 1390 |
|
}, |
|
{ |
|
"epoch": 2.72, |
|
"grad_norm": 10.118850215739602, |
|
"learning_rate": 5.2158273381294966e-08, |
|
"logits/chosen": -2.7540392875671387, |
|
"logits/rejected": -2.6350178718566895, |
|
"logps/chosen": -203.79840087890625, |
|
"logps/rejected": -262.6423034667969, |
|
"loss": 0.0881, |
|
"rewards/accuracies": 0.8500000238418579, |
|
"rewards/chosen": -5.214129447937012, |
|
"rewards/margins": 8.150261878967285, |
|
"rewards/rejected": -13.36439037322998, |
|
"step": 1400 |
|
}, |
|
{ |
|
"epoch": 2.74, |
|
"grad_norm": 6.195639443759268, |
|
"learning_rate": 4.856115107913669e-08, |
|
"logits/chosen": -2.778777837753296, |
|
"logits/rejected": -2.7979698181152344, |
|
"logps/chosen": -248.45901489257812, |
|
"logps/rejected": -357.4532470703125, |
|
"loss": 0.06, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": -0.6782010197639465, |
|
"rewards/margins": 11.35696792602539, |
|
"rewards/rejected": -12.03516960144043, |
|
"step": 1410 |
|
}, |
|
{ |
|
"epoch": 2.76, |
|
"grad_norm": 0.19531387796331173, |
|
"learning_rate": 4.496402877697841e-08, |
|
"logits/chosen": -2.6897778511047363, |
|
"logits/rejected": -2.7521700859069824, |
|
"logps/chosen": -278.0186767578125, |
|
"logps/rejected": -327.90216064453125, |
|
"loss": 0.0685, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -3.4422764778137207, |
|
"rewards/margins": 11.567034721374512, |
|
"rewards/rejected": -15.009310722351074, |
|
"step": 1420 |
|
}, |
|
{ |
|
"epoch": 2.78, |
|
"grad_norm": 5.15723697089762, |
|
"learning_rate": 4.136690647482014e-08, |
|
"logits/chosen": -2.8552756309509277, |
|
"logits/rejected": -2.815695285797119, |
|
"logps/chosen": -189.77650451660156, |
|
"logps/rejected": -303.08135986328125, |
|
"loss": 0.0761, |
|
"rewards/accuracies": 0.8500000238418579, |
|
"rewards/chosen": -3.0464930534362793, |
|
"rewards/margins": 10.03233528137207, |
|
"rewards/rejected": -13.078828811645508, |
|
"step": 1430 |
|
}, |
|
{ |
|
"epoch": 2.8, |
|
"grad_norm": 1.272902203913625, |
|
"learning_rate": 3.776978417266187e-08, |
|
"logits/chosen": -2.840439558029175, |
|
"logits/rejected": -2.854123592376709, |
|
"logps/chosen": -201.63998413085938, |
|
"logps/rejected": -294.6650085449219, |
|
"loss": 0.0943, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -1.1479772329330444, |
|
"rewards/margins": 9.911928176879883, |
|
"rewards/rejected": -11.059905052185059, |
|
"step": 1440 |
|
}, |
|
{ |
|
"epoch": 2.82, |
|
"grad_norm": 6.709732675322476, |
|
"learning_rate": 3.41726618705036e-08, |
|
"logits/chosen": -2.6554994583129883, |
|
"logits/rejected": -2.745185375213623, |
|
"logps/chosen": -229.5808868408203, |
|
"logps/rejected": -260.39434814453125, |
|
"loss": 0.0925, |
|
"rewards/accuracies": 0.8500000238418579, |
|
"rewards/chosen": -4.566291809082031, |
|
"rewards/margins": 6.469024658203125, |
|
"rewards/rejected": -11.035317420959473, |
|
"step": 1450 |
|
}, |
|
{ |
|
"epoch": 2.83, |
|
"grad_norm": 7.800521530829396, |
|
"learning_rate": 3.057553956834532e-08, |
|
"logits/chosen": -3.0627307891845703, |
|
"logits/rejected": -2.9227752685546875, |
|
"logps/chosen": -265.76641845703125, |
|
"logps/rejected": -373.4715881347656, |
|
"loss": 0.0574, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": 0.11531007289886475, |
|
"rewards/margins": 10.523666381835938, |
|
"rewards/rejected": -10.408354759216309, |
|
"step": 1460 |
|
}, |
|
{ |
|
"epoch": 2.85, |
|
"grad_norm": 9.312558432834338, |
|
"learning_rate": 2.6978417266187048e-08, |
|
"logits/chosen": -3.0164947509765625, |
|
"logits/rejected": -2.9149060249328613, |
|
"logps/chosen": -299.1823425292969, |
|
"logps/rejected": -357.61688232421875, |
|
"loss": 0.0641, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -1.9149433374404907, |
|
"rewards/margins": 11.019976615905762, |
|
"rewards/rejected": -12.934918403625488, |
|
"step": 1470 |
|
}, |
|
{ |
|
"epoch": 2.87, |
|
"grad_norm": 12.48672504880718, |
|
"learning_rate": 2.3381294964028775e-08, |
|
"logits/chosen": -2.6838114261627197, |
|
"logits/rejected": -2.7183403968811035, |
|
"logps/chosen": -177.66366577148438, |
|
"logps/rejected": -297.00103759765625, |
|
"loss": 0.0596, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": -2.6696372032165527, |
|
"rewards/margins": 10.785930633544922, |
|
"rewards/rejected": -13.45556926727295, |
|
"step": 1480 |
|
}, |
|
{ |
|
"epoch": 2.89, |
|
"grad_norm": 18.95745366827935, |
|
"learning_rate": 1.9784172661870502e-08, |
|
"logits/chosen": -2.6468663215637207, |
|
"logits/rejected": -2.6348750591278076, |
|
"logps/chosen": -230.4730224609375, |
|
"logps/rejected": -255.1483612060547, |
|
"loss": 0.0823, |
|
"rewards/accuracies": 0.8500000238418579, |
|
"rewards/chosen": -0.6124650835990906, |
|
"rewards/margins": 10.041625022888184, |
|
"rewards/rejected": -10.65408992767334, |
|
"step": 1490 |
|
}, |
|
{ |
|
"epoch": 2.91, |
|
"grad_norm": 0.8016635311410285, |
|
"learning_rate": 1.618705035971223e-08, |
|
"logits/chosen": -2.8173935413360596, |
|
"logits/rejected": -2.7464711666107178, |
|
"logps/chosen": -202.07252502441406, |
|
"logps/rejected": -323.95135498046875, |
|
"loss": 0.0776, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -2.8145692348480225, |
|
"rewards/margins": 11.602530479431152, |
|
"rewards/rejected": -14.417098999023438, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 2.93, |
|
"grad_norm": 80.91582579930383, |
|
"learning_rate": 1.2589928057553956e-08, |
|
"logits/chosen": -2.9232335090637207, |
|
"logits/rejected": -2.922365188598633, |
|
"logps/chosen": -182.398681640625, |
|
"logps/rejected": -370.7036437988281, |
|
"loss": 0.0741, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": 0.1800309121608734, |
|
"rewards/margins": 13.427223205566406, |
|
"rewards/rejected": -13.2471923828125, |
|
"step": 1510 |
|
}, |
|
{ |
|
"epoch": 2.95, |
|
"grad_norm": 0.5505807027887548, |
|
"learning_rate": 8.992805755395683e-09, |
|
"logits/chosen": -2.819612979888916, |
|
"logits/rejected": -2.773894786834717, |
|
"logps/chosen": -236.72335815429688, |
|
"logps/rejected": -311.35552978515625, |
|
"loss": 0.084, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": -0.4171246588230133, |
|
"rewards/margins": 12.874395370483398, |
|
"rewards/rejected": -13.291519165039062, |
|
"step": 1520 |
|
}, |
|
{ |
|
"epoch": 2.97, |
|
"grad_norm": 10.305894823331844, |
|
"learning_rate": 5.39568345323741e-09, |
|
"logits/chosen": -2.8409948348999023, |
|
"logits/rejected": -2.846924066543579, |
|
"logps/chosen": -266.71209716796875, |
|
"logps/rejected": -324.0392150878906, |
|
"loss": 0.0839, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": 1.8426433801651, |
|
"rewards/margins": 10.864583969116211, |
|
"rewards/rejected": -9.021939277648926, |
|
"step": 1530 |
|
}, |
|
{ |
|
"epoch": 2.99, |
|
"grad_norm": 21.7857600856585, |
|
"learning_rate": 1.7985611510791367e-09, |
|
"logits/chosen": -2.7813632488250732, |
|
"logits/rejected": -2.836905002593994, |
|
"logps/chosen": -192.66018676757812, |
|
"logps/rejected": -365.13128662109375, |
|
"loss": 0.0741, |
|
"rewards/accuracies": 1.0, |
|
"rewards/chosen": -0.4063602387905121, |
|
"rewards/margins": 11.652292251586914, |
|
"rewards/rejected": -12.058652877807617, |
|
"step": 1540 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"step": 1545, |
|
"total_flos": 0.0, |
|
"train_loss": 0.25373340110176973, |
|
"train_runtime": 3870.3099, |
|
"train_samples_per_second": 6.386, |
|
"train_steps_per_second": 0.399 |
|
} |
|
], |
|
"logging_steps": 10, |
|
"max_steps": 1545, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 3, |
|
"save_steps": 1000, |
|
"total_flos": 0.0, |
|
"train_batch_size": 2, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|