phi3m0128-cds-0.8-kendall-onof-decrease-corr-max-2-simpo-max1500-default
/
checkpoint-1050
/trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9024495058014611,
  "eval_steps": 50,
  "global_step": 1050,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.008594757198109154,
      "grad_norm": 0.06708361208438873,
      "learning_rate": 4.999451708687114e-06,
      "logits/chosen": 14.524938583374023,
      "logits/rejected": 14.82593822479248,
      "logps/chosen": -0.31433865427970886,
      "logps/rejected": -0.32406437397003174,
      "loss": 0.9442,
      "rewards/accuracies": 0.4124999940395355,
      "rewards/chosen": -0.4715079367160797,
      "rewards/margins": 0.014588532969355583,
      "rewards/rejected": -0.48609647154808044,
      "step": 10
    },
    {
      "epoch": 0.017189514396218308,
      "grad_norm": 0.056814808398485184,
      "learning_rate": 4.997807075247147e-06,
      "logits/chosen": 14.309213638305664,
      "logits/rejected": 14.978128433227539,
      "logps/chosen": -0.31283506751060486,
      "logps/rejected": -0.3911947011947632,
      "loss": 0.928,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.46925264596939087,
      "rewards/margins": 0.1175394207239151,
      "rewards/rejected": -0.5867919921875,
      "step": 20
    },
    {
      "epoch": 0.02578427159432746,
      "grad_norm": 0.061199307441711426,
      "learning_rate": 4.9950668210706795e-06,
      "logits/chosen": 14.68384075164795,
      "logits/rejected": 15.338122367858887,
      "logps/chosen": -0.3007296621799469,
      "logps/rejected": -0.3204456865787506,
      "loss": 0.9439,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.45109447836875916,
      "rewards/margins": 0.029573997482657433,
      "rewards/rejected": -0.48066848516464233,
      "step": 30
    },
    {
      "epoch": 0.034379028792436615,
      "grad_norm": 0.08423774689435959,
      "learning_rate": 4.9912321481237616e-06,
      "logits/chosen": 14.39265251159668,
      "logits/rejected": 15.059102058410645,
      "logps/chosen": -0.28216058015823364,
      "logps/rejected": -0.33495840430259705,
      "loss": 0.9184,
      "rewards/accuracies": 0.4124999940395355,
      "rewards/chosen": -0.42324090003967285,
      "rewards/margins": 0.07919676601886749,
      "rewards/rejected": -0.5024376511573792,
      "step": 40
    },
    {
      "epoch": 0.042973785990545764,
      "grad_norm": 0.06052614375948906,
      "learning_rate": 4.986304738420684e-06,
      "logits/chosen": 14.383735656738281,
      "logits/rejected": 15.029413223266602,
      "logps/chosen": -0.27970507740974426,
      "logps/rejected": -0.33213528990745544,
      "loss": 0.9317,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.4195576310157776,
      "rewards/margins": 0.07864536345005035,
      "rewards/rejected": -0.49820294976234436,
      "step": 50
    },
    {
      "epoch": 0.042973785990545764,
      "eval_logits/chosen": 14.424538612365723,
      "eval_logits/rejected": 15.006633758544922,
      "eval_logps/chosen": -0.2923925220966339,
      "eval_logps/rejected": -0.3531996011734009,
      "eval_loss": 0.9324354529380798,
      "eval_rewards/accuracies": 0.5052631497383118,
      "eval_rewards/chosen": -0.43858882784843445,
      "eval_rewards/margins": 0.09121060371398926,
      "eval_rewards/rejected": -0.5297994017601013,
      "eval_runtime": 26.3759,
      "eval_samples_per_second": 28.549,
      "eval_steps_per_second": 3.602,
      "step": 50
    },
    {
      "epoch": 0.05156854318865492,
      "grad_norm": 0.06899414211511612,
      "learning_rate": 4.980286753286196e-06,
      "logits/chosen": 14.888933181762695,
      "logits/rejected": 15.33955192565918,
      "logps/chosen": -0.2886829972267151,
      "logps/rejected": -0.34016504883766174,
      "loss": 0.9323,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.43302449584007263,
      "rewards/margins": 0.07722309231758118,
      "rewards/rejected": -0.5102475881576538,
      "step": 60
    },
    {
      "epoch": 0.060163300386764075,
      "grad_norm": 0.06679105013608932,
      "learning_rate": 4.973180832407471e-06,
      "logits/chosen": 14.721624374389648,
      "logits/rejected": 15.614666938781738,
      "logps/chosen": -0.29435139894485474,
      "logps/rejected": -0.38699784874916077,
      "loss": 0.9172,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.4415270686149597,
      "rewards/margins": 0.13896968960762024,
      "rewards/rejected": -0.5804967880249023,
      "step": 70
    },
    {
      "epoch": 0.06875805758487323,
      "grad_norm": 0.07169903814792633,
      "learning_rate": 4.964990092676263e-06,
      "logits/chosen": 13.848808288574219,
      "logits/rejected": 14.609800338745117,
      "logps/chosen": -0.26156893372535706,
      "logps/rejected": -0.33030644059181213,
      "loss": 0.9245,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.3923533856868744,
      "rewards/margins": 0.10310628265142441,
      "rewards/rejected": -0.495459645986557,
      "step": 80
    },
    {
      "epoch": 0.07735281478298238,
      "grad_norm": 0.06593246012926102,
      "learning_rate": 4.9557181268217225e-06,
      "logits/chosen": 14.603567123413086,
      "logits/rejected": 14.994171142578125,
      "logps/chosen": -0.3191321790218353,
      "logps/rejected": -0.3477073311805725,
      "loss": 0.9359,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.4786983132362366,
      "rewards/margins": 0.042862698435783386,
      "rewards/rejected": -0.5215609669685364,
      "step": 90
    },
    {
      "epoch": 0.08594757198109153,
      "grad_norm": 0.0718066617846489,
      "learning_rate": 4.9453690018345144e-06,
      "logits/chosen": 13.928094863891602,
      "logits/rejected": 14.792709350585938,
      "logps/chosen": -0.24115696549415588,
      "logps/rejected": -0.3537539839744568,
      "loss": 0.9066,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.3617354929447174,
      "rewards/margins": 0.16889554262161255,
      "rewards/rejected": -0.5306310653686523,
      "step": 100
    },
    {
      "epoch": 0.08594757198109153,
      "eval_logits/chosen": 14.40036392211914,
      "eval_logits/rejected": 14.97786808013916,
      "eval_logps/chosen": -0.2777771055698395,
      "eval_logps/rejected": -0.3516874611377716,
      "eval_loss": 0.9236211180686951,
      "eval_rewards/accuracies": 0.5052631497383118,
      "eval_rewards/chosen": -0.4166657328605652,
      "eval_rewards/margins": 0.11086549609899521,
      "eval_rewards/rejected": -0.5275312066078186,
      "eval_runtime": 25.8056,
      "eval_samples_per_second": 29.18,
      "eval_steps_per_second": 3.681,
      "step": 100
    },
    {
      "epoch": 0.09454232917920069,
      "grad_norm": 0.06681054830551147,
      "learning_rate": 4.933947257182901e-06,
      "logits/chosen": 14.76116943359375,
      "logits/rejected": 15.001077651977539,
      "logps/chosen": -0.297056645154953,
      "logps/rejected": -0.3221590518951416,
      "loss": 0.929,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": -0.4455850124359131,
      "rewards/margins": 0.03765357658267021,
      "rewards/rejected": -0.4832385182380676,
      "step": 110
    },
    {
      "epoch": 0.10313708637730984,
      "grad_norm": 0.10024584829807281,
      "learning_rate": 4.921457902821578e-06,
      "logits/chosen": 14.405306816101074,
      "logits/rejected": 15.084524154663086,
      "logps/chosen": -0.2726767361164093,
      "logps/rejected": -0.3543504774570465,
      "loss": 0.9299,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.40901508927345276,
      "rewards/margins": 0.12251058965921402,
      "rewards/rejected": -0.531525731086731,
      "step": 120
    },
    {
      "epoch": 0.11173184357541899,
      "grad_norm": 0.08629737794399261,
      "learning_rate": 4.907906416994146e-06,
      "logits/chosen": 14.073992729187012,
      "logits/rejected": 14.882128715515137,
      "logps/chosen": -0.2827032506465912,
      "logps/rejected": -0.369393527507782,
      "loss": 0.9109,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.42405492067337036,
      "rewards/margins": 0.13003548979759216,
      "rewards/rejected": -0.5540903806686401,
      "step": 130
    },
    {
      "epoch": 0.12032660077352815,
      "grad_norm": 0.07973086833953857,
      "learning_rate": 4.893298743830168e-06,
      "logits/chosen": 13.96656322479248,
      "logits/rejected": 14.639463424682617,
      "logps/chosen": -0.28426361083984375,
      "logps/rejected": -0.3899250030517578,
      "loss": 0.9138,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.4263954162597656,
      "rewards/margins": 0.1584920585155487,
      "rewards/rejected": -0.5848874449729919,
      "step": 140
    },
    {
      "epoch": 0.1289213579716373,
      "grad_norm": 0.08767445385456085,
      "learning_rate": 4.8776412907378845e-06,
      "logits/chosen": 13.705177307128906,
      "logits/rejected": 14.19865608215332,
      "logps/chosen": -0.26735779643058777,
      "logps/rejected": -0.34726911783218384,
      "loss": 0.9157,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.40103667974472046,
      "rewards/margins": 0.1198669821023941,
      "rewards/rejected": -0.5209037065505981,
      "step": 150
    },
    {
      "epoch": 0.1289213579716373,
      "eval_logits/chosen": 13.20260238647461,
      "eval_logits/rejected": 13.959339141845703,
      "eval_logps/chosen": -0.27623170614242554,
      "eval_logps/rejected": -0.3724917769432068,
      "eval_loss": 0.909102737903595,
      "eval_rewards/accuracies": 0.557894766330719,
      "eval_rewards/chosen": -0.4143475592136383,
      "eval_rewards/margins": 0.14439010620117188,
      "eval_rewards/rejected": -0.5587376356124878,
      "eval_runtime": 25.7839,
      "eval_samples_per_second": 29.204,
      "eval_steps_per_second": 3.684,
      "step": 150
    },
    {
      "epoch": 0.13751611516974646,
      "grad_norm": 0.09749539196491241,
      "learning_rate": 4.860940925593703e-06,
      "logits/chosen": 13.301411628723145,
      "logits/rejected": 14.054819107055664,
      "logps/chosen": -0.2808162569999695,
      "logps/rejected": -0.39500662684440613,
      "loss": 0.9,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.4212244153022766,
      "rewards/margins": 0.17128555476665497,
      "rewards/rejected": -0.592509925365448,
      "step": 160
    },
    {
      "epoch": 0.1461108723678556,
      "grad_norm": 0.14965052902698517,
      "learning_rate": 4.84320497372973e-06,
      "logits/chosen": 12.261284828186035,
      "logits/rejected": 13.0617036819458,
      "logps/chosen": -0.29266461730003357,
      "logps/rejected": -0.4265298843383789,
      "loss": 0.896,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.43899694085121155,
      "rewards/margins": 0.20079784095287323,
      "rewards/rejected": -0.6397948265075684,
      "step": 170
    },
    {
      "epoch": 0.15470562956596476,
      "grad_norm": 0.13044072687625885,
      "learning_rate": 4.824441214720629e-06,
      "logits/chosen": 11.509119033813477,
      "logits/rejected": 12.31033706665039,
      "logps/chosen": -0.27384257316589355,
      "logps/rejected": -0.3920982778072357,
      "loss": 0.8911,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.4107638895511627,
      "rewards/margins": 0.17738358676433563,
      "rewards/rejected": -0.5881474018096924,
      "step": 180
    },
    {
      "epoch": 0.1633003867640739,
      "grad_norm": 0.16182811558246613,
      "learning_rate": 4.804657878971252e-06,
      "logits/chosen": 10.68933391571045,
      "logits/rejected": 11.632065773010254,
      "logps/chosen": -0.292975515127182,
      "logps/rejected": -0.42257896065711975,
      "loss": 0.9002,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.4394632875919342,
      "rewards/margins": 0.19440510869026184,
      "rewards/rejected": -0.633868396282196,
      "step": 190
    },
    {
      "epoch": 0.17189514396218306,
      "grad_norm": 0.181160107254982,
      "learning_rate": 4.783863644106502e-06,
      "logits/chosen": 10.593437194824219,
      "logits/rejected": 11.435877799987793,
      "logps/chosen": -0.32495418190956116,
      "logps/rejected": -0.4480825364589691,
      "loss": 0.8773,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.48743128776550293,
      "rewards/margins": 0.18469250202178955,
      "rewards/rejected": -0.6721237897872925,
      "step": 200
    },
    {
      "epoch": 0.17189514396218306,
      "eval_logits/chosen": 9.299257278442383,
      "eval_logits/rejected": 10.055145263671875,
      "eval_logps/chosen": -0.31059205532073975,
      "eval_logps/rejected": -0.47102925181388855,
      "eval_loss": 0.8721462488174438,
      "eval_rewards/accuracies": 0.6105263233184814,
      "eval_rewards/chosen": -0.4658880829811096,
      "eval_rewards/margins": 0.24065588414669037,
      "eval_rewards/rejected": -0.7065439224243164,
      "eval_runtime": 25.78,
      "eval_samples_per_second": 29.209,
      "eval_steps_per_second": 3.685,
      "step": 200
    },
    {
      "epoch": 0.18048990116029223,
      "grad_norm": 0.24912959337234497,
      "learning_rate": 4.762067631165049e-06,
      "logits/chosen": 8.803088188171387,
      "logits/rejected": 9.326388359069824,
      "logps/chosen": -0.3249451816082001,
      "logps/rejected": -0.44993042945861816,
      "loss": 0.8484,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.4874177575111389,
      "rewards/margins": 0.18747788667678833,
      "rewards/rejected": -0.6748956441879272,
      "step": 210
    },
    {
      "epoch": 0.18908465835840138,
      "grad_norm": 0.319579541683197,
      "learning_rate": 4.7392794005985324e-06,
      "logits/chosen": 6.257112979888916,
      "logits/rejected": 7.168400764465332,
      "logps/chosen": -0.335318386554718,
      "logps/rejected": -0.5439311265945435,
      "loss": 0.8499,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.5029775500297546,
      "rewards/margins": 0.31291908025741577,
      "rewards/rejected": -0.8158966302871704,
      "step": 220
    },
    {
      "epoch": 0.19767941555651053,
      "grad_norm": 0.31494757533073425,
      "learning_rate": 4.715508948078037e-06,
      "logits/chosen": 5.725883960723877,
      "logits/rejected": 5.9254865646362305,
      "logps/chosen": -0.3735908567905426,
      "logps/rejected": -0.5729750394821167,
      "loss": 0.826,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.5603862404823303,
      "rewards/margins": 0.2990763187408447,
      "rewards/rejected": -0.859462559223175,
      "step": 230
    },
    {
      "epoch": 0.20627417275461968,
      "grad_norm": 0.46439653635025024,
      "learning_rate": 4.690766700109659e-06,
      "logits/chosen": 5.059751033782959,
      "logits/rejected": 5.128623008728027,
      "logps/chosen": -0.4083784222602844,
      "logps/rejected": -0.6792675852775574,
      "loss": 0.7992,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.612567663192749,
      "rewards/margins": 0.40633392333984375,
      "rewards/rejected": -1.0189014673233032,
      "step": 240
    },
    {
      "epoch": 0.21486892995272883,
      "grad_norm": 0.42406076192855835,
      "learning_rate": 4.665063509461098e-06,
      "logits/chosen": 4.128974437713623,
      "logits/rejected": 4.141166687011719,
      "logps/chosen": -0.4256651997566223,
      "logps/rejected": -0.7279168367385864,
      "loss": 0.7848,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.6384977102279663,
      "rewards/margins": 0.4533773958683014,
      "rewards/rejected": -1.0918750762939453,
      "step": 250
    },
    {
      "epoch": 0.21486892995272883,
      "eval_logits/chosen": 3.800307512283325,
      "eval_logits/rejected": 3.1472771167755127,
      "eval_logps/chosen": -0.4563433527946472,
      "eval_logps/rejected": -0.8247694373130798,
      "eval_loss": 0.7728626728057861,
      "eval_rewards/accuracies": 0.6526315808296204,
      "eval_rewards/chosen": -0.6845150589942932,
      "eval_rewards/margins": 0.5526391267776489,
      "eval_rewards/rejected": -1.237154245376587,
      "eval_runtime": 25.7836,
      "eval_samples_per_second": 29.205,
      "eval_steps_per_second": 3.685,
      "step": 250
    },
    {
      "epoch": 0.22346368715083798,
      "grad_norm": 0.4071955680847168,
      "learning_rate": 4.638410650401267e-06,
      "logits/chosen": 3.169527530670166,
      "logits/rejected": 2.603461503982544,
      "logps/chosen": -0.5029922723770142,
      "logps/rejected": -0.9469219446182251,
      "loss": 0.7273,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.7544883489608765,
      "rewards/margins": 0.6658946871757507,
      "rewards/rejected": -1.4203828573226929,
      "step": 260
    },
    {
      "epoch": 0.23205844434894715,
      "grad_norm": 0.6253886222839355,
      "learning_rate": 4.610819813755038e-06,
      "logits/chosen": 3.8718018531799316,
      "logits/rejected": 2.569753646850586,
      "logps/chosen": -0.4955294132232666,
      "logps/rejected": -0.8811863660812378,
      "loss": 0.7483,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.7432941198348999,
      "rewards/margins": 0.5784854888916016,
      "rewards/rejected": -1.321779489517212,
      "step": 270
    },
    {
      "epoch": 0.2406532015470563,
      "grad_norm": 0.5592113733291626,
      "learning_rate": 4.582303101775249e-06,
      "logits/chosen": 3.4818286895751953,
      "logits/rejected": 2.428328275680542,
      "logps/chosen": -0.5700691342353821,
      "logps/rejected": -1.010145664215088,
      "loss": 0.7165,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.8551036715507507,
      "rewards/margins": 0.6601148843765259,
      "rewards/rejected": -1.5152184963226318,
      "step": 280
    },
    {
      "epoch": 0.24924795874516545,
      "grad_norm": 0.8438608050346375,
      "learning_rate": 4.55287302283426e-06,
      "logits/chosen": 2.5937914848327637,
      "logits/rejected": 1.8570162057876587,
      "logps/chosen": -0.592321515083313,
      "logps/rejected": -1.1775600910186768,
      "loss": 0.6685,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.8884822130203247,
      "rewards/margins": 0.8778578042984009,
      "rewards/rejected": -1.7663400173187256,
      "step": 290
    },
    {
      "epoch": 0.2578427159432746,
      "grad_norm": 2.9559757709503174,
      "learning_rate": 4.522542485937369e-06,
      "logits/chosen": 3.2419090270996094,
      "logits/rejected": 1.9082870483398438,
      "logps/chosen": -0.6832663416862488,
      "logps/rejected": -1.5631868839263916,
      "loss": 0.6009,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -1.0248994827270508,
      "rewards/margins": 1.3198809623718262,
      "rewards/rejected": -2.344780445098877,
      "step": 300
    },
    {
      "epoch": 0.2578427159432746,
      "eval_logits/chosen": 2.5470504760742188,
      "eval_logits/rejected": 1.492888331413269,
      "eval_logps/chosen": -0.7285813689231873,
      "eval_logps/rejected": -1.8318607807159424,
      "eval_loss": 0.5855891704559326,
      "eval_rewards/accuracies": 0.7052631378173828,
      "eval_rewards/chosen": -1.092872142791748,
      "eval_rewards/margins": 1.6549187898635864,
      "eval_rewards/rejected": -2.747790813446045,
      "eval_runtime": 25.8105,
      "eval_samples_per_second": 29.174,
      "eval_steps_per_second": 3.681,
      "step": 300
    },
    {
      "epoch": 0.2664374731413838,
      "grad_norm": 1.4503060579299927,
      "learning_rate": 4.491324795060491e-06,
      "logits/chosen": 1.6672757863998413,
      "logits/rejected": 0.7888604402542114,
      "logps/chosen": -0.769140899181366,
      "logps/rejected": -2.0822532176971436,
      "loss": 0.512,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -1.1537113189697266,
      "rewards/margins": 1.9696683883666992,
      "rewards/rejected": -3.123379945755005,
      "step": 310
    },
    {
      "epoch": 0.2750322303394929,
      "grad_norm": 0.36741188168525696,
      "learning_rate": 4.4592336433146e-06,
      "logits/chosen": 2.6584715843200684,
      "logits/rejected": 1.835911750793457,
      "logps/chosen": -0.8400143384933472,
      "logps/rejected": -1.9262489080429077,
      "loss": 0.5405,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -1.2600215673446655,
      "rewards/margins": 1.6293519735336304,
      "rewards/rejected": -2.889373302459717,
      "step": 320
    },
    {
      "epoch": 0.28362698753760207,
      "grad_norm": 0.6233783960342407,
      "learning_rate": 4.426283106939474e-06,
      "logits/chosen": 3.2203617095947266,
      "logits/rejected": 2.3215420246124268,
      "logps/chosen": -0.7985933423042297,
      "logps/rejected": -2.4170174598693848,
      "loss": 0.5335,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -1.197890043258667,
      "rewards/margins": 2.427635669708252,
      "rewards/rejected": -3.625525712966919,
      "step": 330
    },
    {
      "epoch": 0.2922217447357112,
      "grad_norm": 1.0881849527359009,
      "learning_rate": 4.3924876391293915e-06,
      "logits/chosen": 2.229017734527588,
      "logits/rejected": 1.2251309156417847,
      "logps/chosen": -0.8058193325996399,
      "logps/rejected": -2.810622215270996,
      "loss": 0.4903,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -1.2087291479110718,
      "rewards/margins": 3.007204294204712,
      "rewards/rejected": -4.215933799743652,
      "step": 340
    },
    {
      "epoch": 0.30081650193382037,
      "grad_norm": 4.168415069580078,
      "learning_rate": 4.357862063693486e-06,
      "logits/chosen": 2.4198296070098877,
      "logits/rejected": 1.5391919612884521,
      "logps/chosen": -1.010558843612671,
      "logps/rejected": -2.2362923622131348,
      "loss": 0.5249,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -1.515838384628296,
      "rewards/margins": 1.8385999202728271,
      "rewards/rejected": -3.3544387817382812,
      "step": 350
    },
    {
      "epoch": 0.30081650193382037,
      "eval_logits/chosen": 2.996535539627075,
      "eval_logits/rejected": 2.064058303833008,
      "eval_logps/chosen": -0.8687878847122192,
      "eval_logps/rejected": -2.9790267944335938,
      "eval_loss": 0.5171241760253906,
      "eval_rewards/accuracies": 0.7263157963752747,
      "eval_rewards/chosen": -1.3031818866729736,
      "eval_rewards/margins": 3.165358781814575,
      "eval_rewards/rejected": -4.468540668487549,
      "eval_runtime": 25.8152,
      "eval_samples_per_second": 29.169,
      "eval_steps_per_second": 3.68,
      "step": 350
    },
    {
      "epoch": 0.3094112591319295,
      "grad_norm": 0.5646592378616333,
      "learning_rate": 4.322421568553529e-06,
      "logits/chosen": 3.050445556640625,
      "logits/rejected": 2.0960793495178223,
      "logps/chosen": -0.7702202796936035,
      "logps/rejected": -2.5967533588409424,
      "loss": 0.5067,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -1.1553303003311157,
      "rewards/margins": 2.739799737930298,
      "rewards/rejected": -3.895130157470703,
      "step": 360
    },
    {
      "epoch": 0.31800601633003867,
      "grad_norm": 0.5547713041305542,
      "learning_rate": 4.286181699082008e-06,
      "logits/chosen": 2.7148895263671875,
      "logits/rejected": 1.9958852529525757,
      "logps/chosen": -0.9548311233520508,
      "logps/rejected": -3.1348252296447754,
      "loss": 0.4726,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -1.4322465658187866,
      "rewards/margins": 3.269991397857666,
      "rewards/rejected": -4.702237606048584,
      "step": 370
    },
    {
      "epoch": 0.3266007735281478,
      "grad_norm": 3.4396660327911377,
      "learning_rate": 4.249158351283414e-06,
      "logits/chosen": 2.586766004562378,
      "logits/rejected": 2.070089340209961,
      "logps/chosen": -0.9903923273086548,
      "logps/rejected": -3.0135743618011475,
      "loss": 0.4801,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -1.485588550567627,
      "rewards/margins": 3.034773349761963,
      "rewards/rejected": -4.52036190032959,
      "step": 380
    },
    {
      "epoch": 0.33519553072625696,
      "grad_norm": 0.9405317306518555,
      "learning_rate": 4.211367764821722e-06,
      "logits/chosen": 4.370789527893066,
      "logits/rejected": 3.165931224822998,
      "logps/chosen": -0.7785463929176331,
      "logps/rejected": -2.456723928451538,
      "loss": 0.4585,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -1.1678194999694824,
      "rewards/margins": 2.5172665119171143,
      "rewards/rejected": -3.6850857734680176,
      "step": 390
    },
    {
      "epoch": 0.3437902879243661,
      "grad_norm": 0.7120731472969055,
      "learning_rate": 4.172826515897146e-06,
      "logits/chosen": 3.3425400257110596,
      "logits/rejected": 2.6448545455932617,
      "logps/chosen": -0.9174768328666687,
      "logps/rejected": -3.047037124633789,
      "loss": 0.4771,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -1.3762153387069702,
      "rewards/margins": 3.194340229034424,
      "rewards/rejected": -4.570555686950684,
      "step": 400
    },
    {
      "epoch": 0.3437902879243661,
      "eval_logits/chosen": 3.548964262008667,
      "eval_logits/rejected": 2.7726428508758545,
      "eval_logps/chosen": -1.0053316354751587,
      "eval_logps/rejected": -3.487654447555542,
      "eval_loss": 0.47841358184814453,
      "eval_rewards/accuracies": 0.7368420958518982,
      "eval_rewards/chosen": -1.5079973936080933,
      "eval_rewards/margins": 3.723484992980957,
      "eval_rewards/rejected": -5.231482028961182,
      "eval_runtime": 25.8148,
      "eval_samples_per_second": 29.169,
      "eval_steps_per_second": 3.68,
      "step": 400
    },
    {
      "epoch": 0.3523850451224753,
      "grad_norm": 2.403956651687622,
      "learning_rate": 4.133551509975264e-06,
      "logits/chosen": 3.2028489112854004,
      "logits/rejected": 2.2486982345581055,
      "logps/chosen": -0.9957242012023926,
      "logps/rejected": -3.243959426879883,
      "loss": 0.4449,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -1.4935863018035889,
      "rewards/margins": 3.3723526000976562,
      "rewards/rejected": -4.865939140319824,
      "step": 410
    },
    {
      "epoch": 0.36097980232058446,
      "grad_norm": 0.39530256390571594,
      "learning_rate": 4.093559974371725e-06,
      "logits/chosen": 3.8590214252471924,
      "logits/rejected": 3.1420931816101074,
      "logps/chosen": -0.9541120529174805,
      "logps/rejected": -3.0112829208374023,
      "loss": 0.4598,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -1.4311680793762207,
      "rewards/margins": 3.085756301879883,
      "rewards/rejected": -4.5169243812561035,
      "step": 420
    },
    {
      "epoch": 0.3695745595186936,
      "grad_norm": 0.29451707005500793,
      "learning_rate": 4.052869450695776e-06,
      "logits/chosen": 4.697268486022949,
      "logits/rejected": 3.7647697925567627,
      "logps/chosen": -1.1037578582763672,
      "logps/rejected": -3.8626160621643066,
      "loss": 0.4275,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -1.6556367874145508,
      "rewards/margins": 4.138287544250488,
      "rewards/rejected": -5.793923854827881,
      "step": 430
    },
    {
      "epoch": 0.37816931671680276,
      "grad_norm": 0.5065125823020935,
      "learning_rate": 4.011497787155938e-06,
      "logits/chosen": 3.5233864784240723,
      "logits/rejected": 2.798567533493042,
      "logps/chosen": -1.1753087043762207,
      "logps/rejected": -4.171238899230957,
      "loss": 0.4132,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -1.7629629373550415,
      "rewards/margins": 4.493895053863525,
      "rewards/rejected": -6.256857872009277,
      "step": 440
    },
    {
      "epoch": 0.3867640739149119,
      "grad_norm": 1.414167881011963,
      "learning_rate": 3.969463130731183e-06,
      "logits/chosen": 4.733740329742432,
      "logits/rejected": 4.114102363586426,
      "logps/chosen": -1.1846634149551392,
      "logps/rejected": -4.04649543762207,
      "loss": 0.4266,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -1.776995301246643,
      "rewards/margins": 4.292747974395752,
      "rewards/rejected": -6.0697431564331055,
      "step": 450
    },
    {
      "epoch": 0.3867640739149119,
      "eval_logits/chosen": 4.25229549407959,
      "eval_logits/rejected": 3.900564193725586,
      "eval_logps/chosen": -1.410205602645874,
      "eval_logps/rejected": -4.276910781860352,
      "eval_loss": 0.4397798478603363,
      "eval_rewards/accuracies": 0.800000011920929,
      "eval_rewards/chosen": -2.1153085231781006,
      "eval_rewards/margins": 4.3000569343566895,
      "eval_rewards/rejected": -6.415364742279053,
      "eval_runtime": 25.7968,
      "eval_samples_per_second": 29.19,
      "eval_steps_per_second": 3.683,
      "step": 450
    },
    {
      "epoch": 0.39535883111302106,
      "grad_norm": 1.7992101907730103,
      "learning_rate": 3.92678391921108e-06,
      "logits/chosen": 4.718934059143066,
      "logits/rejected": 4.305315971374512,
      "logps/chosen": -1.423595666885376,
      "logps/rejected": -3.9873733520507812,
      "loss": 0.367,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -2.1353936195373535,
      "rewards/margins": 3.8456661701202393,
      "rewards/rejected": -5.981060028076172,
      "step": 460
    },
    {
      "epoch": 0.4039535883111302,
      "grad_norm": 1.9630879163742065,
      "learning_rate": 3.88347887310836e-06,
      "logits/chosen": 3.8371150493621826,
      "logits/rejected": 3.5719306468963623,
      "logps/chosen": -2.0386481285095215,
      "logps/rejected": -4.779314994812012,
      "loss": 0.3957,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -3.0579724311828613,
      "rewards/margins": 4.1110005378723145,
      "rewards/rejected": -7.168972969055176,
      "step": 470
    },
    {
      "epoch": 0.41254834550923936,
      "grad_norm": 4.952139854431152,
      "learning_rate": 3.839566987447492e-06,
      "logits/chosen": 5.024113655090332,
      "logits/rejected": 4.880651950836182,
      "logps/chosen": -2.3612470626831055,
      "logps/rejected": -4.691690444946289,
      "loss": 0.3808,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -3.5418708324432373,
      "rewards/margins": 3.495664596557617,
      "rewards/rejected": -7.037535190582275,
      "step": 480
    },
    {
      "epoch": 0.4211431027073485,
      "grad_norm": 2.832200527191162,
      "learning_rate": 3.795067523432826e-06,
      "logits/chosen": 4.689079284667969,
      "logits/rejected": 4.435003280639648,
      "logps/chosen": -2.437671184539795,
      "logps/rejected": -5.003944396972656,
      "loss": 0.3555,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -3.6565067768096924,
      "rewards/margins": 3.849409580230713,
      "rewards/rejected": -7.505916595458984,
      "step": 490
    },
    {
      "epoch": 0.42973785990545765,
      "grad_norm": 3.2997682094573975,
      "learning_rate": 3.7500000000000005e-06,
      "logits/chosen": 4.286547660827637,
      "logits/rejected": 4.181652545928955,
      "logps/chosen": -2.3390612602233887,
      "logps/rejected": -4.888935565948486,
      "loss": 0.3211,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -3.508591890335083,
      "rewards/margins": 3.8248119354248047,
      "rewards/rejected": -7.333403587341309,
      "step": 500
    },
    {
      "epoch": 0.42973785990545765,
      "eval_logits/chosen": 3.5323922634124756,
      "eval_logits/rejected": 3.5116958618164062,
      "eval_logps/chosen": -2.679356575012207,
      "eval_logps/rejected": -5.927057266235352,
      "eval_loss": 0.37026864290237427,
      "eval_rewards/accuracies": 0.8736842274665833,
      "eval_rewards/chosen": -4.019035339355469,
      "eval_rewards/margins": 4.871551036834717,
      "eval_rewards/rejected": -8.890586853027344,
      "eval_runtime": 25.8768,
      "eval_samples_per_second": 29.099,
      "eval_steps_per_second": 3.671,
      "step": 500
    },
    {
      "epoch": 0.4383326171035668,
      "grad_norm": 2.6194217205047607,
      "learning_rate": 3.7043841852542884e-06,
      "logits/chosen": 4.265946388244629,
      "logits/rejected": 3.7863662242889404,
      "logps/chosen": -2.279764413833618,
      "logps/rejected": -4.825397968292236,
      "loss": 0.3375,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -3.4196460247039795,
      "rewards/margins": 3.818450927734375,
      "rewards/rejected": -7.238096714019775,
      "step": 510
    },
    {
      "epoch": 0.44692737430167595,
      "grad_norm": 4.434008598327637,
      "learning_rate": 3.658240087799655e-06,
      "logits/chosen": 3.4983534812927246,
      "logits/rejected": 3.3109116554260254,
      "logps/chosen": -2.7063632011413574,
      "logps/rejected": -5.9535369873046875,
      "loss": 0.3388,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -4.059545040130615,
      "rewards/margins": 4.870760917663574,
      "rewards/rejected": -8.930305480957031,
      "step": 520
    },
    {
      "epoch": 0.45552213149978515,
      "grad_norm": 2.4193809032440186,
      "learning_rate": 3.611587947962319e-06,
      "logits/chosen": 3.4949145317077637,
      "logits/rejected": 3.402980089187622,
      "logps/chosen": -2.5146005153656006,
      "logps/rejected": -5.63289737701416,
      "loss": 0.3281,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -3.7719013690948486,
      "rewards/margins": 4.677445411682129,
      "rewards/rejected": -8.449346542358398,
      "step": 530
    },
    {
      "epoch": 0.4641168886978943,
      "grad_norm": 10.788633346557617,
      "learning_rate": 3.564448228912682e-06,
      "logits/chosen": 3.3073112964630127,
      "logits/rejected": 3.163470506668091,
      "logps/chosen": -2.258653163909912,
      "logps/rejected": -5.642867088317871,
      "loss": 0.3565,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -3.387979507446289,
      "rewards/margins": 5.076320648193359,
      "rewards/rejected": -8.464300155639648,
      "step": 540
    },
    {
      "epoch": 0.47271164589600345,
      "grad_norm": 1.6846323013305664,
      "learning_rate": 3.516841607689501e-06,
      "logits/chosen": 3.476361036300659,
      "logits/rejected": 3.375828504562378,
      "logps/chosen": -2.5325064659118652,
      "logps/rejected": -5.799270153045654,
      "loss": 0.3103,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -3.798759937286377,
      "rewards/margins": 4.900145530700684,
      "rewards/rejected": -8.698905944824219,
      "step": 550
    },
    {
      "epoch": 0.47271164589600345,
      "eval_logits/chosen": 3.451749563217163,
"eval_logits/rejected": 3.3771002292633057, | |
"eval_logps/chosen": -2.9835667610168457, | |
"eval_logps/rejected": -6.5389509201049805, | |
"eval_loss": 0.32732319831848145, | |
"eval_rewards/accuracies": 0.9052631855010986, | |
"eval_rewards/chosen": -4.475350379943848, | |
"eval_rewards/margins": 5.333076000213623, | |
"eval_rewards/rejected": -9.808425903320312, | |
"eval_runtime": 25.8141, | |
"eval_samples_per_second": 29.17, | |
"eval_steps_per_second": 3.68, | |
"step": 550 | |
}, | |
{ | |
"epoch": 0.4813064030941126, | |
"grad_norm": 2.7683331966400146, | |
"learning_rate": 3.4687889661302577e-06, | |
"logits/chosen": 2.468799591064453, | |
"logits/rejected": 2.4257254600524902, | |
"logps/chosen": -2.6801788806915283, | |
"logps/rejected": -6.136897087097168, | |
"loss": 0.3171, | |
"rewards/accuracies": 0.887499988079071, | |
"rewards/chosen": -4.020268440246582, | |
"rewards/margins": 5.185078144073486, | |
"rewards/rejected": -9.205347061157227, | |
"step": 560 | |
}, | |
{ | |
"epoch": 0.48990116029222175, | |
"grad_norm": 11.559685707092285, | |
"learning_rate": 3.4203113817116955e-06, | |
"logits/chosen": 3.6535427570343018, | |
"logits/rejected": 3.583962917327881, | |
"logps/chosen": -2.7984983921051025, | |
"logps/rejected": -6.468808650970459, | |
"loss": 0.331, | |
"rewards/accuracies": 0.949999988079071, | |
"rewards/chosen": -4.197747707366943, | |
"rewards/margins": 5.505465984344482, | |
"rewards/rejected": -9.703214645385742, | |
"step": 570 | |
}, | |
{ | |
"epoch": 0.4984959174903309, | |
"grad_norm": 7.691457271575928, | |
"learning_rate": 3.3714301183045382e-06, | |
"logits/chosen": 2.7467286586761475, | |
"logits/rejected": 2.4911317825317383, | |
"logps/chosen": -2.4207634925842285, | |
"logps/rejected": -6.385074138641357, | |
"loss": 0.2557, | |
"rewards/accuracies": 0.9750000238418579, | |
"rewards/chosen": -3.631145477294922, | |
"rewards/margins": 5.946464538574219, | |
"rewards/rejected": -9.577610969543457, | |
"step": 580 | |
}, | |
{ | |
"epoch": 0.50709067468844, | |
"grad_norm": 5.381045341491699, | |
"learning_rate": 3.3221666168464584e-06, | |
"logits/chosen": 3.239227294921875, | |
"logits/rejected": 3.1982555389404297, | |
"logps/chosen": -3.2162883281707764, | |
"logps/rejected": -6.693168640136719, | |
"loss": 0.2921, | |
"rewards/accuracies": 0.949999988079071, | |
"rewards/chosen": -4.824432373046875, | |
"rewards/margins": 5.215321063995361, | |
"rewards/rejected": -10.039752960205078, | |
"step": 590 | |
}, | |
{ | |
"epoch": 0.5156854318865493, | |
"grad_norm": 2.616710901260376, | |
"learning_rate": 3.272542485937369e-06, | |
"logits/chosen": 2.7326784133911133, | |
"logits/rejected": 2.4248623847961426, | |
"logps/chosen": -3.001952648162842, | |
"logps/rejected": -6.597250461578369, | |
"loss": 0.3225, | |
"rewards/accuracies": 0.9125000238418579, | |
"rewards/chosen": -4.502928733825684, | |
"rewards/margins": 5.392947196960449, | |
"rewards/rejected": -9.89587688446045, | |
"step": 600 | |
}, | |
{ | |
"epoch": 0.5156854318865493, | |
"eval_logits/chosen": 3.1995701789855957, | |
"eval_logits/rejected": 3.2761952877044678, | |
"eval_logps/chosen": -3.166325330734253, | |
"eval_logps/rejected": -7.062953472137451, | |
"eval_loss": 0.29786577820777893, | |
"eval_rewards/accuracies": 0.9263157844543457, | |
"eval_rewards/chosen": -4.74948787689209, | |
"eval_rewards/margins": 5.84494161605835, | |
"eval_rewards/rejected": -10.594429969787598, | |
"eval_runtime": 25.8074, | |
"eval_samples_per_second": 29.178, | |
"eval_steps_per_second": 3.681, | |
"step": 600 | |
}, | |
{ | |
"epoch": 0.5242801890846583, | |
"grad_norm": 3.5550060272216797, | |
"learning_rate": 3.222579492361179e-06, | |
"logits/chosen": 2.8619818687438965, | |
"logits/rejected": 3.014125347137451, | |
"logps/chosen": -2.9217543601989746, | |
"logps/rejected": -6.349586486816406, | |
"loss": 0.2516, | |
"rewards/accuracies": 0.9125000238418579, | |
"rewards/chosen": -4.382631301879883, | |
"rewards/margins": 5.141747951507568, | |
"rewards/rejected": -9.52437973022461, | |
"step": 610 | |
}, | |
{ | |
"epoch": 0.5328749462827675, | |
"grad_norm": 3.6020898818969727, | |
"learning_rate": 3.1722995515381644e-06, | |
"logits/chosen": 2.770552635192871, | |
"logits/rejected": 2.9711716175079346, | |
"logps/chosen": -2.8700039386749268, | |
"logps/rejected": -6.527164459228516, | |
"loss": 0.3062, | |
"rewards/accuracies": 0.925000011920929, | |
"rewards/chosen": -4.305006504058838, | |
"rewards/margins": 5.485739707946777, | |
"rewards/rejected": -9.790745735168457, | |
"step": 620 | |
}, | |
{ | |
"epoch": 0.5414697034808766, | |
"grad_norm": 2.2209339141845703, | |
"learning_rate": 3.121724717912138e-06, | |
"logits/chosen": 3.1150155067443848, | |
"logits/rejected": 3.038687229156494, | |
"logps/chosen": -2.9821603298187256, | |
"logps/rejected": -6.432187080383301, | |
"loss": 0.2726, | |
"rewards/accuracies": 0.9125000238418579, | |
"rewards/chosen": -4.473240852355957, | |
"rewards/margins": 5.175040245056152, | |
"rewards/rejected": -9.648280143737793, | |
"step": 630 | |
}, | |
{ | |
"epoch": 0.5500644606789858, | |
"grad_norm": 2.148709297180176, | |
"learning_rate": 3.0708771752766397e-06, | |
"logits/chosen": 3.256204605102539, | |
"logits/rejected": 2.9765384197235107, | |
"logps/chosen": -2.7979862689971924, | |
"logps/rejected": -6.7102813720703125, | |
"loss": 0.2861, | |
"rewards/accuracies": 0.9125000238418579, | |
"rewards/chosen": -4.196979522705078, | |
"rewards/margins": 5.868442535400391, | |
"rewards/rejected": -10.065422058105469, | |
"step": 640 | |
}, | |
{ | |
"epoch": 0.5586592178770949, | |
"grad_norm": 7.988170623779297, | |
"learning_rate": 3.019779227044398e-06, | |
"logits/chosen": 2.7312607765197754, | |
"logits/rejected": 2.705409526824951, | |
"logps/chosen": -2.8119194507598877, | |
"logps/rejected": -6.627874851226807, | |
"loss": 0.2507, | |
"rewards/accuracies": 0.9125000238418579, | |
"rewards/chosen": -4.217879295349121, | |
"rewards/margins": 5.723933219909668, | |
"rewards/rejected": -9.941811561584473, | |
"step": 650 | |
}, | |
{ | |
"epoch": 0.5586592178770949, | |
"eval_logits/chosen": 2.9934144020080566, | |
"eval_logits/rejected": 3.124950647354126, | |
"eval_logps/chosen": -3.1934540271759033, | |
"eval_logps/rejected": -7.429007053375244, | |
"eval_loss": 0.28581172227859497, | |
"eval_rewards/accuracies": 0.9368420839309692, | |
"eval_rewards/chosen": -4.7901811599731445, | |
"eval_rewards/margins": 6.353330135345459, | |
"eval_rewards/rejected": -11.143510818481445, | |
"eval_runtime": 25.8075, | |
"eval_samples_per_second": 29.178, | |
"eval_steps_per_second": 3.681, | |
"step": 650 | |
}, | |
{ | |
"epoch": 0.5672539750752041, | |
"grad_norm": 3.133023500442505, | |
"learning_rate": 2.9684532864643123e-06, | |
"logits/chosen": 3.3388328552246094, | |
"logits/rejected": 3.3477108478546143, | |
"logps/chosen": -3.3007023334503174, | |
"logps/rejected": -6.496421813964844, | |
"loss": 0.2635, | |
"rewards/accuracies": 0.9125000238418579, | |
"rewards/chosen": -4.951053619384766, | |
"rewards/margins": 4.793579578399658, | |
"rewards/rejected": -9.744632720947266, | |
"step": 660 | |
}, | |
{ | |
"epoch": 0.5758487322733132, | |
"grad_norm": 3.6694588661193848, | |
"learning_rate": 2.9169218667902562e-06, | |
"logits/chosen": 3.1316323280334473, | |
"logits/rejected": 2.8977527618408203, | |
"logps/chosen": -2.8769032955169678, | |
"logps/rejected": -5.834782123565674, | |
"loss": 0.2854, | |
"rewards/accuracies": 0.875, | |
"rewards/chosen": -4.31535530090332, | |
"rewards/margins": 4.4368181228637695, | |
"rewards/rejected": -8.752172470092773, | |
"step": 670 | |
}, | |
{ | |
"epoch": 0.5844434894714224, | |
"grad_norm": 3.409086227416992, | |
"learning_rate": 2.8652075714060296e-06, | |
"logits/chosen": 2.726431369781494, | |
"logits/rejected": 2.9928715229034424, | |
"logps/chosen": -3.0180654525756836, | |
"logps/rejected": -7.160694122314453, | |
"loss": 0.2461, | |
"rewards/accuracies": 0.949999988079071, | |
"rewards/chosen": -4.527098178863525, | |
"rewards/margins": 6.213942050933838, | |
"rewards/rejected": -10.741040229797363, | |
"step": 680 | |
}, | |
{ | |
"epoch": 0.5930382466695315, | |
"grad_norm": 5.1549153327941895, | |
"learning_rate": 2.813333083910761e-06, | |
"logits/chosen": 3.371833086013794, | |
"logits/rejected": 3.317675828933716, | |
"logps/chosen": -3.1700775623321533, | |
"logps/rejected": -6.8295745849609375, | |
"loss": 0.2458, | |
"rewards/accuracies": 0.949999988079071, | |
"rewards/chosen": -4.755115985870361, | |
"rewards/margins": 5.489245891571045, | |
"rewards/rejected": -10.244361877441406, | |
"step": 690 | |
}, | |
{ | |
"epoch": 0.6016330038676407, | |
"grad_norm": 4.8320112228393555, | |
"learning_rate": 2.761321158169134e-06, | |
"logits/chosen": 2.5933046340942383, | |
"logits/rejected": 2.7114923000335693, | |
"logps/chosen": -3.2017345428466797, | |
"logps/rejected": -7.420884609222412, | |
"loss": 0.2464, | |
"rewards/accuracies": 0.925000011920929, | |
"rewards/chosen": -4.8026018142700195, | |
"rewards/margins": 6.328725814819336, | |
"rewards/rejected": -11.131326675415039, | |
"step": 700 | |
}, | |
{ | |
"epoch": 0.6016330038676407, | |
"eval_logits/chosen": 2.7770564556121826, | |
"eval_logits/rejected": 2.9842188358306885, | |
"eval_logps/chosen": -3.5835421085357666, | |
"eval_logps/rejected": -8.199637413024902, | |
"eval_loss": 0.26223084330558777, | |
"eval_rewards/accuracies": 0.9263157844543457, | |
"eval_rewards/chosen": -5.375312805175781, | |
"eval_rewards/margins": 6.924142360687256, | |
"eval_rewards/rejected": -12.299455642700195, | |
"eval_runtime": 25.8482, | |
"eval_samples_per_second": 29.132, | |
"eval_steps_per_second": 3.675, | |
"step": 700 | |
}, | |
{ | |
"epoch": 0.6102277610657499, | |
"grad_norm": 3.6024580001831055, | |
"learning_rate": 2.70919460833079e-06, | |
"logits/chosen": 2.4713566303253174, | |
"logits/rejected": 2.523773193359375, | |
"logps/chosen": -2.962407112121582, | |
"logps/rejected": -7.556809902191162, | |
"loss": 0.2785, | |
"rewards/accuracies": 0.9624999761581421, | |
"rewards/chosen": -4.443611145019531, | |
"rewards/margins": 6.891604423522949, | |
"rewards/rejected": -11.33521556854248, | |
"step": 710 | |
}, | |
{ | |
"epoch": 0.618822518263859, | |
"grad_norm": 3.6402506828308105, | |
"learning_rate": 2.6569762988232838e-06, | |
"logits/chosen": 3.112736940383911, | |
"logits/rejected": 2.901930809020996, | |
"logps/chosen": -3.1669626235961914, | |
"logps/rejected": -7.049059867858887, | |
"loss": 0.2623, | |
"rewards/accuracies": 0.925000011920929, | |
"rewards/chosen": -4.750443935394287, | |
"rewards/margins": 5.823145866394043, | |
"rewards/rejected": -10.573590278625488, | |
"step": 720 | |
}, | |
{ | |
"epoch": 0.6274172754619682, | |
"grad_norm": 9.418655395507812, | |
"learning_rate": 2.604689134322999e-06, | |
"logits/chosen": 2.8671321868896484, | |
"logits/rejected": 2.763396739959717, | |
"logps/chosen": -3.1475024223327637, | |
"logps/rejected": -7.338767051696777, | |
"loss": 0.2702, | |
"rewards/accuracies": 0.9125000238418579, | |
"rewards/chosen": -4.721253871917725, | |
"rewards/margins": 6.286896705627441, | |
"rewards/rejected": -11.008151054382324, | |
"step": 730 | |
}, | |
{ | |
"epoch": 0.6360120326600773, | |
"grad_norm": 6.016907215118408, | |
"learning_rate": 2.5523560497083927e-06, | |
"logits/chosen": 2.8831398487091064, | |
"logits/rejected": 2.9516844749450684, | |
"logps/chosen": -3.055087089538574, | |
"logps/rejected": -7.092196464538574, | |
"loss": 0.2103, | |
"rewards/accuracies": 0.949999988079071, | |
"rewards/chosen": -4.5826311111450195, | |
"rewards/margins": 6.0556640625, | |
"rewards/rejected": -10.638293266296387, | |
"step": 740 | |
}, | |
{ | |
"epoch": 0.6446067898581865, | |
"grad_norm": 2.791388988494873, | |
"learning_rate": 2.5e-06, | |
"logits/chosen": 3.5676627159118652, | |
"logits/rejected": 3.4635086059570312, | |
"logps/chosen": -2.8061039447784424, | |
"logps/rejected": -6.826286315917969, | |
"loss": 0.2545, | |
"rewards/accuracies": 0.949999988079071, | |
"rewards/chosen": -4.209155082702637, | |
"rewards/margins": 6.030273914337158, | |
"rewards/rejected": -10.239428520202637, | |
"step": 750 | |
}, | |
{ | |
"epoch": 0.6446067898581865, | |
"eval_logits/chosen": 2.94565749168396, | |
"eval_logits/rejected": 3.170260190963745, | |
"eval_logps/chosen": -3.455902338027954, | |
"eval_logps/rejected": -8.303979873657227, | |
"eval_loss": 0.2537091076374054, | |
"eval_rewards/accuracies": 0.9368420839309692, | |
"eval_rewards/chosen": -5.1838531494140625, | |
"eval_rewards/margins": 7.272115707397461, | |
"eval_rewards/rejected": -12.455968856811523, | |
"eval_runtime": 25.8128, | |
"eval_samples_per_second": 29.172, | |
"eval_steps_per_second": 3.68, | |
"step": 750 | |
}, | |
{ | |
"epoch": 0.6532015470562956, | |
"grad_norm": 2.978318452835083, | |
"learning_rate": 2.447643950291608e-06, | |
"logits/chosen": 3.1550345420837402, | |
"logits/rejected": 2.9587855339050293, | |
"logps/chosen": -3.403642177581787, | |
"logps/rejected": -7.627197265625, | |
"loss": 0.2261, | |
"rewards/accuracies": 0.9375, | |
"rewards/chosen": -5.10546350479126, | |
"rewards/margins": 6.335333824157715, | |
"rewards/rejected": -11.440796852111816, | |
"step": 760 | |
}, | |
{ | |
"epoch": 0.6617963042544048, | |
"grad_norm": 4.7188801765441895, | |
"learning_rate": 2.3953108656770018e-06, | |
"logits/chosen": 3.7232565879821777, | |
"logits/rejected": 3.5991597175598145, | |
"logps/chosen": -3.6658260822296143, | |
"logps/rejected": -7.148935794830322, | |
"loss": 0.2703, | |
"rewards/accuracies": 0.862500011920929, | |
"rewards/chosen": -5.498739242553711, | |
"rewards/margins": 5.22466516494751, | |
"rewards/rejected": -10.723405838012695, | |
"step": 770 | |
}, | |
{ | |
"epoch": 0.6703910614525139, | |
"grad_norm": 4.847439289093018, | |
"learning_rate": 2.3430237011767166e-06, | |
"logits/chosen": 2.366446018218994, | |
"logits/rejected": 2.4268651008605957, | |
"logps/chosen": -3.6872811317443848, | |
"logps/rejected": -8.306299209594727, | |
"loss": 0.2493, | |
"rewards/accuracies": 0.987500011920929, | |
"rewards/chosen": -5.530921459197998, | |
"rewards/margins": 6.928528785705566, | |
"rewards/rejected": -12.459450721740723, | |
"step": 780 | |
}, | |
{ | |
"epoch": 0.6789858186506231, | |
"grad_norm": 3.72248911857605, | |
"learning_rate": 2.290805391669212e-06, | |
"logits/chosen": 2.7826573848724365, | |
"logits/rejected": 2.9013209342956543, | |
"logps/chosen": -3.5557899475097656, | |
"logps/rejected": -7.959009647369385, | |
"loss": 0.2461, | |
"rewards/accuracies": 0.925000011920929, | |
"rewards/chosen": -5.33368444442749, | |
"rewards/margins": 6.604828834533691, | |
"rewards/rejected": -11.938512802124023, | |
"step": 790 | |
}, | |
{ | |
"epoch": 0.6875805758487322, | |
"grad_norm": 4.7869343757629395, | |
"learning_rate": 2.238678841830867e-06, | |
"logits/chosen": 2.5417182445526123, | |
"logits/rejected": 2.938063621520996, | |
"logps/chosen": -3.5474331378936768, | |
"logps/rejected": -7.563382148742676, | |
"loss": 0.2483, | |
"rewards/accuracies": 0.925000011920929, | |
"rewards/chosen": -5.3211493492126465, | |
"rewards/margins": 6.023923873901367, | |
"rewards/rejected": -11.345073699951172, | |
"step": 800 | |
}, | |
{ | |
"epoch": 0.6875805758487322, | |
"eval_logits/chosen": 2.9645943641662598, | |
"eval_logits/rejected": 3.2736430168151855, | |
"eval_logps/chosen": -3.4982783794403076, | |
"eval_logps/rejected": -8.614095687866211, | |
"eval_loss": 0.24035032093524933, | |
"eval_rewards/accuracies": 0.9263157844543457, | |
"eval_rewards/chosen": -5.24741792678833, | |
"eval_rewards/margins": 7.673725128173828, | |
"eval_rewards/rejected": -12.921142578125, | |
"eval_runtime": 25.8098, | |
"eval_samples_per_second": 29.175, | |
"eval_steps_per_second": 3.681, | |
"step": 800 | |
}, | |
{ | |
"epoch": 0.6961753330468414, | |
"grad_norm": 5.635983943939209, | |
"learning_rate": 2.186666916089239e-06, | |
"logits/chosen": 2.7499587535858154, | |
"logits/rejected": 2.6932997703552246, | |
"logps/chosen": -3.2679648399353027, | |
"logps/rejected": -8.038375854492188, | |
"loss": 0.2647, | |
"rewards/accuracies": 0.925000011920929, | |
"rewards/chosen": -4.901947975158691, | |
"rewards/margins": 7.155615329742432, | |
"rewards/rejected": -12.057561874389648, | |
"step": 810 | |
}, | |
{ | |
"epoch": 0.7047700902449506, | |
"grad_norm": 4.1279215812683105, | |
"learning_rate": 2.134792428593971e-06, | |
"logits/chosen": 3.1286826133728027, | |
"logits/rejected": 3.3689827919006348, | |
"logps/chosen": -3.5489754676818848, | |
"logps/rejected": -8.034095764160156, | |
"loss": 0.1888, | |
"rewards/accuracies": 0.8999999761581421, | |
"rewards/chosen": -5.323462963104248, | |
"rewards/margins": 6.727679252624512, | |
"rewards/rejected": -12.051143646240234, | |
"step": 820 | |
}, | |
{ | |
"epoch": 0.7133648474430597, | |
"grad_norm": 4.562918186187744, | |
"learning_rate": 2.0830781332097446e-06, | |
"logits/chosen": 2.635140895843506, | |
"logits/rejected": 2.909487247467041, | |
"logps/chosen": -3.598254442214966, | |
"logps/rejected": -8.546786308288574, | |
"loss": 0.2141, | |
"rewards/accuracies": 0.949999988079071, | |
"rewards/chosen": -5.39738130569458, | |
"rewards/margins": 7.422798156738281, | |
"rewards/rejected": -12.820180892944336, | |
"step": 830 | |
}, | |
{ | |
"epoch": 0.7219596046411689, | |
"grad_norm": 5.229101657867432, | |
"learning_rate": 2.031546713535688e-06, | |
"logits/chosen": 3.0485472679138184, | |
"logits/rejected": 3.2676749229431152, | |
"logps/chosen": -3.5765304565429688, | |
"logps/rejected": -8.731932640075684, | |
"loss": 0.1953, | |
"rewards/accuracies": 0.949999988079071, | |
"rewards/chosen": -5.364795207977295, | |
"rewards/margins": 7.7331037521362305, | |
"rewards/rejected": -13.097898483276367, | |
"step": 840 | |
}, | |
{ | |
"epoch": 0.730554361839278, | |
"grad_norm": 3.0395517349243164, | |
"learning_rate": 1.9802207729556023e-06, | |
"logits/chosen": 3.242750644683838, | |
"logits/rejected": 3.410076141357422, | |
"logps/chosen": -3.5732295513153076, | |
"logps/rejected": -7.774885654449463, | |
"loss": 0.2395, | |
"rewards/accuracies": 0.8999999761581421, | |
"rewards/chosen": -5.359843730926514, | |
"rewards/margins": 6.302483558654785, | |
"rewards/rejected": -11.662328720092773, | |
"step": 850 | |
}, | |
{ | |
"epoch": 0.730554361839278, | |
"eval_logits/chosen": 2.9314279556274414, | |
"eval_logits/rejected": 3.276207208633423, | |
"eval_logps/chosen": -3.8869926929473877, | |
"eval_logps/rejected": -9.050567626953125, | |
"eval_loss": 0.2363433688879013, | |
"eval_rewards/accuracies": 0.9263157844543457, | |
"eval_rewards/chosen": -5.830489158630371, | |
"eval_rewards/margins": 7.745361328125, | |
"eval_rewards/rejected": -13.575852394104004, | |
"eval_runtime": 25.8152, | |
"eval_samples_per_second": 29.169, | |
"eval_steps_per_second": 3.68, | |
"step": 850 | |
}, | |
{ | |
"epoch": 0.7391491190373872, | |
"grad_norm": 3.7891011238098145, | |
"learning_rate": 1.9291228247233607e-06, | |
"logits/chosen": 1.9879547357559204, | |
"logits/rejected": 2.725163698196411, | |
"logps/chosen": -3.3898093700408936, | |
"logps/rejected": -8.569767951965332, | |
"loss": 0.2234, | |
"rewards/accuracies": 0.925000011920929, | |
"rewards/chosen": -5.084713935852051, | |
"rewards/margins": 7.769936561584473, | |
"rewards/rejected": -12.854650497436523, | |
"step": 860 | |
}, | |
{ | |
"epoch": 0.7477438762354963, | |
"grad_norm": 2.793806552886963, | |
"learning_rate": 1.8782752820878636e-06, | |
"logits/chosen": 2.8827013969421387, | |
"logits/rejected": 3.106083631515503, | |
"logps/chosen": -3.376796245574951, | |
"logps/rejected": -8.713462829589844, | |
"loss": 0.2045, | |
"rewards/accuracies": 0.949999988079071, | |
"rewards/chosen": -5.065195083618164, | |
"rewards/margins": 8.004999160766602, | |
"rewards/rejected": -13.07019329071045, | |
"step": 870 | |
}, | |
{ | |
"epoch": 0.7563386334336055, | |
"grad_norm": 3.8785598278045654, | |
"learning_rate": 1.827700448461836e-06, | |
"logits/chosen": 3.18955659866333, | |
"logits/rejected": 3.332995653152466, | |
"logps/chosen": -3.6388778686523438, | |
"logps/rejected": -8.38855266571045, | |
"loss": 0.231, | |
"rewards/accuracies": 0.9125000238418579, | |
"rewards/chosen": -5.458316802978516, | |
"rewards/margins": 7.124513149261475, | |
"rewards/rejected": -12.582829475402832, | |
"step": 880 | |
}, | |
{ | |
"epoch": 0.7649333906317146, | |
"grad_norm": 3.2375245094299316, | |
"learning_rate": 1.7774205076388207e-06, | |
"logits/chosen": 3.7443747520446777, | |
"logits/rejected": 3.607149839401245, | |
"logps/chosen": -3.7576351165771484, | |
"logps/rejected": -8.150545120239258, | |
"loss": 0.2398, | |
"rewards/accuracies": 0.8999999761581421, | |
"rewards/chosen": -5.636452674865723, | |
"rewards/margins": 6.589364528656006, | |
"rewards/rejected": -12.22581672668457, | |
"step": 890 | |
}, | |
{ | |
"epoch": 0.7735281478298238, | |
"grad_norm": 3.1005704402923584, | |
"learning_rate": 1.7274575140626318e-06, | |
"logits/chosen": 2.988206386566162, | |
"logits/rejected": 3.322185516357422, | |
"logps/chosen": -3.125667095184326, | |
"logps/rejected": -8.629180908203125, | |
"loss": 0.2388, | |
"rewards/accuracies": 0.9750000238418579, | |
"rewards/chosen": -4.68850040435791, | |
"rewards/margins": 8.255270957946777, | |
"rewards/rejected": -12.943771362304688, | |
"step": 900 | |
}, | |
{ | |
"epoch": 0.7735281478298238, | |
"eval_logits/chosen": 3.079637289047241, | |
"eval_logits/rejected": 3.3415913581848145, | |
"eval_logps/chosen": -3.6503636837005615, | |
"eval_logps/rejected": -8.939910888671875, | |
"eval_loss": 0.22793449461460114, | |
"eval_rewards/accuracies": 0.9157894849777222, | |
"eval_rewards/chosen": -5.4755449295043945, | |
"eval_rewards/margins": 7.934320449829102, | |
"eval_rewards/rejected": -13.409865379333496, | |
"eval_runtime": 25.8118, | |
"eval_samples_per_second": 29.173, | |
"eval_steps_per_second": 3.68, | |
"step": 900 | |
}, | |
{ | |
"epoch": 0.7821229050279329, | |
"grad_norm": 2.3264434337615967, | |
"learning_rate": 1.677833383153542e-06, | |
"logits/chosen": 2.6580982208251953, | |
"logits/rejected": 2.6074371337890625, | |
"logps/chosen": -3.126781702041626, | |
"logps/rejected": -8.055559158325195, | |
"loss": 0.2267, | |
"rewards/accuracies": 0.949999988079071, | |
"rewards/chosen": -4.69017219543457, | |
"rewards/margins": 7.393167018890381, | |
"rewards/rejected": -12.083338737487793, | |
"step": 910 | |
}, | |
{ | |
"epoch": 0.7907176622260421, | |
"grad_norm": 5.1758880615234375, | |
"learning_rate": 1.6285698816954626e-06, | |
"logits/chosen": 3.202477216720581, | |
"logits/rejected": 3.286799907684326, | |
"logps/chosen": -3.5538082122802734, | |
"logps/rejected": -8.186088562011719, | |
"loss": 0.2051, | |
"rewards/accuracies": 0.925000011920929, | |
"rewards/chosen": -5.330712795257568, | |
"rewards/margins": 6.94842004776001, | |
"rewards/rejected": -12.279133796691895, | |
"step": 920 | |
}, | |
{ | |
"epoch": 0.7993124194241513, | |
"grad_norm": 1.7054041624069214, | |
"learning_rate": 1.5796886182883053e-06, | |
"logits/chosen": 3.129915714263916, | |
"logits/rejected": 3.352510929107666, | |
"logps/chosen": -3.1991374492645264, | |
"logps/rejected": -8.518336296081543, | |
"loss": 0.2353, | |
"rewards/accuracies": 0.949999988079071, | |
"rewards/chosen": -4.798706531524658, | |
"rewards/margins": 7.978797912597656, | |
"rewards/rejected": -12.777504920959473, | |
"step": 930 | |
}, | |
{ | |
"epoch": 0.8079071766222604, | |
"grad_norm": 2.9931082725524902, | |
"learning_rate": 1.5312110338697427e-06, | |
"logits/chosen": 2.5596461296081543, | |
"logits/rejected": 2.5915169715881348, | |
"logps/chosen": -3.3413949012756348, | |
"logps/rejected": -7.961850643157959, | |
"loss": 0.2516, | |
"rewards/accuracies": 0.875, | |
"rewards/chosen": -5.012092113494873, | |
"rewards/margins": 6.930683135986328, | |
"rewards/rejected": -11.942774772644043, | |
"step": 940 | |
}, | |
{ | |
"epoch": 0.8165019338203696, | |
"grad_norm": 4.107710838317871, | |
"learning_rate": 1.4831583923105e-06, | |
"logits/chosen": 2.8222692012786865, | |
"logits/rejected": 2.910057306289673, | |
"logps/chosen": -3.256718397140503, | |
"logps/rejected": -8.105169296264648, | |
"loss": 0.2299, | |
"rewards/accuracies": 0.9624999761581421, | |
"rewards/chosen": -4.885077476501465, | |
"rewards/margins": 7.272677421569824, | |
"rewards/rejected": -12.157754898071289, | |
"step": 950 | |
}, | |
{ | |
"epoch": 0.8165019338203696, | |
"eval_logits/chosen": 3.0568909645080566, | |
"eval_logits/rejected": 3.3856008052825928, | |
"eval_logps/chosen": -3.493980646133423, | |
"eval_logps/rejected": -8.99133014678955, | |
"eval_loss": 0.2293253093957901, | |
"eval_rewards/accuracies": 0.9368420839309692, | |
"eval_rewards/chosen": -5.240970611572266, | |
"eval_rewards/margins": 8.246024131774902, | |
"eval_rewards/rejected": -13.486994743347168, | |
"eval_runtime": 25.8236, | |
"eval_samples_per_second": 29.159, | |
"eval_steps_per_second": 3.679, | |
"step": 950 | |
}, | |
{ | |
"epoch": 0.8250966910184787, | |
"grad_norm": 4.835093021392822, | |
"learning_rate": 1.4355517710873184e-06, | |
"logits/chosen": 3.3382835388183594, | |
"logits/rejected": 3.328683376312256, | |
"logps/chosen": -3.7020835876464844, | |
"logps/rejected": -8.166964530944824, | |
"loss": 0.2091, | |
"rewards/accuracies": 0.9375, | |
"rewards/chosen": -5.553124904632568, | |
"rewards/margins": 6.697320461273193, | |
"rewards/rejected": -12.250445365905762, | |
"step": 960 | |
}, | |
{ | |
"epoch": 0.8336914482165879, | |
"grad_norm": 4.466915130615234, | |
"learning_rate": 1.388412052037682e-06, | |
"logits/chosen": 2.396477222442627, | |
"logits/rejected": 2.636380672454834, | |
"logps/chosen": -3.1254048347473145, | |
"logps/rejected": -9.067309379577637, | |
"loss": 0.2295, | |
"rewards/accuracies": 0.949999988079071, | |
"rewards/chosen": -4.688107013702393, | |
"rewards/margins": 8.912858963012695, | |
"rewards/rejected": -13.600967407226562, | |
"step": 970 | |
}, | |
{ | |
"epoch": 0.842286205414697, | |
"grad_norm": 2.845311403274536, | |
"learning_rate": 1.3417599122003464e-06, | |
"logits/chosen": 2.760969638824463, | |
"logits/rejected": 2.9475345611572266, | |
"logps/chosen": -3.6206259727478027, | |
"logps/rejected": -9.263578414916992, | |
"loss": 0.1765, | |
"rewards/accuracies": 0.9624999761581421, | |
"rewards/chosen": -5.430939674377441, | |
"rewards/margins": 8.464428901672363, | |
"rewards/rejected": -13.895367622375488, | |
"step": 980 | |
}, | |
{ | |
"epoch": 0.8508809626128062, | |
"grad_norm": 4.265077590942383, | |
"learning_rate": 1.2956158147457116e-06, | |
"logits/chosen": 3.060234546661377, | |
"logits/rejected": 3.3040313720703125, | |
"logps/chosen": -3.448155641555786, | |
"logps/rejected": -8.33607292175293, | |
"loss": 0.2154, | |
"rewards/accuracies": 0.9375, | |
"rewards/chosen": -5.172233581542969, | |
"rewards/margins": 7.3318772315979, | |
"rewards/rejected": -12.504110336303711, | |
"step": 990 | |
}, | |
{ | |
"epoch": 0.8594757198109153, | |
"grad_norm": 2.6619937419891357, | |
"learning_rate": 1.2500000000000007e-06, | |
"logits/chosen": 3.205899715423584, | |
"logits/rejected": 3.2403626441955566, | |
"logps/chosen": -2.9734084606170654, | |
"logps/rejected": -7.3107008934021, | |
"loss": 0.1983, | |
"rewards/accuracies": 0.925000011920929, | |
"rewards/chosen": -4.460112571716309, | |
"rewards/margins": 6.5059404373168945, | |
"rewards/rejected": -10.966052055358887, | |
"step": 1000 | |
}, | |
{ | |
"epoch": 0.8594757198109153, | |
"eval_logits/chosen": 3.0807368755340576, | |
"eval_logits/rejected": 3.4346303939819336, | |
"eval_logps/chosen": -3.660179376602173, | |
"eval_logps/rejected": -9.305243492126465, | |
"eval_loss": 0.2214924544095993, | |
"eval_rewards/accuracies": 0.9263157844543457, | |
"eval_rewards/chosen": -5.490269184112549, | |
"eval_rewards/margins": 8.467597007751465, | |
"eval_rewards/rejected": -13.957863807678223, | |
"eval_runtime": 25.8036, | |
"eval_samples_per_second": 29.182, | |
"eval_steps_per_second": 3.682, | |
"step": 1000 | |
}, | |
{ | |
"epoch": 0.8680704770090245, | |
"grad_norm": 3.969745635986328, | |
"learning_rate": 1.204932476567175e-06, | |
"logits/chosen": 3.2750396728515625, | |
"logits/rejected": 3.5089526176452637, | |
"logps/chosen": -3.4845385551452637, | |
"logps/rejected": -7.859654903411865, | |
"loss": 0.2136, | |
"rewards/accuracies": 0.949999988079071, | |
"rewards/chosen": -5.226807594299316, | |
"rewards/margins": 6.562674522399902, | |
"rewards/rejected": -11.789482116699219, | |
"step": 1010 | |
}, | |
{ | |
"epoch": 0.8766652342071336, | |
"grad_norm": 10.868583679199219, | |
"learning_rate": 1.160433012552508e-06, | |
"logits/chosen": 3.5371367931365967, | |
"logits/rejected": 3.6224236488342285, | |
"logps/chosen": -3.4987919330596924, | |
"logps/rejected": -8.714313507080078, | |
"loss": 0.2506, | |
"rewards/accuracies": 0.925000011920929, | |
"rewards/chosen": -5.248188018798828, | |
"rewards/margins": 7.8232831954956055, | |
"rewards/rejected": -13.071470260620117, | |
"step": 1020 | |
}, | |
{ | |
"epoch": 0.8852599914052428, | |
"grad_norm": 13.34189510345459, | |
"learning_rate": 1.11652112689164e-06, | |
"logits/chosen": 3.0136570930480957, | |
"logits/rejected": 3.227113723754883, | |
"logps/chosen": -3.607630968093872, | |
"logps/rejected": -8.083453178405762, | |
"loss": 0.2267, | |
"rewards/accuracies": 0.9125000238418579, | |
"rewards/chosen": -5.411446571350098, | |
"rewards/margins": 6.7137346267700195, | |
"rewards/rejected": -12.125181198120117, | |
"step": 1030 | |
}, | |
{ | |
"epoch": 0.8938547486033519, | |
"grad_norm": 3.0865557193756104, | |
"learning_rate": 1.073216080788921e-06, | |
"logits/chosen": 3.20233154296875, | |
"logits/rejected": 3.1444244384765625, | |
"logps/chosen": -3.677372455596924, | |
"logps/rejected": -8.034679412841797, | |
"loss": 0.2019, | |
"rewards/accuracies": 0.887499988079071, | |
"rewards/chosen": -5.516058921813965, | |
"rewards/margins": 6.535961151123047, | |
"rewards/rejected": -12.052019119262695, | |
"step": 1040 | |
}, | |
{ | |
"epoch": 0.9024495058014611, | |
"grad_norm": 2.896090507507324, | |
"learning_rate": 1.0305368692688175e-06, | |
"logits/chosen": 2.8652000427246094, | |
"logits/rejected": 3.20027494430542, | |
"logps/chosen": -3.3209357261657715, | |
"logps/rejected": -8.070123672485352, | |
"loss": 0.191, | |
"rewards/accuracies": 0.949999988079071, | |
"rewards/chosen": -4.981403350830078, | |
"rewards/margins": 7.123780727386475, | |
"rewards/rejected": -12.105184555053711, | |
"step": 1050 | |
}, | |
{ | |
"epoch": 0.9024495058014611, | |
"eval_logits/chosen": 3.1477556228637695, | |
"eval_logits/rejected": 3.4350297451019287, | |
"eval_logps/chosen": -3.7262520790100098, | |
"eval_logps/rejected": -9.287687301635742, | |
"eval_loss": 0.21767112612724304, | |
"eval_rewards/accuracies": 0.9368420839309692, | |
"eval_rewards/chosen": -5.589378833770752, | |
"eval_rewards/margins": 8.342151641845703, | |
"eval_rewards/rejected": -13.931530952453613, | |
"eval_runtime": 25.816, | |
"eval_samples_per_second": 29.168, | |
"eval_steps_per_second": 3.68, | |
"step": 1050 | |
} | |
], | |
"logging_steps": 10, | |
"max_steps": 1500, | |
"num_input_tokens_seen": 0, | |
"num_train_epochs": 2, | |
"save_steps": 50, | |
"stateful_callbacks": { | |
"TrainerControl": { | |
"args": { | |
"should_epoch_stop": false, | |
"should_evaluate": false, | |
"should_log": false, | |
"should_save": true, | |
"should_training_stop": false | |
}, | |
"attributes": {} | |
} | |
}, | |
"total_flos": 2.392035881788637e+18, | |
"train_batch_size": 1, | |
"trial_name": null, | |
"trial_params": null | |
} | |