{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9997382884061764,
  "eval_steps": 100,
  "global_step": 955,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0010468463752944255,
      "grad_norm": 5.5,
      "learning_rate": 5.208333333333333e-08,
      "logits/chosen": -0.32969313859939575,
      "logits/rejected": -0.3478139042854309,
      "logps/chosen": -282.7626953125,
      "logps/rejected": -210.6118621826172,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.010468463752944255,
      "grad_norm": 6.0,
      "learning_rate": 5.208333333333334e-07,
      "logits/chosen": -0.5201655030250549,
      "logits/rejected": -0.5292737483978271,
      "logps/chosen": -312.750244140625,
      "logps/rejected": -275.4716491699219,
      "loss": 0.6919,
      "rewards/accuracies": 0.4652777910232544,
      "rewards/chosen": 0.0034895986318588257,
      "rewards/margins": 0.002287701005116105,
      "rewards/rejected": 0.001201898674480617,
      "step": 10
    },
    {
      "epoch": 0.02093692750588851,
      "grad_norm": 5.5,
      "learning_rate": 1.0416666666666667e-06,
      "logits/chosen": -0.4813454747200012,
      "logits/rejected": -0.5021841526031494,
      "logps/chosen": -304.3456726074219,
      "logps/rejected": -268.962158203125,
      "loss": 0.6934,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.0017953884089365602,
      "rewards/margins": 0.0013109362917020917,
      "rewards/rejected": -0.003106324467808008,
      "step": 20
    },
    {
      "epoch": 0.031405391258832765,
      "grad_norm": 5.78125,
      "learning_rate": 1.5625e-06,
      "logits/chosen": -0.4880186915397644,
      "logits/rejected": -0.4969080984592438,
      "logps/chosen": -287.50286865234375,
      "logps/rejected": -249.9375457763672,
      "loss": 0.688,
      "rewards/accuracies": 0.5562499761581421,
      "rewards/chosen": 0.010473029688000679,
      "rewards/margins": 0.01431797631084919,
      "rewards/rejected": -0.003844945225864649,
      "step": 30
    },
    {
      "epoch": 0.04187385501177702,
      "grad_norm": 5.375,
      "learning_rate": 2.0833333333333334e-06,
      "logits/chosen": -0.46122869849205017,
      "logits/rejected": -0.4958384037017822,
      "logps/chosen": -303.1500549316406,
      "logps/rejected": -241.998046875,
      "loss": 0.683,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": 0.03339455649256706,
      "rewards/margins": 0.0322079211473465,
      "rewards/rejected": 0.0011866316199302673,
      "step": 40
    },
    {
      "epoch": 0.05234231876472128,
      "grad_norm": 4.59375,
      "learning_rate": 2.604166666666667e-06,
      "logits/chosen": -0.5217183828353882,
      "logits/rejected": -0.5492846369743347,
      "logps/chosen": -302.4110412597656,
      "logps/rejected": -280.4798278808594,
      "loss": 0.6784,
      "rewards/accuracies": 0.625,
      "rewards/chosen": 0.07218249142169952,
      "rewards/margins": 0.04362189769744873,
      "rewards/rejected": 0.028560593724250793,
      "step": 50
    },
    {
      "epoch": 0.06281078251766553,
      "grad_norm": 5.03125,
      "learning_rate": 3.125e-06,
      "logits/chosen": -0.5444208979606628,
      "logits/rejected": -0.5450385212898254,
      "logps/chosen": -288.3908996582031,
      "logps/rejected": -252.4159393310547,
      "loss": 0.662,
      "rewards/accuracies": 0.625,
      "rewards/chosen": 0.10959690809249878,
      "rewards/margins": 0.04304002225399017,
      "rewards/rejected": 0.0665568858385086,
      "step": 60
    },
    {
      "epoch": 0.07327924627060979,
      "grad_norm": 5.21875,
      "learning_rate": 3.6458333333333333e-06,
      "logits/chosen": -0.5294634699821472,
      "logits/rejected": -0.5446035265922546,
      "logps/chosen": -284.2558288574219,
      "logps/rejected": -261.0161437988281,
      "loss": 0.6465,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.1622026413679123,
      "rewards/margins": 0.1133759468793869,
      "rewards/rejected": 0.04882669448852539,
      "step": 70
    },
    {
      "epoch": 0.08374771002355404,
      "grad_norm": 4.78125,
      "learning_rate": 4.166666666666667e-06,
      "logits/chosen": -0.46931973099708557,
      "logits/rejected": -0.4959704279899597,
      "logps/chosen": -285.55169677734375,
      "logps/rejected": -271.7218017578125,
      "loss": 0.6312,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": 0.2240675389766693,
      "rewards/margins": 0.17874710261821747,
      "rewards/rejected": 0.04532044008374214,
      "step": 80
    },
    {
      "epoch": 0.0942161737764983,
      "grad_norm": 5.90625,
      "learning_rate": 4.6875000000000004e-06,
      "logits/chosen": -0.5021494626998901,
      "logits/rejected": -0.5237849950790405,
      "logps/chosen": -328.7523193359375,
      "logps/rejected": -293.5586853027344,
      "loss": 0.6197,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": 0.19337503612041473,
      "rewards/margins": 0.22977392375469208,
      "rewards/rejected": -0.036398887634277344,
      "step": 90
    },
    {
      "epoch": 0.10468463752944256,
      "grad_norm": 5.65625,
      "learning_rate": 4.9997324926814375e-06,
      "logits/chosen": -0.5387491583824158,
      "logits/rejected": -0.5343273282051086,
      "logps/chosen": -274.4442443847656,
      "logps/rejected": -288.30035400390625,
      "loss": 0.6297,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.221399188041687,
      "rewards/margins": 0.2429426610469818,
      "rewards/rejected": -0.0215434692800045,
      "step": 100
    },
    {
      "epoch": 0.10468463752944256,
      "eval_logits/chosen": -0.5525645613670349,
      "eval_logits/rejected": -0.5478540062904358,
      "eval_logps/chosen": -288.4034118652344,
      "eval_logps/rejected": -275.7340087890625,
      "eval_loss": 0.6140495538711548,
      "eval_rewards/accuracies": 0.6959999799728394,
      "eval_rewards/chosen": 0.13576927781105042,
      "eval_rewards/margins": 0.26342061161994934,
      "eval_rewards/rejected": -0.1276513636112213,
      "eval_runtime": 351.3471,
      "eval_samples_per_second": 5.692,
      "eval_steps_per_second": 0.356,
      "step": 100
    },
    {
      "epoch": 0.11515310128238682,
      "grad_norm": 4.25,
      "learning_rate": 4.996723692767927e-06,
      "logits/chosen": -0.5775566101074219,
      "logits/rejected": -0.603683590888977,
      "logps/chosen": -287.82733154296875,
      "logps/rejected": -275.31671142578125,
      "loss": 0.606,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": 0.15195702016353607,
      "rewards/margins": 0.34537211060523987,
      "rewards/rejected": -0.1934150755405426,
      "step": 110
    },
    {
      "epoch": 0.12562156503533106,
      "grad_norm": 4.96875,
      "learning_rate": 4.9903757462135984e-06,
      "logits/chosen": -0.5295812487602234,
      "logits/rejected": -0.5440601110458374,
      "logps/chosen": -260.30078125,
      "logps/rejected": -251.58401489257812,
      "loss": 0.5924,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 0.02338511124253273,
      "rewards/margins": 0.281913697719574,
      "rewards/rejected": -0.25852862000465393,
      "step": 120
    },
    {
      "epoch": 0.1360900287882753,
      "grad_norm": 5.46875,
      "learning_rate": 4.980697142834315e-06,
      "logits/chosen": -0.49980980157852173,
      "logits/rejected": -0.5171049237251282,
      "logps/chosen": -300.2707214355469,
      "logps/rejected": -336.19036865234375,
      "loss": 0.5933,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": 0.02033509686589241,
      "rewards/margins": 0.3162926733493805,
      "rewards/rejected": -0.2959575653076172,
      "step": 130
    },
    {
      "epoch": 0.14655849254121958,
      "grad_norm": 4.75,
      "learning_rate": 4.967700826904229e-06,
      "logits/chosen": -0.5714845061302185,
      "logits/rejected": -0.5790292024612427,
      "logps/chosen": -280.66845703125,
      "logps/rejected": -273.6711120605469,
      "loss": 0.5811,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.004461909644305706,
      "rewards/margins": 0.407376229763031,
      "rewards/rejected": -0.4118381440639496,
      "step": 140
    },
    {
      "epoch": 0.15702695629416383,
      "grad_norm": 4.84375,
      "learning_rate": 4.951404179843963e-06,
      "logits/chosen": -0.5678334832191467,
      "logits/rejected": -0.5395095944404602,
      "logps/chosen": -306.054443359375,
      "logps/rejected": -276.5906066894531,
      "loss": 0.5777,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": 0.13024172186851501,
      "rewards/margins": 0.4546026587486267,
      "rewards/rejected": -0.3243609070777893,
      "step": 150
    },
    {
      "epoch": 0.16749542004710807,
      "grad_norm": 4.375,
      "learning_rate": 4.931828996974498e-06,
      "logits/chosen": -0.5105162858963013,
      "logits/rejected": -0.5018196702003479,
      "logps/chosen": -296.04840087890625,
      "logps/rejected": -270.5509033203125,
      "loss": 0.5478,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": 0.04980681464076042,
      "rewards/margins": 0.5175832509994507,
      "rewards/rejected": -0.4677763879299164,
      "step": 160
    },
    {
      "epoch": 0.17796388380005235,
      "grad_norm": 4.78125,
      "learning_rate": 4.909001458367867e-06,
      "logits/chosen": -0.5939355492591858,
      "logits/rejected": -0.5804780125617981,
      "logps/chosen": -287.94708251953125,
      "logps/rejected": -275.73980712890625,
      "loss": 0.5789,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.19239839911460876,
      "rewards/margins": 0.4369952082633972,
      "rewards/rejected": -0.6293936967849731,
      "step": 170
    },
    {
      "epoch": 0.1884323475529966,
      "grad_norm": 5.15625,
      "learning_rate": 4.882952093833628e-06,
      "logits/chosen": -0.5993765592575073,
      "logits/rejected": -0.5707160830497742,
      "logps/chosen": -301.9080505371094,
      "logps/rejected": -265.2618713378906,
      "loss": 0.555,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -0.19447076320648193,
      "rewards/margins": 0.48417288064956665,
      "rewards/rejected": -0.6786437630653381,
      "step": 180
    },
    {
      "epoch": 0.19890081130594087,
      "grad_norm": 5.90625,
      "learning_rate": 4.853715742087947e-06,
      "logits/chosen": -0.5384653806686401,
      "logits/rejected": -0.5204354524612427,
      "logps/chosen": -275.14886474609375,
      "logps/rejected": -281.9067687988281,
      "loss": 0.5706,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.033171653747558594,
      "rewards/margins": 0.469027042388916,
      "rewards/rejected": -0.5021986961364746,
      "step": 190
    },
    {
      "epoch": 0.2093692750588851,
      "grad_norm": 6.5625,
      "learning_rate": 4.821331504159906e-06,
      "logits/chosen": -0.5440086722373962,
      "logits/rejected": -0.5563145279884338,
      "logps/chosen": -296.43060302734375,
      "logps/rejected": -255.0513916015625,
      "loss": 0.5676,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.12640251219272614,
      "rewards/margins": 0.5171942710876465,
      "rewards/rejected": -0.6435968279838562,
      "step": 200
    },
    {
      "epoch": 0.2093692750588851,
      "eval_logits/chosen": -0.5116367340087891,
      "eval_logits/rejected": -0.4945087134838104,
      "eval_logps/chosen": -290.90509033203125,
      "eval_logps/rejected": -281.0560302734375,
      "eval_loss": 0.5569251179695129,
      "eval_rewards/accuracies": 0.699999988079071,
      "eval_rewards/chosen": -0.11436504870653152,
      "eval_rewards/margins": 0.5454893708229065,
      "eval_rewards/rejected": -0.6598544716835022,
      "eval_runtime": 349.6273,
      "eval_samples_per_second": 5.72,
      "eval_steps_per_second": 0.358,
      "step": 200
    },
    {
      "epoch": 0.21983773881182936,
      "grad_norm": 6.15625,
      "learning_rate": 4.7858426910973435e-06,
      "logits/chosen": -0.5827375650405884,
      "logits/rejected": -0.5831678509712219,
      "logps/chosen": -278.0469970703125,
      "logps/rejected": -271.9605407714844,
      "loss": 0.5508,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.025282684713602066,
      "rewards/margins": 0.5209798216819763,
      "rewards/rejected": -0.5462625622749329,
      "step": 210
    },
    {
      "epoch": 0.23030620256477363,
      "grad_norm": 5.125,
      "learning_rate": 4.747296766042161e-06,
      "logits/chosen": -0.5575278997421265,
      "logits/rejected": -0.5416348576545715,
      "logps/chosen": -317.4739990234375,
      "logps/rejected": -269.8050231933594,
      "loss": 0.5368,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": 0.03362050652503967,
      "rewards/margins": 0.569665789604187,
      "rewards/rejected": -0.536045253276825,
      "step": 220
    },
    {
      "epoch": 0.24077466631771788,
      "grad_norm": 5.6875,
      "learning_rate": 4.705745280752586e-06,
      "logits/chosen": -0.589636504650116,
      "logits/rejected": -0.5510541200637817,
      "logps/chosen": -290.55206298828125,
      "logps/rejected": -287.7290954589844,
      "loss": 0.5581,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -0.08335064351558685,
      "rewards/margins": 0.5679007768630981,
      "rewards/rejected": -0.6512514352798462,
      "step": 230
    },
    {
      "epoch": 0.2512431300706621,
      "grad_norm": 4.96875,
      "learning_rate": 4.661243806657256e-06,
      "logits/chosen": -0.5663197636604309,
      "logits/rejected": -0.525894045829773,
      "logps/chosen": -299.3938903808594,
      "logps/rejected": -263.598876953125,
      "loss": 0.5563,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.1382952779531479,
      "rewards/margins": 0.5307878255844116,
      "rewards/rejected": -0.6690832376480103,
      "step": 240
    },
    {
      "epoch": 0.26171159382360637,
      "grad_norm": 5.6875,
      "learning_rate": 4.613851860533367e-06,
      "logits/chosen": -0.5768588781356812,
      "logits/rejected": -0.5346202850341797,
      "logps/chosen": -293.30059814453125,
      "logps/rejected": -259.82177734375,
      "loss": 0.5789,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.08053269237279892,
      "rewards/margins": 0.5023098587989807,
      "rewards/rejected": -0.5828425288200378,
      "step": 250
    },
    {
      "epoch": 0.2721800575765506,
      "grad_norm": 5.6875,
      "learning_rate": 4.563632824908252e-06,
      "logits/chosen": -0.5458053946495056,
      "logits/rejected": -0.5105060935020447,
      "logps/chosen": -292.6302185058594,
      "logps/rejected": -279.1827392578125,
      "loss": 0.5404,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": 0.12764385342597961,
      "rewards/margins": 0.8003277778625488,
      "rewards/rejected": -0.6726840138435364,
      "step": 260
    },
    {
      "epoch": 0.2826485213294949,
      "grad_norm": 5.78125,
      "learning_rate": 4.510653863290871e-06,
      "logits/chosen": -0.545439600944519,
      "logits/rejected": -0.5205134749412537,
      "logps/chosen": -294.71783447265625,
      "logps/rejected": -302.39599609375,
      "loss": 0.5465,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.015889674425125122,
      "rewards/margins": 0.5484660267829895,
      "rewards/rejected": -0.564355731010437,
      "step": 270
    },
    {
      "epoch": 0.29311698508243916,
      "grad_norm": 4.78125,
      "learning_rate": 4.454985830346574e-06,
      "logits/chosen": -0.6070719957351685,
      "logits/rejected": -0.5709980726242065,
      "logps/chosen": -300.3115234375,
      "logps/rejected": -283.37689208984375,
      "loss": 0.5681,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": -0.04909560829401016,
      "rewards/margins": 0.496112197637558,
      "rewards/rejected": -0.5452078580856323,
      "step": 280
    },
    {
      "epoch": 0.3035854488353834,
      "grad_norm": 4.65625,
      "learning_rate": 4.396703177135262e-06,
      "logits/chosen": -0.5642537474632263,
      "logits/rejected": -0.5349927544593811,
      "logps/chosen": -285.5408935546875,
      "logps/rejected": -256.77691650390625,
      "loss": 0.5249,
      "rewards/accuracies": 0.75,
      "rewards/chosen": 0.10653576999902725,
      "rewards/margins": 0.6360087990760803,
      "rewards/rejected": -0.5294729471206665,
      "step": 290
    },
    {
      "epoch": 0.31405391258832765,
      "grad_norm": 5.75,
      "learning_rate": 4.335883851539693e-06,
      "logits/chosen": -0.5834770798683167,
      "logits/rejected": -0.544857382774353,
      "logps/chosen": -296.65936279296875,
      "logps/rejected": -293.7464599609375,
      "loss": 0.5414,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.30995655059814453,
      "rewards/margins": 0.6452124714851379,
      "rewards/rejected": -0.9551690220832825,
      "step": 300
    },
    {
      "epoch": 0.31405391258832765,
      "eval_logits/chosen": -0.4775179624557495,
      "eval_logits/rejected": -0.45395201444625854,
      "eval_logps/chosen": -293.56982421875,
      "eval_logps/rejected": -284.91802978515625,
      "eval_loss": 0.5403068661689758,
      "eval_rewards/accuracies": 0.7260000109672546,
      "eval_rewards/chosen": -0.38083866238594055,
      "eval_rewards/margins": 0.6652126312255859,
      "eval_rewards/rejected": -1.0460513830184937,
      "eval_runtime": 349.5769,
      "eval_samples_per_second": 5.721,
      "eval_steps_per_second": 0.358,
      "step": 300
    },
    {
      "epoch": 0.3245223763412719,
      "grad_norm": 6.28125,
      "learning_rate": 4.2726091940171055e-06,
      "logits/chosen": -0.5361314415931702,
      "logits/rejected": -0.5678786635398865,
      "logps/chosen": -296.285400390625,
      "logps/rejected": -342.7236022949219,
      "loss": 0.5056,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": -0.19761790335178375,
      "rewards/margins": 0.8299154043197632,
      "rewards/rejected": -1.0275332927703857,
      "step": 310
    },
    {
      "epoch": 0.33499084009421615,
      "grad_norm": 5.21875,
      "learning_rate": 4.206963828813555e-06,
      "logits/chosen": -0.5878556370735168,
      "logits/rejected": -0.5477277040481567,
      "logps/chosen": -296.2107849121094,
      "logps/rejected": -280.1604919433594,
      "loss": 0.524,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.1949264109134674,
      "rewards/margins": 0.7900466918945312,
      "rewards/rejected": -0.9849729537963867,
      "step": 320
    },
    {
      "epoch": 0.34545930384716045,
      "grad_norm": 5.0625,
      "learning_rate": 4.139035550786495e-06,
      "logits/chosen": -0.6225862503051758,
      "logits/rejected": -0.5590678453445435,
      "logps/chosen": -289.2728576660156,
      "logps/rejected": -260.7643737792969,
      "loss": 0.5217,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.2666059136390686,
      "rewards/margins": 0.6325812339782715,
      "rewards/rejected": -0.8991872072219849,
      "step": 330
    },
    {
      "epoch": 0.3559277676001047,
      "grad_norm": 5.96875,
      "learning_rate": 4.068915207986931e-06,
      "logits/chosen": -0.5779263377189636,
      "logits/rejected": -0.5189130902290344,
      "logps/chosen": -298.147705078125,
      "logps/rejected": -258.82391357421875,
      "loss": 0.5224,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -0.4627823233604431,
      "rewards/margins": 0.7694815397262573,
      "rewards/rejected": -1.2322638034820557,
      "step": 340
    },
    {
      "epoch": 0.36639623135304894,
      "grad_norm": 6.21875,
      "learning_rate": 3.996696580158211e-06,
      "logits/chosen": -0.5267971158027649,
      "logits/rejected": -0.5050491690635681,
      "logps/chosen": -335.9749755859375,
      "logps/rejected": -290.8840026855469,
      "loss": 0.5206,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.16786900162696838,
      "rewards/margins": 0.7010205984115601,
      "rewards/rejected": -0.8688896894454956,
      "step": 350
    },
    {
      "epoch": 0.3768646951059932,
      "grad_norm": 5.3125,
      "learning_rate": 3.922476253313921e-06,
      "logits/chosen": -0.5035579800605774,
      "logits/rejected": -0.5138710141181946,
      "logps/chosen": -273.709716796875,
      "logps/rejected": -296.85650634765625,
      "loss": 0.5587,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": -0.28165164589881897,
      "rewards/margins": 0.6714268922805786,
      "rewards/rejected": -0.9530784487724304,
      "step": 360
    },
    {
      "epoch": 0.38733315885893743,
      "grad_norm": 5.875,
      "learning_rate": 3.846353490562664e-06,
      "logits/chosen": -0.5329638123512268,
      "logits/rejected": -0.5389483571052551,
      "logps/chosen": -288.07745361328125,
      "logps/rejected": -262.96124267578125,
      "loss": 0.5038,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -0.05612688139081001,
      "rewards/margins": 0.8792274594306946,
      "rewards/rejected": -0.9353543519973755,
      "step": 370
    },
    {
      "epoch": 0.39780162261188173,
      "grad_norm": 5.25,
      "learning_rate": 3.768430099352445e-06,
      "logits/chosen": -0.5536895990371704,
      "logits/rejected": -0.5606611371040344,
      "logps/chosen": -304.82574462890625,
      "logps/rejected": -277.5342712402344,
      "loss": 0.5089,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.04281007498502731,
      "rewards/margins": 0.9195000529289246,
      "rewards/rejected": -0.9623101353645325,
      "step": 380
    },
    {
      "epoch": 0.408270086364826,
      "grad_norm": 6.5,
      "learning_rate": 3.6888102953122307e-06,
      "logits/chosen": -0.5975215435028076,
      "logits/rejected": -0.5678552389144897,
      "logps/chosen": -261.8155212402344,
      "logps/rejected": -262.86151123046875,
      "loss": 0.5554,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.049895767122507095,
      "rewards/margins": 0.72125244140625,
      "rewards/rejected": -0.7711483240127563,
      "step": 390
    },
    {
      "epoch": 0.4187385501177702,
      "grad_norm": 5.40625,
      "learning_rate": 3.607600562872785e-06,
      "logits/chosen": -0.5524119138717651,
      "logits/rejected": -0.5214421153068542,
      "logps/chosen": -284.4479675292969,
      "logps/rejected": -274.91790771484375,
      "loss": 0.5124,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -0.08701231330633163,
      "rewards/margins": 0.7274686694145203,
      "rewards/rejected": -0.8144810795783997,
      "step": 400
    },
    {
      "epoch": 0.4187385501177702,
      "eval_logits/chosen": -0.45163384079933167,
      "eval_logits/rejected": -0.42426639795303345,
      "eval_logps/chosen": -292.09857177734375,
      "eval_logps/rejected": -284.3532409667969,
      "eval_loss": 0.5341187715530396,
      "eval_rewards/accuracies": 0.7039999961853027,
      "eval_rewards/chosen": -0.23370328545570374,
      "eval_rewards/margins": 0.7558707594871521,
      "eval_rewards/rejected": -0.9895740151405334,
      "eval_runtime": 349.3488,
      "eval_samples_per_second": 5.725,
      "eval_steps_per_second": 0.358,
      "step": 400
    },
    {
      "epoch": 0.42920701387071447,
      "grad_norm": 5.1875,
      "learning_rate": 3.5249095128531863e-06,
      "logits/chosen": -0.5735105872154236,
      "logits/rejected": -0.5243286490440369,
      "logps/chosen": -278.07611083984375,
      "logps/rejected": -275.5641784667969,
      "loss": 0.5147,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.19002243876457214,
      "rewards/margins": 0.7818821668624878,
      "rewards/rejected": -0.9719046354293823,
      "step": 410
    },
    {
      "epoch": 0.4396754776236587,
      "grad_norm": 5.75,
      "learning_rate": 3.4408477372034743e-06,
      "logits/chosen": -0.5684117078781128,
      "logits/rejected": -0.5453870296478271,
      "logps/chosen": -308.50128173828125,
      "logps/rejected": -296.2371826171875,
      "loss": 0.5608,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -0.22137188911437988,
      "rewards/margins": 0.63337242603302,
      "rewards/rejected": -0.8547442555427551,
      "step": 420
    },
    {
      "epoch": 0.45014394137660296,
      "grad_norm": 6.125,
      "learning_rate": 3.355527661097728e-06,
      "logits/chosen": -0.5637370347976685,
      "logits/rejected": -0.5657171010971069,
      "logps/chosen": -280.50177001953125,
      "logps/rejected": -281.47467041015625,
      "loss": 0.5229,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -0.39310961961746216,
      "rewards/margins": 0.6110748648643494,
      "rewards/rejected": -1.0041844844818115,
      "step": 430
    },
    {
      "epoch": 0.46061240512954726,
      "grad_norm": 5.53125,
      "learning_rate": 3.269063392575352e-06,
      "logits/chosen": -0.5321542024612427,
      "logits/rejected": -0.5234124660491943,
      "logps/chosen": -328.39447021484375,
      "logps/rejected": -307.01556396484375,
      "loss": 0.5135,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.2074432671070099,
      "rewards/margins": 0.7093298435211182,
      "rewards/rejected": -0.9167732000350952,
      "step": 440
    },
    {
      "epoch": 0.4710808688824915,
      "grad_norm": 6.625,
      "learning_rate": 3.181570569931697e-06,
      "logits/chosen": -0.5547568202018738,
      "logits/rejected": -0.5429819822311401,
      "logps/chosen": -286.77447509765625,
      "logps/rejected": -282.605224609375,
      "loss": 0.5,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -0.41850847005844116,
      "rewards/margins": 0.6984038352966309,
      "rewards/rejected": -1.1169124841690063,
      "step": 450
    },
    {
      "epoch": 0.48154933263543576,
      "grad_norm": 5.125,
      "learning_rate": 3.09316620706208e-06,
      "logits/chosen": -0.5107685327529907,
      "logits/rejected": -0.5204188227653503,
      "logps/chosen": -307.8090515136719,
      "logps/rejected": -288.7105407714844,
      "loss": 0.4984,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -0.21866807341575623,
      "rewards/margins": 0.888043999671936,
      "rewards/rejected": -1.106711983680725,
      "step": 460
    },
    {
      "epoch": 0.49201779638838,
      "grad_norm": 5.3125,
      "learning_rate": 3.0039685369660785e-06,
      "logits/chosen": -0.5120314359664917,
      "logits/rejected": -0.46821707487106323,
      "logps/chosen": -281.59722900390625,
      "logps/rejected": -266.38134765625,
      "loss": 0.5331,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.03815505653619766,
      "rewards/margins": 0.8301553726196289,
      "rewards/rejected": -0.8683103322982788,
      "step": 470
    },
    {
      "epoch": 0.5024862601413242,
      "grad_norm": 5.53125,
      "learning_rate": 2.91409685362137e-06,
      "logits/chosen": -0.5207866430282593,
      "logits/rejected": -0.5130727887153625,
      "logps/chosen": -278.73504638671875,
      "logps/rejected": -276.3402404785156,
      "loss": 0.5098,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -0.15987971425056458,
      "rewards/margins": 0.769777774810791,
      "rewards/rejected": -0.9296576380729675,
      "step": 480
    },
    {
      "epoch": 0.5129547238942685,
      "grad_norm": 4.9375,
      "learning_rate": 2.8236713524386085e-06,
      "logits/chosen": -0.5813151597976685,
      "logits/rejected": -0.5460097193717957,
      "logps/chosen": -278.7942810058594,
      "logps/rejected": -256.22735595703125,
      "loss": 0.498,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -0.02987014129757881,
      "rewards/margins": 0.7739948034286499,
      "rewards/rejected": -0.8038649559020996,
      "step": 490
    },
    {
      "epoch": 0.5234231876472127,
      "grad_norm": 5.59375,
      "learning_rate": 2.7328129695107205e-06,
      "logits/chosen": -0.4917908310890198,
      "logits/rejected": -0.5057145357131958,
      "logps/chosen": -264.3317565917969,
      "logps/rejected": -273.6480712890625,
      "loss": 0.5529,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.05067148059606552,
      "rewards/margins": 0.9685169458389282,
      "rewards/rejected": -1.0191885232925415,
      "step": 500
    },
    {
      "epoch": 0.5234231876472127,
      "eval_logits/chosen": -0.4290330708026886,
      "eval_logits/rejected": -0.3994762897491455,
      "eval_logps/chosen": -291.93804931640625,
      "eval_logps/rejected": -284.49481201171875,
      "eval_loss": 0.5259639620780945,
      "eval_rewards/accuracies": 0.7239999771118164,
      "eval_rewards/chosen": -0.2176663726568222,
      "eval_rewards/margins": 0.7860648036003113,
      "eval_rewards/rejected": -1.0037312507629395,
      "eval_runtime": 349.4608,
      "eval_samples_per_second": 5.723,
      "eval_steps_per_second": 0.358,
      "step": 500
    },
    {
      "epoch": 0.533891651400157,
      "grad_norm": 4.75,
      "learning_rate": 2.641643219871597e-06,
      "logits/chosen": -0.5275259017944336,
      "logits/rejected": -0.49047979712486267,
      "logps/chosen": -313.4814147949219,
      "logps/rejected": -297.6827697753906,
      "loss": 0.5085,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.3123074471950531,
      "rewards/margins": 0.7204921841621399,
      "rewards/rejected": -1.0327996015548706,
      "step": 510
    },
    {
      "epoch": 0.5443601151531012,
      "grad_norm": 6.90625,
      "learning_rate": 2.5502840349805074e-06,
      "logits/chosen": -0.5200182199478149,
      "logits/rejected": -0.5098154544830322,
      "logps/chosen": -310.73944091796875,
      "logps/rejected": -297.84326171875,
      "loss": 0.5471,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -0.353024423122406,
      "rewards/margins": 0.8153821229934692,
      "rewards/rejected": -1.1684064865112305,
      "step": 520
    },
    {
      "epoch": 0.5548285789060455,
      "grad_norm": 5.03125,
      "learning_rate": 2.4588575996495797e-06,
      "logits/chosen": -0.47494420409202576,
      "logits/rejected": -0.48031002283096313,
      "logps/chosen": -272.15692138671875,
      "logps/rejected": -262.4747009277344,
      "loss": 0.5205,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -0.41294246912002563,
      "rewards/margins": 0.8126422166824341,
      "rewards/rejected": -1.225584626197815,
      "step": 530
    },
    {
      "epoch": 0.5652970426589898,
      "grad_norm": 5.53125,
      "learning_rate": 2.367486188632446e-06,
      "logits/chosen": -0.5167185068130493,
      "logits/rejected": -0.514365017414093,
      "logps/chosen": -285.01080322265625,
      "logps/rejected": -324.41302490234375,
      "loss": 0.5084,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.18025778234004974,
      "rewards/margins": 0.8430238962173462,
      "rewards/rejected": -1.0232816934585571,
      "step": 540
    },
    {
      "epoch": 0.575765506411934,
      "grad_norm": 4.1875,
      "learning_rate": 2.276292003092593e-06,
      "logits/chosen": -0.5328477621078491,
      "logits/rejected": -0.514901340007782,
      "logps/chosen": -257.83953857421875,
      "logps/rejected": -265.4601135253906,
      "loss": 0.4871,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -0.24479135870933533,
      "rewards/margins": 0.8982963562011719,
      "rewards/rejected": -1.1430877447128296,
      "step": 550
    },
    {
      "epoch": 0.5862339701648783,
      "grad_norm": 4.875,
      "learning_rate": 2.1853970071701415e-06,
      "logits/chosen": -0.5274088978767395,
      "logits/rejected": -0.4985182285308838,
      "logps/chosen": -278.2223815917969,
      "logps/rejected": -279.3916931152344,
      "loss": 0.5206,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.12979185581207275,
      "rewards/margins": 0.8115113973617554,
      "rewards/rejected": -0.9413032531738281,
      "step": 560
    },
    {
      "epoch": 0.5967024339178225,
      "grad_norm": 5.84375,
      "learning_rate": 2.0949227648656194e-06,
      "logits/chosen": -0.5541775822639465,
      "logits/rejected": -0.5307848453521729,
      "logps/chosen": -294.7757263183594,
      "logps/rejected": -261.8687438964844,
      "loss": 0.5405,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -0.2242761105298996,
      "rewards/margins": 0.8702728152275085,
      "rewards/rejected": -1.0945489406585693,
      "step": 570
    },
    {
      "epoch": 0.6071708976707668,
      "grad_norm": 5.78125,
      "learning_rate": 2.00499027745888e-06,
      "logits/chosen": -0.5282704830169678,
      "logits/rejected": -0.5080757141113281,
      "logps/chosen": -299.1280212402344,
      "logps/rejected": -297.7095031738281,
      "loss": 0.5509,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.1550193578004837,
      "rewards/margins": 0.7501784563064575,
      "rewards/rejected": -0.9051979184150696,
      "step": 580
    },
    {
      "epoch": 0.6176393614237111,
      "grad_norm": 5.65625,
      "learning_rate": 1.915719821680624e-06,
      "logits/chosen": -0.5544322729110718,
      "logits/rejected": -0.5079981088638306,
      "logps/chosen": -285.83575439453125,
      "logps/rejected": -281.488037109375,
      "loss": 0.5198,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": 0.1644931137561798,
      "rewards/margins": 0.8637059330940247,
      "rewards/rejected": -0.6992128491401672,
      "step": 590
    },
    {
      "epoch": 0.6281078251766553,
      "grad_norm": 5.34375,
      "learning_rate": 1.8272307888529276e-06,
      "logits/chosen": -0.47845038771629333,
      "logits/rejected": -0.44052690267562866,
      "logps/chosen": -261.1684265136719,
      "logps/rejected": -279.1067810058594,
      "loss": 0.53,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -0.013519972562789917,
      "rewards/margins": 0.8593659400939941,
      "rewards/rejected": -0.8728858828544617,
      "step": 600
    },
    {
      "epoch": 0.6281078251766553,
      "eval_logits/chosen": -0.4317292273044586,
      "eval_logits/rejected": -0.4027506709098816,
      "eval_logps/chosen": -290.4488830566406,
      "eval_logps/rejected": -283.0403137207031,
      "eval_loss": 0.5244117379188538,
      "eval_rewards/accuracies": 0.7200000286102295,
      "eval_rewards/chosen": -0.06874838471412659,
      "eval_rewards/margins": 0.7895320653915405,
      "eval_rewards/rejected": -0.8582804203033447,
      "eval_runtime": 349.48,
      "eval_samples_per_second": 5.723,
      "eval_steps_per_second": 0.358,
      "step": 600
    },
    {
      "epoch": 0.6385762889295996,
      "grad_norm": 4.625,
      "learning_rate": 1.739641525213929e-06,
      "logits/chosen": -0.510543942451477,
      "logits/rejected": -0.5007991194725037,
|
"logps/chosen": -264.67572021484375, |
|
"logps/rejected": -270.05438232421875, |
|
"loss": 0.4957, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": -0.04250194877386093, |
|
"rewards/margins": 0.8683522343635559, |
|
"rewards/rejected": -0.9108540415763855, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 0.6490447526825438, |
|
"grad_norm": 4.5625, |
|
"learning_rate": 1.6530691736402317e-06, |
|
"logits/chosen": -0.5225728750228882, |
|
"logits/rejected": -0.49780869483947754, |
|
"logps/chosen": -291.7324523925781, |
|
"logps/rejected": -280.8247985839844, |
|
"loss": 0.5072, |
|
"rewards/accuracies": 0.7875000238418579, |
|
"rewards/chosen": -0.15649111568927765, |
|
"rewards/margins": 0.8370261192321777, |
|
"rewards/rejected": -0.9935172200202942, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 0.6595132164354881, |
|
"grad_norm": 5.34375, |
|
"learning_rate": 1.5676295169786864e-06, |
|
"logits/chosen": -0.5341150164604187, |
|
"logits/rejected": -0.4966842532157898, |
|
"logps/chosen": -283.0377502441406, |
|
"logps/rejected": -271.4356994628906, |
|
"loss": 0.5294, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -0.335578590631485, |
|
"rewards/margins": 0.8176633715629578, |
|
"rewards/rejected": -1.1532419919967651, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 0.6699816801884323, |
|
"grad_norm": 5.25, |
|
"learning_rate": 1.4834368231970922e-06, |
|
"logits/chosen": -0.5592583417892456, |
|
"logits/rejected": -0.5109054446220398, |
|
"logps/chosen": -287.51068115234375, |
|
"logps/rejected": -272.8183288574219, |
|
"loss": 0.5, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -0.3240619897842407, |
|
"rewards/margins": 0.7095167636871338, |
|
"rewards/rejected": -1.0335787534713745, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 0.6804501439413766, |
|
"grad_norm": 5.28125, |
|
"learning_rate": 1.4006036925609245e-06, |
|
"logits/chosen": -0.5300047993659973, |
|
"logits/rejected": -0.49794530868530273, |
|
"logps/chosen": -300.1281433105469, |
|
"logps/rejected": -249.81936645507812, |
|
"loss": 0.5413, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -0.32234618067741394, |
|
"rewards/margins": 0.8122032880783081, |
|
"rewards/rejected": -1.1345494985580444, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 0.6909186076943209, |
|
"grad_norm": 5.15625, |
|
"learning_rate": 1.3192409070404582e-06, |
|
"logits/chosen": -0.550862193107605, |
|
"logits/rejected": -0.532629668712616, |
|
"logps/chosen": -303.74505615234375, |
|
"logps/rejected": -305.6463928222656, |
|
"loss": 0.5143, |
|
"rewards/accuracies": 0.762499988079071, |
|
"rewards/chosen": -0.20528188347816467, |
|
"rewards/margins": 0.7565289735794067, |
|
"rewards/rejected": -0.9618108868598938, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 0.7013870714472651, |
|
"grad_norm": 6.59375, |
|
"learning_rate": 1.2394572821496953e-06, |
|
"logits/chosen": -0.5544015169143677, |
|
"logits/rejected": -0.5221393704414368, |
|
"logps/chosen": -277.2763977050781, |
|
"logps/rejected": -258.49676513671875, |
|
"loss": 0.5181, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": -0.20379216969013214, |
|
"rewards/margins": 0.7780588269233704, |
|
"rewards/rejected": -0.9818509817123413, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 0.7118555352002094, |
|
"grad_norm": 5.09375, |
|
"learning_rate": 1.1613595214152713e-06, |
|
"logits/chosen": -0.5822625160217285, |
|
"logits/rejected": -0.540503203868866, |
|
"logps/chosen": -286.454833984375, |
|
"logps/rejected": -275.0696105957031, |
|
"loss": 0.5043, |
|
"rewards/accuracies": 0.7562500238418579, |
|
"rewards/chosen": -0.32862305641174316, |
|
"rewards/margins": 0.7822999954223633, |
|
"rewards/rejected": -1.1109230518341064, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 0.7223239989531536, |
|
"grad_norm": 4.3125, |
|
"learning_rate": 1.0850520736699362e-06, |
|
"logits/chosen": -0.5231102705001831, |
|
"logits/rejected": -0.49847880005836487, |
|
"logps/chosen": -340.5146484375, |
|
"logps/rejected": -315.625, |
|
"loss": 0.5348, |
|
"rewards/accuracies": 0.762499988079071, |
|
"rewards/chosen": -0.29148069024086, |
|
"rewards/margins": 0.8814651370048523, |
|
"rewards/rejected": -1.1729459762573242, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 0.7327924627060979, |
|
"grad_norm": 5.625, |
|
"learning_rate": 1.0106369933615043e-06, |
|
"logits/chosen": -0.5652047991752625, |
|
"logits/rejected": -0.5295856595039368, |
|
"logps/chosen": -315.13458251953125, |
|
"logps/rejected": -262.6495361328125, |
|
"loss": 0.5028, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": -0.3104090690612793, |
|
"rewards/margins": 0.7127448320388794, |
|
"rewards/rejected": -1.0231537818908691, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 0.7327924627060979, |
|
"eval_logits/chosen": -0.417924702167511, |
|
"eval_logits/rejected": -0.38744810223579407, |
|
"eval_logps/chosen": -293.1184387207031, |
|
"eval_logps/rejected": -285.8177185058594, |
|
"eval_loss": 0.5190439224243164, |
|
"eval_rewards/accuracies": 0.7319999933242798, |
|
"eval_rewards/chosen": -0.33567702770233154, |
|
"eval_rewards/margins": 0.8003441095352173, |
|
"eval_rewards/rejected": -1.1360211372375488, |
|
"eval_runtime": 349.5304, |
|
"eval_samples_per_second": 5.722, |
|
"eval_steps_per_second": 0.358, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 0.7432609264590422, |
|
"grad_norm": 6.1875, |
|
"learning_rate": 9.382138040640714e-07, |
|
"logits/chosen": -0.5737341046333313, |
|
"logits/rejected": -0.5333853363990784, |
|
"logps/chosen": -264.9908752441406, |
|
"logps/rejected": -278.7557067871094, |
|
"loss": 0.5569, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -0.3085269331932068, |
|
"rewards/margins": 0.818738579750061, |
|
"rewards/rejected": -1.1272655725479126, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 0.7537293902119864, |
|
"grad_norm": 5.34375, |
|
"learning_rate": 8.678793653740633e-07, |
|
"logits/chosen": -0.4978073537349701, |
|
"logits/rejected": -0.49744582176208496, |
|
"logps/chosen": -263.29010009765625, |
|
"logps/rejected": -263.02618408203125, |
|
"loss": 0.5204, |
|
"rewards/accuracies": 0.762499988079071, |
|
"rewards/chosen": -0.2587752938270569, |
|
"rewards/margins": 0.8211803436279297, |
|
"rewards/rejected": -1.0799555778503418, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 0.7641978539649307, |
|
"grad_norm": 4.75, |
|
"learning_rate": 7.997277433690984e-07, |
|
"logits/chosen": -0.5180292725563049, |
|
"logits/rejected": -0.47244367003440857, |
|
"logps/chosen": -301.2723693847656, |
|
"logps/rejected": -287.05584716796875, |
|
"loss": 0.5153, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": -0.28081485629081726, |
|
"rewards/margins": 0.7739724516868591, |
|
"rewards/rejected": -1.054787278175354, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 0.7746663177178749, |
|
"grad_norm": 4.65625, |
|
"learning_rate": 7.338500848029603e-07, |
|
"logits/chosen": -0.47029241919517517, |
|
"logits/rejected": -0.48912668228149414, |
|
"logps/chosen": -290.9686584472656, |
|
"logps/rejected": -274.376953125, |
|
"loss": 0.4939, |
|
"rewards/accuracies": 0.768750011920929, |
|
"rewards/chosen": -0.0831630602478981, |
|
"rewards/margins": 0.7876149415969849, |
|
"rewards/rejected": -0.8707780838012695, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 0.7851347814708192, |
|
"grad_norm": 5.09375, |
|
"learning_rate": 6.70334495204884e-07, |
|
"logits/chosen": -0.5157723426818848, |
|
"logits/rejected": -0.49868693947792053, |
|
"logps/chosen": -323.7619323730469, |
|
"logps/rejected": -285.82916259765625, |
|
"loss": 0.5023, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -0.11948978900909424, |
|
"rewards/margins": 0.8135945200920105, |
|
"rewards/rejected": -0.9330843687057495, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 0.7956032452237635, |
|
"grad_norm": 5.46875, |
|
"learning_rate": 6.092659210462232e-07, |
|
"logits/chosen": -0.5377079248428345, |
|
"logits/rejected": -0.5193135142326355, |
|
"logps/chosen": -268.98455810546875, |
|
"logps/rejected": -268.32025146484375, |
|
"loss": 0.5455, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": -0.2502976357936859, |
|
"rewards/margins": 0.698966920375824, |
|
"rewards/rejected": -0.949264645576477, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 0.8060717089767077, |
|
"grad_norm": 5.28125, |
|
"learning_rate": 5.507260361320738e-07, |
|
"logits/chosen": -0.5280566215515137, |
|
"logits/rejected": -0.5302770137786865, |
|
"logps/chosen": -284.72637939453125, |
|
"logps/rejected": -279.0828552246094, |
|
"loss": 0.5093, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": -0.12712079286575317, |
|
"rewards/margins": 0.6862561106681824, |
|
"rewards/rejected": -0.8133770227432251, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 0.816540172729652, |
|
"grad_norm": 5.21875, |
|
"learning_rate": 4.947931323697983e-07, |
|
"logits/chosen": -0.5104556083679199, |
|
"logits/rejected": -0.48463624715805054, |
|
"logps/chosen": -286.16925048828125, |
|
"logps/rejected": -279.46295166015625, |
|
"loss": 0.5278, |
|
"rewards/accuracies": 0.6875, |
|
"rewards/chosen": -0.2269764244556427, |
|
"rewards/margins": 0.6587584018707275, |
|
"rewards/rejected": -0.8857349157333374, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 0.8270086364825961, |
|
"grad_norm": 5.375, |
|
"learning_rate": 4.4154201506053985e-07, |
|
"logits/chosen": -0.5380119681358337, |
|
"logits/rejected": -0.5271115303039551, |
|
"logps/chosen": -300.27313232421875, |
|
"logps/rejected": -263.89984130859375, |
|
"loss": 0.511, |
|
"rewards/accuracies": 0.8187500238418579, |
|
"rewards/chosen": -0.15483184158802032, |
|
"rewards/margins": 0.8044347763061523, |
|
"rewards/rejected": -0.9592665433883667, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 0.8374771002355405, |
|
"grad_norm": 4.84375, |
|
"learning_rate": 3.910439028537638e-07, |
|
"logits/chosen": -0.5491563677787781, |
|
"logits/rejected": -0.5051991939544678, |
|
"logps/chosen": -347.644287109375, |
|
"logps/rejected": -302.0996398925781, |
|
"loss": 0.5347, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": -0.08171938359737396, |
|
"rewards/margins": 0.7037029266357422, |
|
"rewards/rejected": -0.7854223251342773, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 0.8374771002355405, |
|
"eval_logits/chosen": -0.42248398065567017, |
|
"eval_logits/rejected": -0.39241039752960205, |
|
"eval_logps/chosen": -291.1650085449219, |
|
"eval_logps/rejected": -283.87603759765625, |
|
"eval_loss": 0.5191403031349182, |
|
"eval_rewards/accuracies": 0.7319999933242798, |
|
"eval_rewards/chosen": -0.14035792648792267, |
|
"eval_rewards/margins": 0.8014954924583435, |
|
"eval_rewards/rejected": -0.9418535232543945, |
|
"eval_runtime": 349.5564, |
|
"eval_samples_per_second": 5.722, |
|
"eval_steps_per_second": 0.358, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 0.8479455639884846, |
|
"grad_norm": 4.6875, |
|
"learning_rate": 3.4336633249862084e-07, |
|
"logits/chosen": -0.5698053240776062, |
|
"logits/rejected": -0.5044723153114319, |
|
"logps/chosen": -319.55126953125, |
|
"logps/rejected": -288.7781066894531, |
|
"loss": 0.5021, |
|
"rewards/accuracies": 0.7437499761581421, |
|
"rewards/chosen": -0.17804962396621704, |
|
"rewards/margins": 0.8044875860214233, |
|
"rewards/rejected": -0.9825371503829956, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 0.8584140277414289, |
|
"grad_norm": 5.25, |
|
"learning_rate": 2.98573068519539e-07, |
|
"logits/chosen": -0.5468612909317017, |
|
"logits/rejected": -0.5319810509681702, |
|
"logps/chosen": -307.5137939453125, |
|
"logps/rejected": -293.56561279296875, |
|
"loss": 0.5358, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": -0.15299567580223083, |
|
"rewards/margins": 0.7571539282798767, |
|
"rewards/rejected": -0.9101495742797852, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 0.8688824914943732, |
|
"grad_norm": 5.375, |
|
"learning_rate": 2.5672401793681854e-07, |
|
"logits/chosen": -0.565108597278595, |
|
"logits/rejected": -0.5492611527442932, |
|
"logps/chosen": -274.61260986328125, |
|
"logps/rejected": -269.25439453125, |
|
"loss": 0.5048, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -0.17146927118301392, |
|
"rewards/margins": 0.8092424273490906, |
|
"rewards/rejected": -0.9807117581367493, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 0.8793509552473174, |
|
"grad_norm": 5.1875, |
|
"learning_rate": 2.178751501463036e-07, |
|
"logits/chosen": -0.533309280872345, |
|
"logits/rejected": -0.5187536478042603, |
|
"logps/chosen": -313.8269958496094, |
|
"logps/rejected": -307.17987060546875, |
|
"loss": 0.517, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": -0.20717601478099823, |
|
"rewards/margins": 0.7723677158355713, |
|
"rewards/rejected": -0.9795438051223755, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 0.8898194190002617, |
|
"grad_norm": 5.4375, |
|
"learning_rate": 1.820784220652766e-07, |
|
"logits/chosen": -0.5832973122596741, |
|
"logits/rejected": -0.5479104518890381, |
|
"logps/chosen": -345.9014587402344, |
|
"logps/rejected": -279.53985595703125, |
|
"loss": 0.5083, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": -0.0073674083687365055, |
|
"rewards/margins": 0.875013530254364, |
|
"rewards/rejected": -0.8823810815811157, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 0.9002878827532059, |
|
"grad_norm": 5.375, |
|
"learning_rate": 1.4938170864468636e-07, |
|
"logits/chosen": -0.5060287117958069, |
|
"logits/rejected": -0.4896390438079834, |
|
"logps/chosen": -289.37042236328125, |
|
"logps/rejected": -268.93487548828125, |
|
"loss": 0.4806, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": -0.11767210066318512, |
|
"rewards/margins": 0.9038281440734863, |
|
"rewards/rejected": -1.0215001106262207, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 0.9107563465061502, |
|
"grad_norm": 5.53125, |
|
"learning_rate": 1.1982873884064466e-07, |
|
"logits/chosen": -0.4754942059516907, |
|
"logits/rejected": -0.4765293598175049, |
|
"logps/chosen": -287.2554931640625, |
|
"logps/rejected": -276.70794677734375, |
|
"loss": 0.5263, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -0.16617272794246674, |
|
"rewards/margins": 0.6353666186332703, |
|
"rewards/rejected": -0.801539421081543, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 0.9212248102590945, |
|
"grad_norm": 5.75, |
|
"learning_rate": 9.345903713082305e-08, |
|
"logits/chosen": -0.5580819845199585, |
|
"logits/rejected": -0.5499614477157593, |
|
"logps/chosen": -315.15478515625, |
|
"logps/rejected": -280.8697814941406, |
|
"loss": 0.5463, |
|
"rewards/accuracies": 0.625, |
|
"rewards/chosen": -0.1284041702747345, |
|
"rewards/margins": 0.6124891042709351, |
|
"rewards/rejected": -0.7408932447433472, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 0.9316932740120387, |
|
"grad_norm": 5.125, |
|
"learning_rate": 7.030787065396866e-08, |
|
"logits/chosen": -0.5307958722114563, |
|
"logits/rejected": -0.49479198455810547, |
|
"logps/chosen": -318.7244567871094, |
|
"logps/rejected": -293.06219482421875, |
|
"loss": 0.5198, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": -0.11201741546392441, |
|
"rewards/margins": 0.724387526512146, |
|
"rewards/rejected": -0.8364049792289734, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 0.942161737764983, |
|
"grad_norm": 4.96875, |
|
"learning_rate": 5.0406202043228604e-08, |
|
"logits/chosen": -0.541701078414917, |
|
"logits/rejected": -0.5375146865844727, |
|
"logps/chosen": -333.6339111328125, |
|
"logps/rejected": -274.9059753417969, |
|
"loss": 0.4783, |
|
"rewards/accuracies": 0.768750011920929, |
|
"rewards/chosen": -0.0002611428499221802, |
|
"rewards/margins": 0.9774513244628906, |
|
"rewards/rejected": -0.9777124524116516, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 0.942161737764983, |
|
"eval_logits/chosen": -0.4219192862510681, |
|
"eval_logits/rejected": -0.391748309135437, |
|
"eval_logps/chosen": -291.15997314453125, |
|
"eval_logps/rejected": -283.916259765625, |
|
"eval_loss": 0.5189567804336548, |
|
"eval_rewards/accuracies": 0.7260000109672546, |
|
"eval_rewards/chosen": -0.13985459506511688, |
|
"eval_rewards/margins": 0.8060198426246643, |
|
"eval_rewards/rejected": -0.9458745121955872, |
|
"eval_runtime": 349.4658, |
|
"eval_samples_per_second": 5.723, |
|
"eval_steps_per_second": 0.358, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 0.9526302015179272, |
|
"grad_norm": 5.15625, |
|
"learning_rate": 3.378064801637687e-08, |
|
"logits/chosen": -0.5818526148796082, |
|
"logits/rejected": -0.5350463390350342, |
|
"logps/chosen": -314.6712341308594, |
|
"logps/rejected": -313.63116455078125, |
|
"loss": 0.5208, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": -0.047819893807172775, |
|
"rewards/margins": 0.7616057991981506, |
|
"rewards/rejected": -0.8094256520271301, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 0.9630986652708715, |
|
"grad_norm": 4.6875, |
|
"learning_rate": 2.0453443778310766e-08, |
|
"logits/chosen": -0.5121539831161499, |
|
"logits/rejected": -0.48242586851119995, |
|
"logps/chosen": -329.2786865234375, |
|
"logps/rejected": -304.5957336425781, |
|
"loss": 0.5356, |
|
"rewards/accuracies": 0.768750011920929, |
|
"rewards/chosen": -0.06458105146884918, |
|
"rewards/margins": 0.869833767414093, |
|
"rewards/rejected": -0.9344147443771362, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 0.9735671290238157, |
|
"grad_norm": 4.78125, |
|
"learning_rate": 1.0442413283435759e-08, |
|
"logits/chosen": -0.509577751159668, |
|
"logits/rejected": -0.47219425439834595, |
|
"logps/chosen": -317.72943115234375, |
|
"logps/rejected": -278.4623718261719, |
|
"loss": 0.5143, |
|
"rewards/accuracies": 0.762499988079071, |
|
"rewards/chosen": -0.05142117291688919, |
|
"rewards/margins": 1.1160078048706055, |
|
"rewards/rejected": -1.167428970336914, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 0.98403559277676, |
|
"grad_norm": 4.1875, |
|
"learning_rate": 3.760945397705828e-09, |
|
"logits/chosen": -0.5354495048522949, |
|
"logits/rejected": -0.4856652617454529, |
|
"logps/chosen": -290.7127380371094, |
|
"logps/rejected": -260.788330078125, |
|
"loss": 0.487, |
|
"rewards/accuracies": 0.8125, |
|
"rewards/chosen": 0.03208203241229057, |
|
"rewards/margins": 1.0025131702423096, |
|
"rewards/rejected": -0.9704310297966003, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 0.9945040565297043, |
|
"grad_norm": 4.6875, |
|
"learning_rate": 4.1797599220405605e-10, |
|
"logits/chosen": -0.5512427687644958, |
|
"logits/rejected": -0.5348908305168152, |
|
"logps/chosen": -295.21319580078125, |
|
"logps/rejected": -278.82257080078125, |
|
"loss": 0.5053, |
|
"rewards/accuracies": 0.7749999761581421, |
|
"rewards/chosen": -0.0824163556098938, |
|
"rewards/margins": 0.8457223176956177, |
|
"rewards/rejected": -0.9281387329101562, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 0.9997382884061764, |
|
"step": 955, |
|
"total_flos": 0.0, |
|
"train_loss": 0.543355998318857, |
|
"train_runtime": 19333.2554, |
|
"train_samples_per_second": 3.162, |
|
"train_steps_per_second": 0.049 |
|
} |
|
], |
|
"logging_steps": 10, |
|
"max_steps": 955, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 1, |
|
"save_steps": 100000, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": true |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 0.0, |
|
"train_batch_size": 1, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|