{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.97196261682243,
  "eval_steps": 50,
  "global_step": 159,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09345794392523364,
      "grad_norm": 64.25907086156798,
      "learning_rate": 5e-07,
      "logits/chosen": -2.7352654933929443,
      "logits/rejected": -2.715487003326416,
      "logps/chosen": -269.43072509765625,
      "logps/rejected": -218.1147918701172,
      "loss": 0.6899,
      "rewards/accuracies": 0.36250001192092896,
      "rewards/chosen": 0.018381251022219658,
      "rewards/margins": 0.008254295215010643,
      "rewards/rejected": 0.010126957669854164,
      "step": 5
    },
    {
      "epoch": 0.18691588785046728,
      "grad_norm": 46.235718976229606,
      "learning_rate": 1e-06,
      "logits/chosen": -2.6723597049713135,
      "logits/rejected": -2.683154821395874,
      "logps/chosen": -270.26348876953125,
      "logps/rejected": -199.8683624267578,
      "loss": 0.6442,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.5999149084091187,
      "rewards/margins": 0.22199003398418427,
      "rewards/rejected": 0.3779248595237732,
      "step": 10
    },
    {
      "epoch": 0.2803738317757009,
      "grad_norm": 40.05093234219346,
      "learning_rate": 9.972240926774166e-07,
      "logits/chosen": -2.583867073059082,
      "logits/rejected": -2.5568575859069824,
      "logps/chosen": -238.4031982421875,
      "logps/rejected": -207.7652130126953,
      "loss": 0.614,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": 1.6396440267562866,
      "rewards/margins": 0.7280690670013428,
      "rewards/rejected": 0.9115749597549438,
      "step": 15
    },
    {
      "epoch": 0.37383177570093457,
      "grad_norm": 46.527169597733305,
      "learning_rate": 9.889271933555212e-07,
      "logits/chosen": -2.450702667236328,
      "logits/rejected": -2.4366519451141357,
      "logps/chosen": -239.6464080810547,
      "logps/rejected": -188.81138610839844,
      "loss": 0.5975,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": 1.8856239318847656,
      "rewards/margins": 0.852216362953186,
      "rewards/rejected": 1.0334076881408691,
      "step": 20
    },
    {
      "epoch": 0.4672897196261682,
      "grad_norm": 40.30005689440578,
      "learning_rate": 9.752014277286431e-07,
      "logits/chosen": -2.350933790206909,
      "logits/rejected": -2.3373398780822754,
      "logps/chosen": -251.7142791748047,
      "logps/rejected": -187.19984436035156,
      "loss": 0.566,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": 2.030355215072632,
      "rewards/margins": 1.2459053993225098,
      "rewards/rejected": 0.7844498157501221,
      "step": 25
    },
    {
      "epoch": 0.5607476635514018,
      "grad_norm": 38.93034965929662,
      "learning_rate": 9.561992016100291e-07,
      "logits/chosen": -2.2758495807647705,
      "logits/rejected": -2.2311055660247803,
      "logps/chosen": -269.446044921875,
      "logps/rejected": -203.4172821044922,
      "loss": 0.558,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": 1.7135908603668213,
      "rewards/margins": 1.3296939134597778,
      "rewards/rejected": 0.3838968873023987,
      "step": 30
    },
    {
      "epoch": 0.6542056074766355,
      "grad_norm": 46.97283156919659,
      "learning_rate": 9.321315086741915e-07,
      "logits/chosen": -2.253507137298584,
      "logits/rejected": -2.221372127532959,
      "logps/chosen": -247.94827270507812,
      "logps/rejected": -200.51922607421875,
      "loss": 0.579,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": 1.2021849155426025,
      "rewards/margins": 1.4166221618652344,
      "rewards/rejected": -0.21443717181682587,
      "step": 35
    },
    {
      "epoch": 0.7476635514018691,
      "grad_norm": 44.407618951863526,
      "learning_rate": 9.032655876613635e-07,
      "logits/chosen": -2.3225274085998535,
      "logits/rejected": -2.321155071258545,
      "logps/chosen": -245.7095947265625,
      "logps/rejected": -196.00083923339844,
      "loss": 0.5802,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": 0.9343897104263306,
      "rewards/margins": 1.0495737791061401,
      "rewards/rejected": -0.11518410593271255,
      "step": 40
    },
    {
      "epoch": 0.8411214953271028,
      "grad_norm": 35.484899569132175,
      "learning_rate": 8.699219550575952e-07,
      "logits/chosen": -2.3453164100646973,
      "logits/rejected": -2.3334288597106934,
      "logps/chosen": -239.4101104736328,
      "logps/rejected": -217.1162109375,
      "loss": 0.538,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": 1.2655799388885498,
      "rewards/margins": 1.8225781917572021,
      "rewards/rejected": -0.5569985508918762,
      "step": 45
    },
    {
      "epoch": 0.9345794392523364,
      "grad_norm": 38.47661119202761,
      "learning_rate": 8.324708461985124e-07,
      "logits/chosen": -2.336545944213867,
      "logits/rejected": -2.3381569385528564,
      "logps/chosen": -242.3145751953125,
      "logps/rejected": -205.20687866210938,
      "loss": 0.5069,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": 1.2903130054473877,
      "rewards/margins": 1.6887279748916626,
      "rewards/rejected": -0.39841485023498535,
      "step": 50
    },
    {
      "epoch": 0.9345794392523364,
      "eval_logits/chosen": -2.3625147342681885,
      "eval_logits/rejected": -2.3323090076446533,
      "eval_logps/chosen": -268.0925598144531,
      "eval_logps/rejected": -191.181396484375,
      "eval_loss": 0.544481635093689,
      "eval_rewards/accuracies": 0.7708333134651184,
      "eval_rewards/chosen": 1.1549440622329712,
      "eval_rewards/margins": 1.484925627708435,
      "eval_rewards/rejected": -0.3299814760684967,
      "eval_runtime": 101.1546,
      "eval_samples_per_second": 15.027,
      "eval_steps_per_second": 0.237,
      "step": 50
    },
    {
      "epoch": 1.02803738317757,
      "grad_norm": 21.885999790849485,
      "learning_rate": 7.913281043133977e-07,
      "logits/chosen": -2.3184032440185547,
      "logits/rejected": -2.2938473224639893,
      "logps/chosen": -250.3560028076172,
      "logps/rejected": -233.30630493164062,
      "loss": 0.4787,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": 1.2203357219696045,
      "rewards/margins": 2.1394553184509277,
      "rewards/rejected": -0.9191198348999023,
      "step": 55
    },
    {
      "epoch": 1.1214953271028036,
      "grad_norm": 14.465910382172174,
      "learning_rate": 7.469505631561317e-07,
      "logits/chosen": -2.2429802417755127,
      "logits/rejected": -2.2311806678771973,
      "logps/chosen": -239.5115966796875,
      "logps/rejected": -209.9403076171875,
      "loss": 0.2297,
      "rewards/accuracies": 0.893750011920929,
      "rewards/chosen": 1.8955681324005127,
      "rewards/margins": 3.0370676517486572,
      "rewards/rejected": -1.1414995193481445,
      "step": 60
    },
    {
      "epoch": 1.2149532710280373,
      "grad_norm": 22.768586089667867,
      "learning_rate": 6.998309744925411e-07,
      "logits/chosen": -2.2206153869628906,
      "logits/rejected": -2.1977906227111816,
      "logps/chosen": -241.6571044921875,
      "logps/rejected": -230.4139404296875,
      "loss": 0.2463,
      "rewards/accuracies": 0.9437500238418579,
      "rewards/chosen": 2.490936756134033,
      "rewards/margins": 3.5978896617889404,
      "rewards/rejected": -1.1069526672363281,
      "step": 65
    },
    {
      "epoch": 1.308411214953271,
      "grad_norm": 21.234164671655243,
      "learning_rate": 6.504925367674594e-07,
      "logits/chosen": -2.2947399616241455,
      "logits/rejected": -2.2711234092712402,
      "logps/chosen": -247.6901397705078,
      "logps/rejected": -232.25143432617188,
      "loss": 0.2289,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": 2.7376725673675537,
      "rewards/margins": 4.084096431732178,
      "rewards/rejected": -1.3464237451553345,
      "step": 70
    },
    {
      "epoch": 1.4018691588785046,
      "grad_norm": 23.214123112548595,
      "learning_rate": 5.994830857031499e-07,
      "logits/chosen": -2.3061466217041016,
      "logits/rejected": -2.2935497760772705,
      "logps/chosen": -235.4070587158203,
      "logps/rejected": -238.2843475341797,
      "loss": 0.2618,
      "rewards/accuracies": 0.893750011920929,
      "rewards/chosen": 2.649362802505493,
      "rewards/margins": 3.7771477699279785,
      "rewards/rejected": -1.1277848482131958,
      "step": 75
    },
    {
      "epoch": 1.4953271028037383,
      "grad_norm": 23.002370346021507,
      "learning_rate": 5.473690113345342e-07,
      "logits/chosen": -2.30381441116333,
      "logits/rejected": -2.2565557956695557,
      "logps/chosen": -241.03811645507812,
      "logps/rejected": -209.29025268554688,
      "loss": 0.2933,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": 2.297919750213623,
      "rewards/margins": 3.4454472064971924,
      "rewards/rejected": -1.1475274562835693,
      "step": 80
    },
    {
      "epoch": 1.588785046728972,
      "grad_norm": 17.986443749085126,
      "learning_rate": 4.947289690242102e-07,
      "logits/chosen": -2.259481906890869,
      "logits/rejected": -2.2490382194519043,
      "logps/chosen": -242.26708984375,
      "logps/rejected": -219.21975708007812,
      "loss": 0.2607,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": 2.5205655097961426,
      "rewards/margins": 3.4021854400634766,
      "rewards/rejected": -0.8816197514533997,
      "step": 85
    },
    {
      "epoch": 1.6822429906542056,
      "grad_norm": 23.527129100232212,
      "learning_rate": 4.421474542878194e-07,
      "logits/chosen": -2.250342845916748,
      "logits/rejected": -2.206367015838623,
      "logps/chosen": -218.79141235351562,
      "logps/rejected": -249.013916015625,
      "loss": 0.2821,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": 2.248781442642212,
      "rewards/margins": 3.799814224243164,
      "rewards/rejected": -1.5510326623916626,
      "step": 90
    },
    {
      "epoch": 1.7757009345794392,
      "grad_norm": 20.08726736786646,
      "learning_rate": 3.902083127725186e-07,
      "logits/chosen": -2.210190534591675,
      "logits/rejected": -2.192523241043091,
      "logps/chosen": -221.87985229492188,
      "logps/rejected": -220.18179321289062,
      "loss": 0.2499,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": 2.266261577606201,
      "rewards/margins": 3.49079966545105,
      "rewards/rejected": -1.224537968635559,
      "step": 95
    },
    {
      "epoch": 1.8691588785046729,
      "grad_norm": 23.627843810647025,
      "learning_rate": 3.394882574513519e-07,
      "logits/chosen": -2.202239990234375,
      "logits/rejected": -2.1847739219665527,
      "logps/chosen": -247.0044708251953,
      "logps/rejected": -197.84573364257812,
      "loss": 0.33,
      "rewards/accuracies": 0.9437500238418579,
      "rewards/chosen": 2.2673051357269287,
      "rewards/margins": 3.611363649368286,
      "rewards/rejected": -1.3440587520599365,
      "step": 100
    },
    {
      "epoch": 1.8691588785046729,
      "eval_logits/chosen": -2.227185010910034,
      "eval_logits/rejected": -2.1957762241363525,
      "eval_logps/chosen": -261.9975891113281,
      "eval_logps/rejected": -194.4145965576172,
      "eval_loss": 0.5182002186775208,
      "eval_rewards/accuracies": 0.8229166865348816,
      "eval_rewards/chosen": 1.7644418478012085,
      "eval_rewards/margins": 2.417741060256958,
      "eval_rewards/rejected": -0.6532991528511047,
      "eval_runtime": 100.7777,
      "eval_samples_per_second": 15.083,
      "eval_steps_per_second": 0.238,
      "step": 100
    },
    {
      "epoch": 1.9626168224299065,
      "grad_norm": 21.751449762111058,
      "learning_rate": 2.9055046501619083e-07,
      "logits/chosen": -2.224174976348877,
      "logits/rejected": -2.170074701309204,
      "logps/chosen": -228.6382293701172,
      "logps/rejected": -245.8506317138672,
      "loss": 0.3113,
      "rewards/accuracies": 0.918749988079071,
      "rewards/chosen": 2.3702425956726074,
      "rewards/margins": 3.569148302078247,
      "rewards/rejected": -1.1989054679870605,
      "step": 105
    },
    {
      "epoch": 2.05607476635514,
      "grad_norm": 14.770503354093574,
      "learning_rate": 2.439383225725225e-07,
      "logits/chosen": -2.189666986465454,
      "logits/rejected": -2.1793408393859863,
      "logps/chosen": -235.7542724609375,
      "logps/rejected": -218.64761352539062,
      "loss": 0.1889,
      "rewards/accuracies": 0.9437500238418579,
      "rewards/chosen": 2.4319849014282227,
      "rewards/margins": 3.8941452503204346,
      "rewards/rejected": -1.4621602296829224,
      "step": 110
    },
    {
      "epoch": 2.149532710280374,
      "grad_norm": 12.833265134737909,
      "learning_rate": 2.0016939407046986e-07,
      "logits/chosen": -2.2413907051086426,
      "logits/rejected": -2.1873061656951904,
      "logps/chosen": -228.26724243164062,
      "logps/rejected": -209.82168579101562,
      "loss": 0.1437,
      "rewards/accuracies": 0.96875,
      "rewards/chosen": 2.307067394256592,
      "rewards/margins": 3.9373679161071777,
      "rewards/rejected": -1.6303008794784546,
      "step": 115
    },
    {
      "epoch": 2.2429906542056073,
      "grad_norm": 11.822968447156347,
      "learning_rate": 1.5972967346655448e-07,
      "logits/chosen": -2.276632308959961,
      "logits/rejected": -2.248224973678589,
      "logps/chosen": -223.19857788085938,
      "logps/rejected": -219.5464630126953,
      "loss": 0.1335,
      "rewards/accuracies": 0.956250011920929,
      "rewards/chosen": 2.4452242851257324,
      "rewards/margins": 3.830425977706909,
      "rewards/rejected": -1.3852016925811768,
      "step": 120
    },
    {
      "epoch": 2.336448598130841,
      "grad_norm": 18.39765754158861,
      "learning_rate": 1.2306818842696715e-07,
      "logits/chosen": -2.324186325073242,
      "logits/rejected": -2.2840449810028076,
      "logps/chosen": -234.97903442382812,
      "logps/rejected": -232.93582153320312,
      "loss": 0.1592,
      "rewards/accuracies": 0.9312499761581421,
      "rewards/chosen": 2.6819796562194824,
      "rewards/margins": 4.276698112487793,
      "rewards/rejected": -1.5947178602218628,
      "step": 125
    },
    {
      "epoch": 2.4299065420560746,
      "grad_norm": 15.47036404967669,
      "learning_rate": 9.059201449082043e-08,
      "logits/chosen": -2.314243793487549,
      "logits/rejected": -2.2900912761688232,
      "logps/chosen": -240.45388793945312,
      "logps/rejected": -212.1385498046875,
      "loss": 0.1483,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": 2.4335389137268066,
      "rewards/margins": 3.873295307159424,
      "rewards/rejected": -1.4397567510604858,
      "step": 130
    },
    {
      "epoch": 2.5233644859813085,
      "grad_norm": 19.170309374619883,
      "learning_rate": 6.266175505426957e-08,
      "logits/chosen": -2.283237934112549,
      "logits/rejected": -2.266099691390991,
      "logps/chosen": -241.7128448486328,
      "logps/rejected": -223.68429565429688,
      "loss": 0.155,
      "rewards/accuracies": 0.90625,
      "rewards/chosen": 2.2079215049743652,
      "rewards/margins": 4.085274696350098,
      "rewards/rejected": -1.8773536682128906,
      "step": 135
    },
    {
      "epoch": 2.616822429906542,
      "grad_norm": 14.296438566762903,
      "learning_rate": 3.958753736408105e-08,
      "logits/chosen": -2.3092122077941895,
      "logits/rejected": -2.2786989212036133,
      "logps/chosen": -230.52639770507812,
      "logps/rejected": -231.3815460205078,
      "loss": 0.1404,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": 2.2946927547454834,
      "rewards/margins": 4.114151954650879,
      "rewards/rejected": -1.8194587230682373,
      "step": 140
    },
    {
      "epoch": 2.710280373831776,
      "grad_norm": 16.63743908750786,
      "learning_rate": 2.162556897965101e-08,
      "logits/chosen": -2.3131165504455566,
      "logits/rejected": -2.294689178466797,
      "logps/chosen": -237.7254180908203,
      "logps/rejected": -216.6897430419922,
      "loss": 0.158,
      "rewards/accuracies": 0.918749988079071,
      "rewards/chosen": 2.0540032386779785,
      "rewards/margins": 3.7803573608398438,
      "rewards/rejected": -1.7263540029525757,
      "step": 145
    },
    {
      "epoch": 2.803738317757009,
      "grad_norm": 16.098164019474485,
      "learning_rate": 8.975292939244927e-09,
      "logits/chosen": -2.31139874458313,
      "logits/rejected": -2.2886271476745605,
      "logps/chosen": -231.0320281982422,
      "logps/rejected": -239.72476196289062,
      "loss": 0.1617,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": 2.205547332763672,
      "rewards/margins": 4.294602870941162,
      "rewards/rejected": -2.0890560150146484,
      "step": 150
    },
    {
      "epoch": 2.803738317757009,
      "eval_logits/chosen": -2.32828426361084,
      "eval_logits/rejected": -2.301532030105591,
      "eval_logps/chosen": -264.09381103515625,
      "eval_logps/rejected": -198.0356903076172,
      "eval_loss": 0.5213030576705933,
      "eval_rewards/accuracies": 0.8177083134651184,
      "eval_rewards/chosen": 1.5548186302185059,
      "eval_rewards/margins": 2.570230484008789,
      "eval_rewards/rejected": -1.0154114961624146,
      "eval_runtime": 100.5048,
      "eval_samples_per_second": 15.124,
      "eval_steps_per_second": 0.239,
      "step": 150
    },
    {
      "epoch": 2.897196261682243,
      "grad_norm": 15.518735395097615,
      "learning_rate": 1.7771732184357901e-09,
      "logits/chosen": -2.3752760887145996,
      "logits/rejected": -2.2865824699401855,
      "logps/chosen": -246.28573608398438,
      "logps/rejected": -245.54965209960938,
      "loss": 0.1333,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": 2.6093807220458984,
      "rewards/margins": 4.653998374938965,
      "rewards/rejected": -2.044617176055908,
      "step": 155
    },
    {
      "epoch": 2.97196261682243,
      "step": 159,
      "total_flos": 1874604243025920.0,
      "train_loss": 0.33642754952112836,
      "train_runtime": 6089.7617,
      "train_samples_per_second": 6.737,
      "train_steps_per_second": 0.026
    }
  ],
  "logging_steps": 5,
  "max_steps": 159,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1874604243025920.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}