|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 50,
  "global_step": 436,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.022935779816513763,
      "grad_norm": 3.1641646757229265,
      "learning_rate": 1.1363636363636363e-07,
      "logits/chosen": -2.6194896697998047,
      "logits/rejected": -2.552656650543213,
      "logps/chosen": -265.41778564453125,
      "logps/rejected": -236.15280151367188,
      "loss": 0.0585,
      "rewards/accuracies": 0.39375001192092896,
      "rewards/chosen": 0.00028036616276949644,
      "rewards/margins": 0.0005085940356366336,
      "rewards/rejected": -0.00022822784376330674,
      "step": 10
    },
    {
      "epoch": 0.045871559633027525,
      "grad_norm": 2.9506602825308903,
      "learning_rate": 2.2727272727272726e-07,
      "logits/chosen": -2.6579580307006836,
      "logits/rejected": -2.575996160507202,
      "logps/chosen": -298.8293151855469,
      "logps/rejected": -274.3349304199219,
      "loss": 0.059,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": -0.0004406004154589027,
      "rewards/margins": 0.0018091611564159393,
      "rewards/rejected": -0.0022497617173939943,
      "step": 20
    },
    {
      "epoch": 0.06880733944954129,
      "grad_norm": 2.7648586784794067,
      "learning_rate": 3.4090909090909085e-07,
      "logits/chosen": -2.6760480403900146,
      "logits/rejected": -2.6023683547973633,
      "logps/chosen": -290.41986083984375,
      "logps/rejected": -234.40579223632812,
      "loss": 0.0571,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.009129038080573082,
      "rewards/margins": 0.01336810551583767,
      "rewards/rejected": -0.004239068366587162,
      "step": 30
    },
    {
      "epoch": 0.09174311926605505,
      "grad_norm": 2.7634574887860825,
      "learning_rate": 4.545454545454545e-07,
      "logits/chosen": -2.6598825454711914,
      "logits/rejected": -2.610196352005005,
      "logps/chosen": -280.923828125,
      "logps/rejected": -267.7273864746094,
      "loss": 0.0529,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.04245365411043167,
      "rewards/margins": 0.04179743677377701,
      "rewards/rejected": 0.0006562232738360763,
      "step": 40
    },
    {
      "epoch": 0.11467889908256881,
      "grad_norm": 2.9044861722207567,
      "learning_rate": 4.997110275491701e-07,
      "logits/chosen": -2.620276927947998,
      "logits/rejected": -2.6136012077331543,
      "logps/chosen": -292.2350158691406,
      "logps/rejected": -302.38018798828125,
      "loss": 0.0487,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": 0.04016200453042984,
      "rewards/margins": 0.07212186604738235,
      "rewards/rejected": -0.031959857791662216,
      "step": 50
    },
    {
      "epoch": 0.11467889908256881,
      "eval_logits/chosen": -2.5716989040374756,
      "eval_logits/rejected": -2.4927897453308105,
      "eval_logps/chosen": -282.0911865234375,
      "eval_logps/rejected": -254.96926879882812,
      "eval_loss": 0.04548575356602669,
      "eval_rewards/accuracies": 0.7025862336158752,
      "eval_rewards/chosen": 0.02998923696577549,
      "eval_rewards/margins": 0.11445755511522293,
      "eval_rewards/rejected": -0.08446833491325378,
      "eval_runtime": 95.7446,
      "eval_samples_per_second": 18.988,
      "eval_steps_per_second": 0.303,
      "step": 50
    },
    {
      "epoch": 0.13761467889908258,
      "grad_norm": 3.2642316994426857,
      "learning_rate": 4.979475034558115e-07,
      "logits/chosen": -2.5661163330078125,
      "logits/rejected": -2.50827956199646,
      "logps/chosen": -289.7759704589844,
      "logps/rejected": -267.379638671875,
      "loss": 0.0449,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -0.0029130387119948864,
      "rewards/margins": 0.09531786292791367,
      "rewards/rejected": -0.09823091328144073,
      "step": 60
    },
    {
      "epoch": 0.16055045871559634,
      "grad_norm": 3.167167289379583,
      "learning_rate": 4.945923025551788e-07,
      "logits/chosen": -2.460839033126831,
      "logits/rejected": -2.4139201641082764,
      "logps/chosen": -324.13690185546875,
      "logps/rejected": -276.5367126464844,
      "loss": 0.0446,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": 0.034340955317020416,
      "rewards/margins": 0.1856319159269333,
      "rewards/rejected": -0.15129096806049347,
      "step": 70
    },
    {
      "epoch": 0.1834862385321101,
      "grad_norm": 3.00053019606861,
      "learning_rate": 4.896669632591651e-07,
      "logits/chosen": -2.356163740158081,
      "logits/rejected": -2.2467029094696045,
      "logps/chosen": -287.4019470214844,
      "logps/rejected": -272.3543701171875,
      "loss": 0.0416,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -0.015520763583481312,
      "rewards/margins": 0.18146219849586487,
      "rewards/rejected": -0.1969829499721527,
      "step": 80
    },
    {
      "epoch": 0.20642201834862386,
      "grad_norm": 3.936347429433164,
      "learning_rate": 4.832031033425662e-07,
      "logits/chosen": -2.0837090015411377,
      "logits/rejected": -2.0328478813171387,
      "logps/chosen": -287.70654296875,
      "logps/rejected": -268.2559509277344,
      "loss": 0.0405,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.001131159020587802,
      "rewards/margins": 0.2134155035018921,
      "rewards/rejected": -0.21454665064811707,
      "step": 90
    },
    {
      "epoch": 0.22935779816513763,
      "grad_norm": 3.652555898125068,
      "learning_rate": 4.752422169756047e-07,
      "logits/chosen": -1.5697656869888306,
      "logits/rejected": -1.561859130859375,
      "logps/chosen": -289.6307678222656,
      "logps/rejected": -307.66363525390625,
      "loss": 0.0408,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.17206048965454102,
      "rewards/margins": 0.18529178202152252,
      "rewards/rejected": -0.35735225677490234,
      "step": 100
    },
    {
      "epoch": 0.22935779816513763,
      "eval_logits/chosen": -1.8404057025909424,
      "eval_logits/rejected": -1.7416090965270996,
      "eval_logps/chosen": -295.8004455566406,
      "eval_logps/rejected": -277.408935546875,
      "eval_loss": 0.03911930322647095,
      "eval_rewards/accuracies": 0.6896551847457886,
      "eval_rewards/chosen": -0.10710364580154419,
      "eval_rewards/margins": 0.201760932803154,
      "eval_rewards/rejected": -0.308864563703537,
      "eval_runtime": 95.267,
      "eval_samples_per_second": 19.083,
      "eval_steps_per_second": 0.304,
      "step": 100
    },
    {
      "epoch": 0.25229357798165136,
      "grad_norm": 3.509676092042844,
      "learning_rate": 4.658354083558188e-07,
      "logits/chosen": -1.9262816905975342,
      "logits/rejected": -1.80355966091156,
      "logps/chosen": -279.3715515136719,
      "logps/rejected": -263.81549072265625,
      "loss": 0.0402,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.14183419942855835,
      "rewards/margins": 0.15898862481117249,
      "rewards/rejected": -0.30082279443740845,
      "step": 110
    },
    {
      "epoch": 0.27522935779816515,
      "grad_norm": 3.984377752564919,
      "learning_rate": 4.550430636492389e-07,
      "logits/chosen": -1.8143861293792725,
      "logits/rejected": -1.7607357501983643,
      "logps/chosen": -295.43243408203125,
      "logps/rejected": -289.3270568847656,
      "loss": 0.0395,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.19613118469715118,
      "rewards/margins": 0.18250016868114471,
      "rewards/rejected": -0.3786313831806183,
      "step": 120
    },
    {
      "epoch": 0.2981651376146789,
      "grad_norm": 3.895538046894766,
      "learning_rate": 4.429344633468004e-07,
      "logits/chosen": -1.742222785949707,
      "logits/rejected": -1.6753323078155518,
      "logps/chosen": -268.340087890625,
      "logps/rejected": -275.31500244140625,
      "loss": 0.0396,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.12159997224807739,
      "rewards/margins": 0.20542438328266144,
      "rewards/rejected": -0.32702434062957764,
      "step": 130
    },
    {
      "epoch": 0.3211009174311927,
      "grad_norm": 4.968671690125824,
      "learning_rate": 4.2958733752443187e-07,
      "logits/chosen": -1.6684496402740479,
      "logits/rejected": -1.5686622858047485,
      "logps/chosen": -286.41973876953125,
      "logps/rejected": -254.42691040039062,
      "loss": 0.0381,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -0.10234608501195908,
      "rewards/margins": 0.19895747303962708,
      "rewards/rejected": -0.30130359530448914,
      "step": 140
    },
    {
      "epoch": 0.3440366972477064,
      "grad_norm": 3.5584001205811377,
      "learning_rate": 4.150873668617898e-07,
      "logits/chosen": -1.703791856765747,
      "logits/rejected": -1.5906720161437988,
      "logps/chosen": -280.5138854980469,
      "logps/rejected": -269.3014831542969,
      "loss": 0.0379,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.13970300555229187,
      "rewards/margins": 0.22143635153770447,
      "rewards/rejected": -0.36113935708999634,
      "step": 150
    },
    {
      "epoch": 0.3440366972477064,
      "eval_logits/chosen": -1.7903783321380615,
      "eval_logits/rejected": -1.635672688484192,
      "eval_logps/chosen": -299.1519470214844,
      "eval_logps/rejected": -287.88739013671875,
      "eval_loss": 0.03649381175637245,
      "eval_rewards/accuracies": 0.7155172228813171,
      "eval_rewards/chosen": -0.14061833918094635,
      "eval_rewards/margins": 0.27303099632263184,
      "eval_rewards/rejected": -0.413649320602417,
      "eval_runtime": 94.6259,
      "eval_samples_per_second": 19.212,
      "eval_steps_per_second": 0.306,
      "step": 150
    },
    {
      "epoch": 0.3669724770642202,
      "grad_norm": 5.2149351876346905,
      "learning_rate": 3.9952763262280397e-07,
      "logits/chosen": -1.6572643518447876,
      "logits/rejected": -1.5417451858520508,
      "logps/chosen": -309.5204162597656,
      "logps/rejected": -324.3457946777344,
      "loss": 0.0359,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -0.12969312071800232,
      "rewards/margins": 0.254078209400177,
      "rewards/rejected": -0.3837713599205017,
      "step": 160
    },
    {
      "epoch": 0.38990825688073394,
      "grad_norm": 2.9551901080690395,
      "learning_rate": 3.8300801912883414e-07,
      "logits/chosen": -1.6375774145126343,
      "logits/rejected": -1.5148413181304932,
      "logps/chosen": -277.994873046875,
      "logps/rejected": -301.9854431152344,
      "loss": 0.0339,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -0.06393978744745255,
      "rewards/margins": 0.2552258372306824,
      "rewards/rejected": -0.3191656172275543,
      "step": 170
    },
    {
      "epoch": 0.41284403669724773,
      "grad_norm": 3.6761291533851423,
      "learning_rate": 3.6563457256020884e-07,
      "logits/chosen": -1.527038335800171,
      "logits/rejected": -1.2771427631378174,
      "logps/chosen": -323.6756896972656,
      "logps/rejected": -284.1974792480469,
      "loss": 0.0365,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -0.20317766070365906,
      "rewards/margins": 0.2538478374481201,
      "rewards/rejected": -0.4570254683494568,
      "step": 180
    },
    {
      "epoch": 0.43577981651376146,
      "grad_norm": 5.675419312165092,
      "learning_rate": 3.475188202022617e-07,
      "logits/chosen": -1.6745436191558838,
      "logits/rejected": -1.604264259338379,
      "logps/chosen": -268.5342102050781,
      "logps/rejected": -302.51898193359375,
      "loss": 0.0368,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.10419674217700958,
      "rewards/margins": 0.27754858136177063,
      "rewards/rejected": -0.38174527883529663,
      "step": 190
    },
    {
      "epoch": 0.45871559633027525,
      "grad_norm": 3.4589111733943283,
      "learning_rate": 3.287770545059052e-07,
      "logits/chosen": -1.8974136114120483,
      "logits/rejected": -1.7577918767929077,
      "logps/chosen": -291.4000549316406,
      "logps/rejected": -281.4195861816406,
      "loss": 0.0365,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -0.1311841905117035,
      "rewards/margins": 0.24445155262947083,
      "rewards/rejected": -0.3756357431411743,
      "step": 200
    },
    {
      "epoch": 0.45871559633027525,
      "eval_logits/chosen": -1.9178062677383423,
      "eval_logits/rejected": -1.7552225589752197,
      "eval_logps/chosen": -291.5865783691406,
      "eval_logps/rejected": -279.1630859375,
      "eval_loss": 0.03504867106676102,
      "eval_rewards/accuracies": 0.7543103694915771,
      "eval_rewards/chosen": -0.06496486067771912,
      "eval_rewards/margins": 0.2614416182041168,
      "eval_rewards/rejected": -0.32640647888183594,
      "eval_runtime": 95.5023,
      "eval_samples_per_second": 19.036,
      "eval_steps_per_second": 0.304,
      "step": 200
    },
    {
      "epoch": 0.481651376146789,
      "grad_norm": 2.8163994777137717,
      "learning_rate": 3.0952958655864954e-07,
      "logits/chosen": -1.794647455215454,
      "logits/rejected": -1.7260096073150635,
      "logps/chosen": -280.9212951660156,
      "logps/rejected": -287.47674560546875,
      "loss": 0.0349,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.07616890966892242,
      "rewards/margins": 0.2551766633987427,
      "rewards/rejected": -0.3313455581665039,
      "step": 210
    },
    {
      "epoch": 0.5045871559633027,
      "grad_norm": 3.602353089915511,
      "learning_rate": 2.898999737583448e-07,
      "logits/chosen": -1.672721266746521,
      "logits/rejected": -1.4848198890686035,
      "logps/chosen": -344.14825439453125,
      "logps/rejected": -337.38470458984375,
      "loss": 0.0336,
      "rewards/accuracies": 0.8062499761581421,
      "rewards/chosen": -0.2146792709827423,
      "rewards/margins": 0.3094654977321625,
      "rewards/rejected": -0.5241447687149048,
      "step": 220
    },
    {
      "epoch": 0.5275229357798165,
      "grad_norm": 3.2287234258499633,
      "learning_rate": 2.7001422664752333e-07,
      "logits/chosen": -1.4607375860214233,
      "logits/rejected": -1.3907452821731567,
      "logps/chosen": -288.92095947265625,
      "logps/rejected": -309.4497985839844,
      "loss": 0.0354,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -0.20977428555488586,
      "rewards/margins": 0.22852472960948944,
      "rewards/rejected": -0.4382990002632141,
      "step": 230
    },
    {
      "epoch": 0.5504587155963303,
      "grad_norm": 3.200697411595364,
      "learning_rate": 2.5e-07,
      "logits/chosen": -1.6722291707992554,
      "logits/rejected": -1.496265172958374,
      "logps/chosen": -304.0669250488281,
      "logps/rejected": -301.3495788574219,
      "loss": 0.0352,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.16535088419914246,
      "rewards/margins": 0.23464509844779968,
      "rewards/rejected": -0.39999598264694214,
      "step": 240
    },
    {
      "epoch": 0.573394495412844,
      "grad_norm": 3.068087094061713,
      "learning_rate": 2.2998577335247667e-07,
      "logits/chosen": -1.8118059635162354,
      "logits/rejected": -1.58005952835083,
      "logps/chosen": -316.8459777832031,
      "logps/rejected": -301.0585632324219,
      "loss": 0.0346,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.1305214762687683,
      "rewards/margins": 0.27562573552131653,
      "rewards/rejected": -0.40614718198776245,
      "step": 250
    },
    {
      "epoch": 0.573394495412844,
      "eval_logits/chosen": -1.719161868095398,
      "eval_logits/rejected": -1.4870593547821045,
      "eval_logps/chosen": -298.28277587890625,
      "eval_logps/rejected": -291.9156494140625,
      "eval_loss": 0.033715467900037766,
      "eval_rewards/accuracies": 0.7543103694915771,
      "eval_rewards/chosen": -0.1319267898797989,
      "eval_rewards/margins": 0.3220053017139435,
      "eval_rewards/rejected": -0.4539320766925812,
      "eval_runtime": 94.8489,
      "eval_samples_per_second": 19.167,
      "eval_steps_per_second": 0.306,
      "step": 250
    },
    {
      "epoch": 0.5963302752293578,
      "grad_norm": 2.973513185196363,
      "learning_rate": 2.1010002624165524e-07,
      "logits/chosen": -1.625998854637146,
      "logits/rejected": -1.528102159500122,
      "logps/chosen": -289.2758483886719,
      "logps/rejected": -324.4759216308594,
      "loss": 0.0343,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.18561328947544098,
      "rewards/margins": 0.3152090609073639,
      "rewards/rejected": -0.5008223652839661,
      "step": 260
    },
    {
      "epoch": 0.6192660550458715,
      "grad_norm": 3.3992650920748635,
      "learning_rate": 1.9047041344135043e-07,
      "logits/chosen": -1.468390703201294,
      "logits/rejected": -1.4237253665924072,
      "logps/chosen": -287.03558349609375,
      "logps/rejected": -301.84613037109375,
      "loss": 0.0339,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.209732323884964,
      "rewards/margins": 0.2902582287788391,
      "rewards/rejected": -0.4999905526638031,
      "step": 270
    },
    {
      "epoch": 0.6422018348623854,
      "grad_norm": 3.2930402827157863,
      "learning_rate": 1.7122294549409482e-07,
      "logits/chosen": -1.6038357019424438,
      "logits/rejected": -1.4040790796279907,
      "logps/chosen": -287.1599426269531,
      "logps/rejected": -311.6736755371094,
      "loss": 0.0349,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.14161059260368347,
      "rewards/margins": 0.3371681869029999,
      "rewards/rejected": -0.4787788391113281,
      "step": 280
    },
    {
      "epoch": 0.6651376146788991,
      "grad_norm": 3.539437987763887,
      "learning_rate": 1.524811797977383e-07,
      "logits/chosen": -1.6025069952011108,
      "logits/rejected": -1.4434142112731934,
      "logps/chosen": -300.49578857421875,
      "logps/rejected": -295.3720397949219,
      "loss": 0.0334,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -0.12764978408813477,
      "rewards/margins": 0.2869029641151428,
      "rewards/rejected": -0.4145527780056,
      "step": 290
    },
    {
      "epoch": 0.6880733944954128,
      "grad_norm": 3.11392798700287,
      "learning_rate": 1.3436542743979125e-07,
      "logits/chosen": -1.6770799160003662,
      "logits/rejected": -1.6160328388214111,
      "logps/chosen": -319.42236328125,
      "logps/rejected": -295.0827331542969,
      "loss": 0.036,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.1347302943468094,
      "rewards/margins": 0.22749857604503632,
      "rewards/rejected": -0.3622289299964905,
      "step": 300
    },
    {
      "epoch": 0.6880733944954128,
      "eval_logits/chosen": -1.6834876537322998,
      "eval_logits/rejected": -1.4841814041137695,
      "eval_logps/chosen": -298.45037841796875,
      "eval_logps/rejected": -289.4286193847656,
      "eval_loss": 0.03310078755021095,
      "eval_rewards/accuracies": 0.75,
      "eval_rewards/chosen": -0.13360273838043213,
      "eval_rewards/margins": 0.29545897245407104,
      "eval_rewards/rejected": -0.4290617108345032,
      "eval_runtime": 94.9394,
      "eval_samples_per_second": 19.149,
      "eval_steps_per_second": 0.305,
      "step": 300
    },
    {
      "epoch": 0.7110091743119266,
      "grad_norm": 5.19818595642257,
      "learning_rate": 1.1699198087116588e-07,
      "logits/chosen": -1.741039514541626,
      "logits/rejected": -1.6022865772247314,
      "logps/chosen": -294.25433349609375,
      "logps/rejected": -306.18817138671875,
      "loss": 0.0357,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -0.16289182007312775,
      "rewards/margins": 0.23449257016181946,
      "rewards/rejected": -0.397384375333786,
      "step": 310
    },
    {
      "epoch": 0.7339449541284404,
      "grad_norm": 3.4771448226315957,
      "learning_rate": 1.00472367377196e-07,
      "logits/chosen": -1.6833099126815796,
      "logits/rejected": -1.5138328075408936,
      "logps/chosen": -290.2877197265625,
      "logps/rejected": -281.9583435058594,
      "loss": 0.0353,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -0.16055694222450256,
      "rewards/margins": 0.3141244947910309,
      "rewards/rejected": -0.47468137741088867,
      "step": 320
    },
    {
      "epoch": 0.7568807339449541,
      "grad_norm": 3.5214688030455066,
      "learning_rate": 8.49126331382102e-08,
      "logits/chosen": -1.6057231426239014,
      "logits/rejected": -1.4786289930343628,
      "logps/chosen": -291.7789306640625,
      "logps/rejected": -288.677001953125,
      "loss": 0.0348,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -0.17256753146648407,
      "rewards/margins": 0.24320188164710999,
      "rewards/rejected": -0.41576939821243286,
      "step": 330
    },
    {
      "epoch": 0.7798165137614679,
      "grad_norm": 3.5006792226854495,
      "learning_rate": 7.041266247556812e-08,
      "logits/chosen": -1.7440093755722046,
      "logits/rejected": -1.6453059911727905,
      "logps/chosen": -304.0636901855469,
      "logps/rejected": -299.057861328125,
      "loss": 0.0337,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -0.16071084141731262,
      "rewards/margins": 0.23002786934375763,
      "rewards/rejected": -0.39073869585990906,
      "step": 340
    },
    {
      "epoch": 0.8027522935779816,
      "grad_norm": 3.5127255839066094,
      "learning_rate": 5.706553665319955e-08,
      "logits/chosen": -1.7355537414550781,
      "logits/rejected": -1.5236848592758179,
      "logps/chosen": -292.27850341796875,
      "logps/rejected": -280.9353332519531,
      "loss": 0.0359,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -0.17787429690361023,
      "rewards/margins": 0.25486475229263306,
      "rewards/rejected": -0.4327389597892761,
      "step": 350
    },
    {
      "epoch": 0.8027522935779816,
      "eval_logits/chosen": -1.6786376237869263,
      "eval_logits/rejected": -1.465800166130066,
      "eval_logps/chosen": -298.86663818359375,
      "eval_logps/rejected": -291.2398681640625,
      "eval_loss": 0.0327293798327446,
      "eval_rewards/accuracies": 0.7586206793785095,
      "eval_rewards/chosen": -0.1377655267715454,
      "eval_rewards/margins": 0.30940860509872437,
      "eval_rewards/rejected": -0.44717416167259216,
      "eval_runtime": 93.7918,
      "eval_samples_per_second": 19.383,
      "eval_steps_per_second": 0.309,
      "step": 350
    },
    {
      "epoch": 0.8256880733944955,
      "grad_norm": 2.7935239013392237,
      "learning_rate": 4.4956936350761005e-08,
      "logits/chosen": -1.6336042881011963,
      "logits/rejected": -1.5328700542449951,
      "logps/chosen": -265.51251220703125,
      "logps/rejected": -297.154541015625,
      "loss": 0.0328,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.1319248378276825,
      "rewards/margins": 0.2542495131492615,
      "rewards/rejected": -0.38617435097694397,
      "step": 360
    },
    {
      "epoch": 0.8486238532110092,
      "grad_norm": 3.203845878056458,
      "learning_rate": 3.416459164418123e-08,
      "logits/chosen": -1.746138572692871,
      "logits/rejected": -1.5625007152557373,
      "logps/chosen": -313.5556640625,
      "logps/rejected": -302.9591369628906,
      "loss": 0.0326,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.13971158862113953,
      "rewards/margins": 0.2641337513923645,
      "rewards/rejected": -0.4038453996181488,
      "step": 370
    },
    {
      "epoch": 0.8715596330275229,
      "grad_norm": 3.9723478814606876,
      "learning_rate": 2.475778302439524e-08,
      "logits/chosen": -1.700884461402893,
      "logits/rejected": -1.4876903295516968,
      "logps/chosen": -308.3489074707031,
      "logps/rejected": -299.65850830078125,
      "loss": 0.0342,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -0.11970899254083633,
      "rewards/margins": 0.3003136217594147,
      "rewards/rejected": -0.4200226366519928,
      "step": 380
    },
    {
      "epoch": 0.8944954128440367,
      "grad_norm": 3.433718178917216,
      "learning_rate": 1.6796896657433805e-08,
      "logits/chosen": -1.6044851541519165,
      "logits/rejected": -1.4463411569595337,
      "logps/chosen": -265.7919006347656,
      "logps/rejected": -267.8514099121094,
      "loss": 0.0356,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.13834409415721893,
      "rewards/margins": 0.2471076250076294,
      "rewards/rejected": -0.3854517340660095,
      "step": 390
    },
    {
      "epoch": 0.9174311926605505,
      "grad_norm": 3.420165449234039,
      "learning_rate": 1.0333036740834855e-08,
      "logits/chosen": -1.5561296939849854,
      "logits/rejected": -1.4712246656417847,
      "logps/chosen": -236.0593719482422,
      "logps/rejected": -266.1527404785156,
      "loss": 0.0351,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -0.11499743163585663,
      "rewards/margins": 0.2543741464614868,
      "rewards/rejected": -0.36937159299850464,
      "step": 400
    },
    {
      "epoch": 0.9174311926605505,
      "eval_logits/chosen": -1.6862857341766357,
      "eval_logits/rejected": -1.476194977760315,
      "eval_logps/chosen": -297.669189453125,
      "eval_logps/rejected": -289.0508728027344,
      "eval_loss": 0.0327373668551445,
      "eval_rewards/accuracies": 0.7543103694915771,
      "eval_rewards/chosen": -0.12579074501991272,
      "eval_rewards/margins": 0.2994934022426605,
      "eval_rewards/rejected": -0.42528414726257324,
      "eval_runtime": 95.429,
      "eval_samples_per_second": 19.051,
      "eval_steps_per_second": 0.304,
      "step": 400
    },
    {
      "epoch": 0.9403669724770642,
      "grad_norm": 3.5258604798563833,
      "learning_rate": 5.4076974448211685e-09,
      "logits/chosen": -1.5972687005996704,
      "logits/rejected": -1.4251827001571655,
      "logps/chosen": -284.83367919921875,
      "logps/rejected": -281.9726257324219,
      "loss": 0.0339,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -0.13881805539131165,
      "rewards/margins": 0.30044105648994446,
      "rewards/rejected": -0.4392591118812561,
      "step": 410
    },
    {
      "epoch": 0.963302752293578,
      "grad_norm": 5.008109192532265,
      "learning_rate": 2.052496544188487e-09,
      "logits/chosen": -1.5873286724090576,
      "logits/rejected": -1.4294344186782837,
      "logps/chosen": -275.04388427734375,
      "logps/rejected": -290.7890319824219,
      "loss": 0.0341,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.1560489982366562,
      "rewards/margins": 0.293067991733551,
      "rewards/rejected": -0.4491170048713684,
      "step": 420
    },
    {
      "epoch": 0.9862385321100917,
      "grad_norm": 3.385882880558537,
      "learning_rate": 2.889724508297886e-10,
      "logits/chosen": -1.6612422466278076,
      "logits/rejected": -1.3801202774047852,
      "logps/chosen": -316.4678039550781,
      "logps/rejected": -282.8833923339844,
      "loss": 0.0343,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -0.13292355835437775,
      "rewards/margins": 0.25554487109184265,
      "rewards/rejected": -0.3884683847427368,
      "step": 430
    },
    {
      "epoch": 1.0,
      "step": 436,
      "total_flos": 0.0,
      "train_loss": 0.0384325077500912,
      "train_runtime": 11659.9634,
      "train_samples_per_second": 4.782,
      "train_steps_per_second": 0.037
    }
  ],
  "logging_steps": 10,
  "max_steps": 436,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|