{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 3.0, |
|
"eval_steps": 50, |
|
"global_step": 321, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.04672897196261682, |
|
"grad_norm": 55.02521133123827, |
|
"learning_rate": 5e-07, |
|
"logits/chosen": -2.7216238975524902, |
|
"logits/rejected": -2.7209055423736572, |
|
"logps/chosen": -268.4510192871094, |
|
"logps/rejected": -203.9590606689453, |
|
"loss": 0.6914, |
|
"rewards/accuracies": 0.33125001192092896, |
|
"rewards/chosen": 0.017813727259635925, |
|
"rewards/margins": 0.009322145953774452, |
|
"rewards/rejected": 0.008491581305861473, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.09345794392523364, |
|
"grad_norm": 50.12554517439661, |
|
"learning_rate": 1e-06, |
|
"logits/chosen": -2.660832166671753, |
|
"logits/rejected": -2.6669700145721436, |
|
"logps/chosen": -256.93609619140625, |
|
"logps/rejected": -211.64938354492188, |
|
"loss": 0.6467, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": 0.4671781659126282, |
|
"rewards/margins": 0.18480566143989563, |
|
"rewards/rejected": 0.28237253427505493, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.14018691588785046, |
|
"grad_norm": 48.457281147169425, |
|
"learning_rate": 9.993623730611148e-07, |
|
"logits/chosen": -2.494657278060913, |
|
"logits/rejected": -2.5133018493652344, |
|
"logps/chosen": -232.6891632080078, |
|
"logps/rejected": -212.8677215576172, |
|
"loss": 0.6204, |
|
"rewards/accuracies": 0.6625000238418579, |
|
"rewards/chosen": 1.2435152530670166, |
|
"rewards/margins": 0.6126660108566284, |
|
"rewards/rejected": 0.6308490037918091, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.18691588785046728, |
|
"grad_norm": 42.23668651632898, |
|
"learning_rate": 9.97451118516912e-07, |
|
"logits/chosen": -2.3121209144592285, |
|
"logits/rejected": -2.302377462387085, |
|
"logps/chosen": -234.3399658203125, |
|
"logps/rejected": -191.3181610107422, |
|
"loss": 0.6223, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": 1.3283790349960327, |
|
"rewards/margins": 0.9071598052978516, |
|
"rewards/rejected": 0.4212193489074707, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.2336448598130841, |
|
"grad_norm": 49.68505743626908, |
|
"learning_rate": 9.94271111036929e-07, |
|
"logits/chosen": -2.2619333267211914, |
|
"logits/rejected": -2.2323482036590576, |
|
"logps/chosen": -230.17385864257812, |
|
"logps/rejected": -205.64108276367188, |
|
"loss": 0.616, |
|
"rewards/accuracies": 0.762499988079071, |
|
"rewards/chosen": 1.3353136777877808, |
|
"rewards/margins": 1.208017110824585, |
|
"rewards/rejected": 0.127296581864357, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.2803738317757009, |
|
"grad_norm": 39.50573463077347, |
|
"learning_rate": 9.898304612549066e-07, |
|
"logits/chosen": -2.308243989944458, |
|
"logits/rejected": -2.2968266010284424, |
|
"logps/chosen": -241.26632690429688, |
|
"logps/rejected": -207.3319854736328, |
|
"loss": 0.5998, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": 0.9092999696731567, |
|
"rewards/margins": 0.9556086659431458, |
|
"rewards/rejected": -0.046308644115924835, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.32710280373831774, |
|
"grad_norm": 42.78736900246308, |
|
"learning_rate": 9.841404950825536e-07, |
|
"logits/chosen": -2.3728604316711426, |
|
"logits/rejected": -2.3580102920532227, |
|
"logps/chosen": -256.8548583984375, |
|
"logps/rejected": -205.154052734375, |
|
"loss": 0.5996, |
|
"rewards/accuracies": 0.768750011920929, |
|
"rewards/chosen": 1.0152027606964111, |
|
"rewards/margins": 1.2137099504470825, |
|
"rewards/rejected": -0.1985071450471878, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.37383177570093457, |
|
"grad_norm": 41.73908597429494, |
|
"learning_rate": 9.77215724822721e-07, |
|
"logits/chosen": -2.4492850303649902, |
|
"logits/rejected": -2.4539356231689453, |
|
"logps/chosen": -243.1707763671875, |
|
"logps/rejected": -213.95166015625, |
|
"loss": 0.6098, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": 1.0353302955627441, |
|
"rewards/margins": 1.2659428119659424, |
|
"rewards/rejected": -0.2306123673915863, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.4205607476635514, |
|
"grad_norm": 41.530064757148224, |
|
"learning_rate": 9.69073812155662e-07, |
|
"logits/chosen": -2.5637125968933105, |
|
"logits/rejected": -2.5535428524017334, |
|
"logps/chosen": -244.7168731689453, |
|
"logps/rejected": -205.80923461914062, |
|
"loss": 0.5974, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": 0.8133939504623413, |
|
"rewards/margins": 0.9837163686752319, |
|
"rewards/rejected": -0.17032238841056824, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.4672897196261682, |
|
"grad_norm": 38.26706141308248, |
|
"learning_rate": 9.597355230927788e-07, |
|
"logits/chosen": -2.5823917388916016, |
|
"logits/rejected": -2.562842607498169, |
|
"logps/chosen": -240.04067993164062, |
|
"logps/rejected": -209.23428344726562, |
|
"loss": 0.572, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": 0.9298027753829956, |
|
"rewards/margins": 1.2456680536270142, |
|
"rewards/rejected": -0.3158652186393738, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.4672897196261682, |
|
"eval_logits/chosen": -2.530949115753174, |
|
"eval_logits/rejected": -2.529101610183716, |
|
"eval_logps/chosen": -245.5291748046875, |
|
"eval_logps/rejected": -217.46429443359375, |
|
"eval_loss": 0.5720326900482178, |
|
"eval_rewards/accuracies": 0.7578125, |
|
"eval_rewards/chosen": 1.0708366632461548, |
|
"eval_rewards/margins": 1.28933846950531, |
|
"eval_rewards/rejected": -0.2185017466545105, |
|
"eval_runtime": 202.2601, |
|
"eval_samples_per_second": 15.03, |
|
"eval_steps_per_second": 0.237, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.514018691588785, |
|
"grad_norm": 40.54073508413725, |
|
"learning_rate": 9.4922467501275e-07, |
|
"logits/chosen": -2.495945930480957, |
|
"logits/rejected": -2.487422466278076, |
|
"logps/chosen": -250.51620483398438, |
|
"logps/rejected": -228.90200805664062, |
|
"loss": 0.5176, |
|
"rewards/accuracies": 0.7875000238418579, |
|
"rewards/chosen": 1.0155770778656006, |
|
"rewards/margins": 1.9236654043197632, |
|
"rewards/rejected": -0.9080885648727417, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.5607476635514018, |
|
"grad_norm": 38.23797310786567, |
|
"learning_rate": 9.375680759151206e-07, |
|
"logits/chosen": -2.474236249923706, |
|
"logits/rejected": -2.4737977981567383, |
|
"logps/chosen": -255.09298706054688, |
|
"logps/rejected": -200.73593139648438, |
|
"loss": 0.5654, |
|
"rewards/accuracies": 0.768750011920929, |
|
"rewards/chosen": 1.0740002393722534, |
|
"rewards/margins": 1.5434155464172363, |
|
"rewards/rejected": -0.4694152772426605, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.6074766355140186, |
|
"grad_norm": 42.648181943788025, |
|
"learning_rate": 9.247954560462927e-07, |
|
"logits/chosen": -2.505916118621826, |
|
"logits/rejected": -2.506608724594116, |
|
"logps/chosen": -255.432861328125, |
|
"logps/rejected": -205.4224090576172, |
|
"loss": 0.5628, |
|
"rewards/accuracies": 0.8062499761581421, |
|
"rewards/chosen": 1.1411590576171875, |
|
"rewards/margins": 1.7762504816055298, |
|
"rewards/rejected": -0.6350914239883423, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.6542056074766355, |
|
"grad_norm": 38.81572593341751, |
|
"learning_rate": 9.109393920723001e-07, |
|
"logits/chosen": -2.4328043460845947, |
|
"logits/rejected": -2.4342734813690186, |
|
"logps/chosen": -233.8389129638672, |
|
"logps/rejected": -212.91085815429688, |
|
"loss": 0.5378, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": 0.6836588978767395, |
|
"rewards/margins": 1.282029390335083, |
|
"rewards/rejected": -0.5983705520629883, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.7009345794392523, |
|
"grad_norm": 32.317612654080975, |
|
"learning_rate": 8.960352239917699e-07, |
|
"logits/chosen": -2.450084924697876, |
|
"logits/rejected": -2.401425361633301, |
|
"logps/chosen": -240.6315460205078, |
|
"logps/rejected": -227.21084594726562, |
|
"loss": 0.5154, |
|
"rewards/accuracies": 0.78125, |
|
"rewards/chosen": 0.6731350421905518, |
|
"rewards/margins": 1.5562646389007568, |
|
"rewards/rejected": -0.8831297755241394, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.7476635514018691, |
|
"grad_norm": 30.17721204804764, |
|
"learning_rate": 8.801209650009814e-07, |
|
"logits/chosen": -2.4172046184539795, |
|
"logits/rejected": -2.400567054748535, |
|
"logps/chosen": -245.4665985107422, |
|
"logps/rejected": -214.18515014648438, |
|
"loss": 0.5206, |
|
"rewards/accuracies": 0.8187500238418579, |
|
"rewards/chosen": 0.9583255052566528, |
|
"rewards/margins": 1.7562158107757568, |
|
"rewards/rejected": -0.7978904843330383, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.794392523364486, |
|
"grad_norm": 32.24535114623233, |
|
"learning_rate": 8.632372045409141e-07, |
|
"logits/chosen": -2.320589065551758, |
|
"logits/rejected": -2.3311946392059326, |
|
"logps/chosen": -245.598388671875, |
|
"logps/rejected": -234.7646026611328, |
|
"loss": 0.5472, |
|
"rewards/accuracies": 0.78125, |
|
"rewards/chosen": 1.1685658693313599, |
|
"rewards/margins": 1.754003882408142, |
|
"rewards/rejected": -0.5854381322860718, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.8411214953271028, |
|
"grad_norm": 34.061291659967246, |
|
"learning_rate": 8.454270047735642e-07, |
|
"logits/chosen": -2.329784870147705, |
|
"logits/rejected": -2.304997682571411, |
|
"logps/chosen": -238.0483856201172, |
|
"logps/rejected": -195.24313354492188, |
|
"loss": 0.5291, |
|
"rewards/accuracies": 0.8125, |
|
"rewards/chosen": 0.6335947513580322, |
|
"rewards/margins": 1.5654070377349854, |
|
"rewards/rejected": -0.9318124055862427, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.8878504672897196, |
|
"grad_norm": 31.735542564732725, |
|
"learning_rate": 8.267357907515661e-07, |
|
"logits/chosen": -2.298316478729248, |
|
"logits/rejected": -2.2975010871887207, |
|
"logps/chosen": -246.3526153564453, |
|
"logps/rejected": -206.03524780273438, |
|
"loss": 0.5418, |
|
"rewards/accuracies": 0.7562500238418579, |
|
"rewards/chosen": 0.5418449640274048, |
|
"rewards/margins": 1.539952039718628, |
|
"rewards/rejected": -0.9981070756912231, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 0.9345794392523364, |
|
"grad_norm": 32.65915062987667, |
|
"learning_rate": 8.072112345612433e-07, |
|
"logits/chosen": -2.2663910388946533, |
|
"logits/rejected": -2.218681812286377, |
|
"logps/chosen": -246.5704803466797, |
|
"logps/rejected": -218.6560821533203, |
|
"loss": 0.4997, |
|
"rewards/accuracies": 0.8125, |
|
"rewards/chosen": 0.8927062153816223, |
|
"rewards/margins": 2.236736297607422, |
|
"rewards/rejected": -1.3440301418304443, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.9345794392523364, |
|
"eval_logits/chosen": -2.2007782459259033, |
|
"eval_logits/rejected": -2.177567720413208, |
|
"eval_logps/chosen": -247.5850067138672, |
|
"eval_logps/rejected": -224.4142608642578, |
|
"eval_loss": 0.5101521015167236, |
|
"eval_rewards/accuracies": 0.7864583134651184, |
|
"eval_rewards/chosen": 0.8652558326721191, |
|
"eval_rewards/margins": 1.7787574529647827, |
|
"eval_rewards/rejected": -0.9135015606880188, |
|
"eval_runtime": 201.8599, |
|
"eval_samples_per_second": 15.06, |
|
"eval_steps_per_second": 0.238, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.9813084112149533, |
|
"grad_norm": 28.6005139133492, |
|
"learning_rate": 7.869031337345827e-07, |
|
"logits/chosen": -2.1810142993927, |
|
"logits/rejected": -2.1466403007507324, |
|
"logps/chosen": -273.2081298828125, |
|
"logps/rejected": -224.4601593017578, |
|
"loss": 0.474, |
|
"rewards/accuracies": 0.8125, |
|
"rewards/chosen": 0.8940876126289368, |
|
"rewards/margins": 2.1252331733703613, |
|
"rewards/rejected": -1.2311456203460693, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 1.02803738317757, |
|
"grad_norm": 17.790000678929353, |
|
"learning_rate": 7.658632842402432e-07, |
|
"logits/chosen": -2.1617965698242188, |
|
"logits/rejected": -2.118025064468384, |
|
"logps/chosen": -248.0548858642578, |
|
"logps/rejected": -209.18603515625, |
|
"loss": 0.3432, |
|
"rewards/accuracies": 0.8687499761581421, |
|
"rewards/chosen": 0.46495524048805237, |
|
"rewards/margins": 2.547100067138672, |
|
"rewards/rejected": -2.0821449756622314, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 1.074766355140187, |
|
"grad_norm": 18.641161706325903, |
|
"learning_rate": 7.441453483775353e-07, |
|
"logits/chosen": -2.2370879650115967, |
|
"logits/rejected": -2.1639482975006104, |
|
"logps/chosen": -252.3055419921875, |
|
"logps/rejected": -231.727294921875, |
|
"loss": 0.2131, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": 1.1295344829559326, |
|
"rewards/margins": 2.7920079231262207, |
|
"rewards/rejected": -1.6624739170074463, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 1.1214953271028036, |
|
"grad_norm": 22.27642795647513, |
|
"learning_rate": 7.218047179103112e-07, |
|
"logits/chosen": -2.2182936668395996, |
|
"logits/rejected": -2.2140285968780518, |
|
"logps/chosen": -248.0435028076172, |
|
"logps/rejected": -233.4251251220703, |
|
"loss": 0.2493, |
|
"rewards/accuracies": 0.90625, |
|
"rewards/chosen": 1.1873703002929688, |
|
"rewards/margins": 3.075556755065918, |
|
"rewards/rejected": -1.8881866931915283, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 1.1682242990654206, |
|
"grad_norm": 24.129663206875104, |
|
"learning_rate": 6.988983727898413e-07, |
|
"logits/chosen": -2.2516915798187256, |
|
"logits/rejected": -2.2213852405548096, |
|
"logps/chosen": -239.94125366210938, |
|
"logps/rejected": -213.9071807861328, |
|
"loss": 0.2434, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": 1.149621844291687, |
|
"rewards/margins": 2.956752300262451, |
|
"rewards/rejected": -1.8071304559707642, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 1.2149532710280373, |
|
"grad_norm": 19.098258762584823, |
|
"learning_rate": 6.754847358270066e-07, |
|
"logits/chosen": -2.268832206726074, |
|
"logits/rejected": -2.2357370853424072, |
|
"logps/chosen": -252.8026885986328, |
|
"logps/rejected": -218.2414093017578, |
|
"loss": 0.2656, |
|
"rewards/accuracies": 0.9125000238418579, |
|
"rewards/chosen": 1.428450345993042, |
|
"rewards/margins": 3.458483934402466, |
|
"rewards/rejected": -2.030033826828003, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 1.2616822429906542, |
|
"grad_norm": 16.69096458544893, |
|
"learning_rate": 6.516235236844661e-07, |
|
"logits/chosen": -2.2503199577331543, |
|
"logits/rejected": -2.223175048828125, |
|
"logps/chosen": -251.1974639892578, |
|
"logps/rejected": -224.8771209716797, |
|
"loss": 0.229, |
|
"rewards/accuracies": 0.9125000238418579, |
|
"rewards/chosen": 1.4385788440704346, |
|
"rewards/margins": 3.5852439403533936, |
|
"rewards/rejected": -2.146665573120117, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 1.308411214953271, |
|
"grad_norm": 24.370271103680654, |
|
"learning_rate": 6.273755945688457e-07, |
|
"logits/chosen": -2.30786395072937, |
|
"logits/rejected": -2.2820160388946533, |
|
"logps/chosen": -247.8080596923828, |
|
"logps/rejected": -242.51596069335938, |
|
"loss": 0.2745, |
|
"rewards/accuracies": 0.893750011920929, |
|
"rewards/chosen": 1.6472270488739014, |
|
"rewards/margins": 3.4453282356262207, |
|
"rewards/rejected": -1.7981010675430298, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 1.355140186915888, |
|
"grad_norm": 21.195332919733744, |
|
"learning_rate": 6.02802793011411e-07, |
|
"logits/chosen": -2.304081916809082, |
|
"logits/rejected": -2.2989423274993896, |
|
"logps/chosen": -221.5400848388672, |
|
"logps/rejected": -236.2010955810547, |
|
"loss": 0.2682, |
|
"rewards/accuracies": 0.9125000238418579, |
|
"rewards/chosen": 1.4084885120391846, |
|
"rewards/margins": 3.7067673206329346, |
|
"rewards/rejected": -2.29827880859375, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 1.4018691588785046, |
|
"grad_norm": 22.224652369004666, |
|
"learning_rate": 5.779677921331093e-07, |
|
"logits/chosen": -2.2803831100463867, |
|
"logits/rejected": -2.2711875438690186, |
|
"logps/chosen": -247.6720733642578, |
|
"logps/rejected": -217.12509155273438, |
|
"loss": 0.2873, |
|
"rewards/accuracies": 0.893750011920929, |
|
"rewards/chosen": 1.7340141534805298, |
|
"rewards/margins": 3.378777265548706, |
|
"rewards/rejected": -1.6447633504867554, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 1.4018691588785046, |
|
"eval_logits/chosen": -2.275045394897461, |
|
"eval_logits/rejected": -2.263206720352173, |
|
"eval_logps/chosen": -245.67860412597656, |
|
"eval_logps/rejected": -227.55787658691406, |
|
"eval_loss": 0.5675327181816101, |
|
"eval_rewards/accuracies": 0.7890625, |
|
"eval_rewards/chosen": 1.055895209312439, |
|
"eval_rewards/margins": 2.2837564945220947, |
|
"eval_rewards/rejected": -1.2278612852096558, |
|
"eval_runtime": 202.0364, |
|
"eval_samples_per_second": 15.047, |
|
"eval_steps_per_second": 0.238, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 1.4485981308411215, |
|
"grad_norm": 24.024418676682174, |
|
"learning_rate": 5.529339337962897e-07, |
|
"logits/chosen": -2.26741361618042, |
|
"logits/rejected": -2.2619667053222656, |
|
"logps/chosen": -228.74258422851562, |
|
"logps/rejected": -199.869873046875, |
|
"loss": 0.3185, |
|
"rewards/accuracies": 0.9312499761581421, |
|
"rewards/chosen": 1.8829383850097656, |
|
"rewards/margins": 3.547306776046753, |
|
"rewards/rejected": -1.6643686294555664, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 1.4953271028037383, |
|
"grad_norm": 22.765211926437665, |
|
"learning_rate": 5.277650670507915e-07, |
|
"logits/chosen": -2.2662367820739746, |
|
"logits/rejected": -2.241522789001465, |
|
"logps/chosen": -241.50253295898438, |
|
"logps/rejected": -211.10791015625, |
|
"loss": 0.2582, |
|
"rewards/accuracies": 0.9375, |
|
"rewards/chosen": 1.677080512046814, |
|
"rewards/margins": 3.3089568614959717, |
|
"rewards/rejected": -1.6318763494491577, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 1.542056074766355, |
|
"grad_norm": 20.952241807232628, |
|
"learning_rate": 5.025253852864471e-07, |
|
"logits/chosen": -2.2016148567199707, |
|
"logits/rejected": -2.2075283527374268, |
|
"logps/chosen": -247.7741241455078, |
|
"logps/rejected": -224.11892700195312, |
|
"loss": 0.2816, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": 1.5324174165725708, |
|
"rewards/margins": 3.230978488922119, |
|
"rewards/rejected": -1.6985607147216797, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 1.588785046728972, |
|
"grad_norm": 23.145444455236966, |
|
"learning_rate": 4.77279262507344e-07, |
|
"logits/chosen": -2.2021024227142334, |
|
"logits/rejected": -2.1827890872955322, |
|
"logps/chosen": -243.9816131591797, |
|
"logps/rejected": -249.20703125, |
|
"loss": 0.2787, |
|
"rewards/accuracies": 0.918749988079071, |
|
"rewards/chosen": 1.669757604598999, |
|
"rewards/margins": 3.6465446949005127, |
|
"rewards/rejected": -1.9767868518829346, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 1.6355140186915889, |
|
"grad_norm": 26.563830659774606, |
|
"learning_rate": 4.5209108914542714e-07, |
|
"logits/chosen": -2.1774230003356934, |
|
"logits/rejected": -2.1725821495056152, |
|
"logps/chosen": -224.1102294921875, |
|
"logps/rejected": -234.7071075439453, |
|
"loss": 0.3123, |
|
"rewards/accuracies": 0.8812500238418579, |
|
"rewards/chosen": 1.2634233236312866, |
|
"rewards/margins": 3.5007872581481934, |
|
"rewards/rejected": -2.2373640537261963, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 1.6822429906542056, |
|
"grad_norm": 25.254447136991615, |
|
"learning_rate": 4.2702510783220475e-07, |
|
"logits/chosen": -2.168032169342041, |
|
"logits/rejected": -2.1451544761657715, |
|
"logps/chosen": -228.2743682861328, |
|
"logps/rejected": -211.07705688476562, |
|
"loss": 0.2948, |
|
"rewards/accuracies": 0.918749988079071, |
|
"rewards/chosen": 1.1032750606536865, |
|
"rewards/margins": 3.2025279998779297, |
|
"rewards/rejected": -2.0992531776428223, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 1.7289719626168223, |
|
"grad_norm": 20.89817797522474, |
|
"learning_rate": 4.0214524954741586e-07, |
|
"logits/chosen": -2.1714885234832764, |
|
"logits/rejected": -2.148820400238037, |
|
"logps/chosen": -250.5221405029297, |
|
"logps/rejected": -223.18399047851562, |
|
"loss": 0.2802, |
|
"rewards/accuracies": 0.90625, |
|
"rewards/chosen": 1.3431367874145508, |
|
"rewards/margins": 3.7224392890930176, |
|
"rewards/rejected": -2.3793022632598877, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 1.7757009345794392, |
|
"grad_norm": 17.107545008827852, |
|
"learning_rate": 3.7751497056257305e-07, |
|
"logits/chosen": -2.1603405475616455, |
|
"logits/rejected": -2.145948648452759, |
|
"logps/chosen": -234.90872192382812, |
|
"logps/rejected": -240.06298828125, |
|
"loss": 0.269, |
|
"rewards/accuracies": 0.925000011920929, |
|
"rewards/chosen": 1.084341049194336, |
|
"rewards/margins": 3.5826897621154785, |
|
"rewards/rejected": -2.4983482360839844, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 1.8224299065420562, |
|
"grad_norm": 27.137427341683352, |
|
"learning_rate": 3.531970905952478e-07, |
|
"logits/chosen": -2.1491293907165527, |
|
"logits/rejected": -2.1209685802459717, |
|
"logps/chosen": -221.9265899658203, |
|
"logps/rejected": -215.990478515625, |
|
"loss": 0.2937, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": 0.9956735372543335, |
|
"rewards/margins": 3.271005630493164, |
|
"rewards/rejected": -2.275331974029541, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 1.8691588785046729, |
|
"grad_norm": 23.508295318902285, |
|
"learning_rate": 3.2925363258689553e-07, |
|
"logits/chosen": -2.161498785018921, |
|
"logits/rejected": -2.1209306716918945, |
|
"logps/chosen": -248.2667999267578, |
|
"logps/rejected": -227.0295867919922, |
|
"loss": 0.2853, |
|
"rewards/accuracies": 0.862500011920929, |
|
"rewards/chosen": 1.2955918312072754, |
|
"rewards/margins": 3.347618818283081, |
|
"rewards/rejected": -2.0520269870758057, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 1.8691588785046729, |
|
"eval_logits/chosen": -2.147773504257202, |
|
"eval_logits/rejected": -2.1250855922698975, |
|
"eval_logps/chosen": -249.0491485595703, |
|
"eval_logps/rejected": -232.39312744140625, |
|
"eval_loss": 0.5163093209266663, |
|
"eval_rewards/accuracies": 0.8203125, |
|
"eval_rewards/chosen": 0.718841552734375, |
|
"eval_rewards/margins": 2.4302282333374023, |
|
"eval_rewards/rejected": -1.711386799812317, |
|
"eval_runtime": 201.455, |
|
"eval_samples_per_second": 15.09, |
|
"eval_steps_per_second": 0.238, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 1.9158878504672896, |
|
"grad_norm": 23.066010809349862, |
|
"learning_rate": 3.0574566451286086e-07, |
|
"logits/chosen": -2.1637778282165527, |
|
"logits/rejected": -2.132652997970581, |
|
"logps/chosen": -242.0740203857422, |
|
"logps/rejected": -231.40139770507812, |
|
"loss": 0.3089, |
|
"rewards/accuracies": 0.9125000238418579, |
|
"rewards/chosen": 1.1851091384887695, |
|
"rewards/margins": 3.5020480155944824, |
|
"rewards/rejected": -2.316938877105713, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 1.9626168224299065, |
|
"grad_norm": 21.925966755838537, |
|
"learning_rate": 2.8273314362803333e-07, |
|
"logits/chosen": -2.143448829650879, |
|
"logits/rejected": -2.142066240310669, |
|
"logps/chosen": -250.38720703125, |
|
"logps/rejected": -226.64315795898438, |
|
"loss": 0.2716, |
|
"rewards/accuracies": 0.918749988079071, |
|
"rewards/chosen": 1.1868922710418701, |
|
"rewards/margins": 3.174391269683838, |
|
"rewards/rejected": -1.9874988794326782, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 2.0093457943925235, |
|
"grad_norm": 13.114607410778014, |
|
"learning_rate": 2.602747635454047e-07, |
|
"logits/chosen": -2.18164324760437, |
|
"logits/rejected": -2.160330295562744, |
|
"logps/chosen": -235.2315216064453, |
|
"logps/rejected": -223.3993682861328, |
|
"loss": 0.2259, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": 1.4251452684402466, |
|
"rewards/margins": 3.5938689708709717, |
|
"rewards/rejected": -2.1687240600585938, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 2.05607476635514, |
|
"grad_norm": 16.881870959615693, |
|
"learning_rate": 2.384278045375523e-07, |
|
"logits/chosen": -2.2383639812469482, |
|
"logits/rejected": -2.227437973022461, |
|
"logps/chosen": -235.4667205810547, |
|
"logps/rejected": -229.47811889648438, |
|
"loss": 0.1641, |
|
"rewards/accuracies": 0.925000011920929, |
|
"rewards/chosen": 1.6337192058563232, |
|
"rewards/margins": 3.944089412689209, |
|
"rewards/rejected": -2.310370683670044, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 2.102803738317757, |
|
"grad_norm": 17.530383979048825, |
|
"learning_rate": 2.1724798744286071e-07, |
|
"logits/chosen": -2.266674518585205, |
|
"logits/rejected": -2.2329540252685547, |
|
"logps/chosen": -245.1803436279297, |
|
"logps/rejected": -238.8377685546875, |
|
"loss": 0.1503, |
|
"rewards/accuracies": 0.9437500238418579, |
|
"rewards/chosen": 1.8679962158203125, |
|
"rewards/margins": 4.181756496429443, |
|
"rewards/rejected": -2.3137600421905518, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 2.149532710280374, |
|
"grad_norm": 19.41034220051436, |
|
"learning_rate": 1.9678933154909095e-07, |
|
"logits/chosen": -2.2616019248962402, |
|
"logits/rejected": -2.260685443878174, |
|
"logps/chosen": -251.1139373779297, |
|
"logps/rejected": -249.1596221923828, |
|
"loss": 0.1462, |
|
"rewards/accuracies": 0.925000011920929, |
|
"rewards/chosen": 2.074453115463257, |
|
"rewards/margins": 4.364731788635254, |
|
"rewards/rejected": -2.290278911590576, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 2.196261682242991, |
|
"grad_norm": 15.416496217582845, |
|
"learning_rate": 1.77104016816768e-07, |
|
"logits/chosen": -2.259556293487549, |
|
"logits/rejected": -2.2295475006103516, |
|
"logps/chosen": -243.61294555664062, |
|
"logps/rejected": -226.0421600341797, |
|
"loss": 0.155, |
|
"rewards/accuracies": 0.918749988079071, |
|
"rewards/chosen": 1.5110995769500732, |
|
"rewards/margins": 4.157734394073486, |
|
"rewards/rejected": -2.646634578704834, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 2.2429906542056073, |
|
"grad_norm": 21.341300239829316, |
|
"learning_rate": 1.5824225079378684e-07, |
|
"logits/chosen": -2.2581722736358643, |
|
"logits/rejected": -2.2538020610809326, |
|
"logps/chosen": -234.7251434326172, |
|
"logps/rejected": -247.6671600341797, |
|
"loss": 0.1613, |
|
"rewards/accuracies": 0.9375, |
|
"rewards/chosen": 1.555023193359375, |
|
"rewards/margins": 4.231776237487793, |
|
"rewards/rejected": -2.676752805709839, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 2.289719626168224, |
|
"grad_norm": 19.28302760698744, |
|
"learning_rate": 1.4025214056067237e-07, |
|
"logits/chosen": -2.26164174079895, |
|
"logits/rejected": -2.2327027320861816, |
|
"logps/chosen": -230.2368621826172, |
|
"logps/rejected": -244.13900756835938, |
|
"loss": 0.159, |
|
"rewards/accuracies": 0.9437500238418579, |
|
"rewards/chosen": 1.3178222179412842, |
|
"rewards/margins": 4.314841270446777, |
|
"rewards/rejected": -2.9970195293426514, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 2.336448598130841, |
|
"grad_norm": 20.865529370197397, |
|
"learning_rate": 1.2317957003309725e-07, |
|
"logits/chosen": -2.2660677433013916, |
|
"logits/rejected": -2.2249627113342285, |
|
"logps/chosen": -249.3228759765625, |
|
"logps/rejected": -236.3594512939453, |
|
"loss": 0.1541, |
|
"rewards/accuracies": 0.956250011920929, |
|
"rewards/chosen": 1.5125614404678345, |
|
"rewards/margins": 4.342096328735352, |
|
"rewards/rejected": -2.8295350074768066, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 2.336448598130841, |
|
"eval_logits/chosen": -2.235158920288086, |
|
"eval_logits/rejected": -2.2153029441833496, |
|
"eval_logps/chosen": -250.26039123535156, |
|
"eval_logps/rejected": -236.7135467529297, |
|
"eval_loss": 0.5270811319351196, |
|
"eval_rewards/accuracies": 0.8177083134651184, |
|
"eval_rewards/chosen": 0.5977155566215515, |
|
"eval_rewards/margins": 2.7411410808563232, |
|
"eval_rewards/rejected": -2.143425226211548, |
|
"eval_runtime": 203.3157, |
|
"eval_samples_per_second": 14.952, |
|
"eval_steps_per_second": 0.236, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 2.383177570093458, |
|
"grad_norm": 12.155538402681515, |
|
"learning_rate": 1.0706808293459873e-07, |
|
"logits/chosen": -2.2205164432525635, |
|
"logits/rejected": -2.209555149078369, |
|
"logps/chosen": -223.6109619140625, |
|
"logps/rejected": -224.11154174804688, |
|
"loss": 0.1437, |
|
"rewards/accuracies": 0.90625, |
|
"rewards/chosen": 1.015878438949585, |
|
"rewards/margins": 3.95696759223938, |
|
"rewards/rejected": -2.941089153289795, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 2.4299065420560746, |
|
"grad_norm": 13.826515054647423, |
|
"learning_rate": 9.195877173797534e-08, |
|
"logits/chosen": -2.226680040359497, |
|
"logits/rejected": -2.2163913249969482, |
|
"logps/chosen": -226.86441040039062, |
|
"logps/rejected": -242.27633666992188, |
|
"loss": 0.163, |
|
"rewards/accuracies": 0.9624999761581421, |
|
"rewards/chosen": 1.5521247386932373, |
|
"rewards/margins": 4.2657151222229, |
|
"rewards/rejected": -2.7135910987854004, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 2.4766355140186915, |
|
"grad_norm": 15.75649594362957, |
|
"learning_rate": 7.789017285861438e-08, |
|
"logits/chosen": -2.2129428386688232, |
|
"logits/rejected": -2.1899213790893555, |
|
"logps/chosen": -253.51864624023438, |
|
"logps/rejected": -233.0913848876953, |
|
"loss": 0.1529, |
|
"rewards/accuracies": 0.956250011920929, |
|
"rewards/chosen": 1.8306633234024048, |
|
"rewards/margins": 4.6044087409973145, |
|
"rewards/rejected": -2.773745059967041, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 2.5233644859813085, |
|
"grad_norm": 14.072200590843078, |
|
"learning_rate": 6.489816836706785e-08, |
|
"logits/chosen": -2.1907477378845215, |
|
"logits/rejected": -2.1698105335235596, |
|
"logps/chosen": -234.4886932373047, |
|
"logps/rejected": -205.0101318359375, |
|
"loss": 0.1452, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": 1.6402314901351929, |
|
"rewards/margins": 4.284368515014648, |
|
"rewards/rejected": -2.644136905670166, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 2.5700934579439254, |
|
"grad_norm": 15.994347574292076, |
|
"learning_rate": 5.3015894471550914e-08, |
|
"logits/chosen": -2.1613574028015137, |
|
"logits/rejected": -2.137498378753662, |
|
"logps/chosen": -229.953125, |
|
"logps/rejected": -234.6221923828125, |
|
"loss": 0.1289, |
|
"rewards/accuracies": 0.9624999761581421, |
|
"rewards/chosen": 1.5998098850250244, |
|
"rewards/margins": 4.529351234436035, |
|
"rewards/rejected": -2.9295413494110107, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 2.616822429906542, |
|
"grad_norm": 15.819926843742866, |
|
"learning_rate": 4.227365700378799e-08, |
|
"logits/chosen": -2.1940014362335205, |
|
"logits/rejected": -2.140695571899414, |
|
"logps/chosen": -248.3275909423828, |
|
"logps/rejected": -266.1819763183594, |
|
"loss": 0.1526, |
|
"rewards/accuracies": 0.9437500238418579, |
|
"rewards/chosen": 1.9989855289459229, |
|
"rewards/margins": 4.9421706199646, |
|
"rewards/rejected": -2.9431850910186768, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 2.663551401869159, |
|
"grad_norm": 17.059099314353197, |
|
"learning_rate": 3.269885412375223e-08, |
|
"logits/chosen": -2.1719181537628174, |
|
"logits/rejected": -2.1409342288970947, |
|
"logps/chosen": -241.63809204101562, |
|
"logps/rejected": -226.8559112548828, |
|
"loss": 0.151, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": 1.2887296676635742, |
|
"rewards/margins": 4.394392490386963, |
|
"rewards/rejected": -3.1056625843048096, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 2.710280373831776, |
|
"grad_norm": 16.789145370360544, |
|
"learning_rate": 2.4315906440446952e-08, |
|
"logits/chosen": -2.1815662384033203, |
|
"logits/rejected": -2.1283011436462402, |
|
"logps/chosen": -244.0872802734375, |
|
"logps/rejected": -240.93655395507812, |
|
"loss": 0.1788, |
|
"rewards/accuracies": 0.9312499761581421, |
|
"rewards/chosen": 1.2021782398223877, |
|
"rewards/margins": 4.522456169128418, |
|
"rewards/rejected": -3.320277452468872, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 2.7570093457943923, |
|
"grad_norm": 16.725526736330913, |
|
"learning_rate": 1.7146194726952778e-08, |
|
"logits/chosen": -2.1704020500183105, |
|
"logits/rejected": -2.1348023414611816, |
|
"logps/chosen": -229.75509643554688, |
|
"logps/rejected": -227.176513671875, |
|
"loss": 0.139, |
|
"rewards/accuracies": 0.956250011920929, |
|
"rewards/chosen": 1.2168376445770264, |
|
"rewards/margins": 4.267210483551025, |
|
"rewards/rejected": -3.05037260055542, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 2.803738317757009, |
|
"grad_norm": 15.148406064058884, |
|
"learning_rate": 1.1208005388599951e-08, |
|
"logits/chosen": -2.1668269634246826, |
|
"logits/rejected": -2.1430649757385254, |
|
"logps/chosen": -227.9604034423828, |
|
"logps/rejected": -239.50302124023438, |
|
"loss": 0.1566, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": 1.1434178352355957, |
|
"rewards/margins": 4.731930732727051, |
|
"rewards/rejected": -3.588513135910034, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 2.803738317757009, |
|
"eval_logits/chosen": -2.16743540763855, |
|
"eval_logits/rejected": -2.144243001937866, |
|
"eval_logps/chosen": -250.66944885253906, |
|
"eval_logps/rejected": -238.1006622314453, |
|
"eval_loss": 0.5241575837135315, |
|
"eval_rewards/accuracies": 0.8307291865348816, |
|
"eval_rewards/chosen": 0.5568115711212158, |
|
"eval_rewards/margins": 2.8389499187469482, |
|
"eval_rewards/rejected": -2.2821381092071533, |
|
"eval_runtime": 202.2678, |
|
"eval_samples_per_second": 15.03, |
|
"eval_steps_per_second": 0.237, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 2.850467289719626, |
|
"grad_norm": 21.997578495348503, |
|
"learning_rate": 6.516483823349794e-09, |
|
"logits/chosen": -2.163015842437744, |
|
"logits/rejected": -2.1393959522247314, |
|
"logps/chosen": -250.9050750732422, |
|
"logps/rejected": -243.2065887451172, |
|
"loss": 0.1765, |
|
"rewards/accuracies": 0.9125000238418579, |
|
"rewards/chosen": 1.1963839530944824, |
|
"rewards/margins": 4.203527450561523, |
|
"rewards/rejected": -3.007143497467041, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 2.897196261682243, |
|
"grad_norm": 18.632181777742066, |
|
"learning_rate": 3.0835957933397773e-09, |
|
"logits/chosen": -2.1690900325775146, |
|
"logits/rejected": -2.139923095703125, |
|
"logps/chosen": -236.43856811523438, |
|
"logps/rejected": -243.2851104736328, |
|
"loss": 0.1542, |
|
"rewards/accuracies": 0.987500011920929, |
|
"rewards/chosen": 1.3358101844787598, |
|
"rewards/margins": 4.739504814147949, |
|
"rewards/rejected": -3.4036941528320312, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 2.94392523364486, |
|
"grad_norm": 20.3481331130334, |
|
"learning_rate": 9.180969061143851e-10, |
|
"logits/chosen": -2.1655070781707764, |
|
"logits/rejected": -2.1447086334228516, |
|
"logps/chosen": -254.8720245361328, |
|
"logps/rejected": -229.35009765625, |
|
"loss": 0.1623, |
|
"rewards/accuracies": 0.918749988079071, |
|
"rewards/chosen": 1.4708306789398193, |
|
"rewards/margins": 4.39527702331543, |
|
"rewards/rejected": -2.9244461059570312, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 2.9906542056074765, |
|
"grad_norm": 17.060050486891257, |
|
"learning_rate": 2.5510283379992504e-11, |
|
"logits/chosen": -2.176898956298828, |
|
"logits/rejected": -2.1477956771850586, |
|
"logps/chosen": -230.62814331054688, |
|
"logps/rejected": -222.4974822998047, |
|
"loss": 0.1441, |
|
"rewards/accuracies": 0.956250011920929, |
|
"rewards/chosen": 1.4323546886444092, |
|
"rewards/margins": 4.373940944671631, |
|
"rewards/rejected": -2.9415860176086426, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"step": 321, |
|
"total_flos": 3785055088410624.0, |
|
"train_loss": 0.33236435687059185, |
|
"train_runtime": 12185.597, |
|
"train_samples_per_second": 6.734, |
|
"train_steps_per_second": 0.026 |
|
} |
|
], |
|
"logging_steps": 5, |
|
"max_steps": 321, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 3, |
|
"save_steps": 100, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": true |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 3785055088410624.0, |
|
"train_batch_size": 8, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |