{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.4444444444444444,
"eval_steps": 60,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.008888888888888889,
"grad_norm": 8.760091781616211,
"learning_rate": 4.4247787610619474e-07,
"logits/chosen": -0.8248252868652344,
"logits/rejected": -0.8263720273971558,
"logps/chosen": -0.36086463928222656,
"logps/rejected": -5.696224689483643,
"loss": 1.1038,
"rewards/accuracies": 0.5125000476837158,
"rewards/chosen": 17.43745994567871,
"rewards/margins": 0.5984855890274048,
"rewards/rejected": 16.838973999023438,
"step": 10
},
{
"epoch": 0.017777777777777778,
"grad_norm": 8.855981826782227,
"learning_rate": 8.849557522123895e-07,
"logits/chosen": -0.8169006109237671,
"logits/rejected": -0.819770872592926,
"logps/chosen": -0.12464660406112671,
"logps/rejected": -7.139842987060547,
"loss": 1.1887,
"rewards/accuracies": 0.4000000059604645,
"rewards/chosen": 17.17649269104004,
"rewards/margins": 0.19107049703598022,
"rewards/rejected": 16.98542022705078,
"step": 20
},
{
"epoch": 0.02666666666666667,
"grad_norm": 16.764184951782227,
"learning_rate": 1.3274336283185843e-06,
"logits/chosen": -0.8003113865852356,
"logits/rejected": -0.8030117750167847,
"logps/chosen": -0.34651467204093933,
"logps/rejected": -6.967917442321777,
"loss": 1.0563,
"rewards/accuracies": 0.44999998807907104,
"rewards/chosen": 17.280975341796875,
"rewards/margins": 0.40005987882614136,
"rewards/rejected": 16.88091468811035,
"step": 30
},
{
"epoch": 0.035555555555555556,
"grad_norm": 8.33682918548584,
"learning_rate": 1.769911504424779e-06,
"logits/chosen": -0.7695047855377197,
"logits/rejected": -0.7739207148551941,
"logps/chosen": -1.5993006229400635,
"logps/rejected": -8.504932403564453,
"loss": 0.7596,
"rewards/accuracies": 0.5,
"rewards/chosen": 17.283912658691406,
"rewards/margins": 0.6976072192192078,
"rewards/rejected": 16.5863037109375,
"step": 40
},
{
"epoch": 0.044444444444444446,
"grad_norm": 4.494723320007324,
"learning_rate": 2.212389380530974e-06,
"logits/chosen": -0.7154140472412109,
"logits/rejected": -0.7225576043128967,
"logps/chosen": -3.112199068069458,
"logps/rejected": -12.212080001831055,
"loss": 0.6083,
"rewards/accuracies": 0.4625000059604645,
"rewards/chosen": 17.03064727783203,
"rewards/margins": 0.7148451805114746,
"rewards/rejected": 16.3158016204834,
"step": 50
},
{
"epoch": 0.05333333333333334,
"grad_norm": 5.110287666320801,
"learning_rate": 2.6548672566371687e-06,
"logits/chosen": -0.6322453022003174,
"logits/rejected": -0.6387485265731812,
"logps/chosen": -5.650620460510254,
"logps/rejected": -12.759811401367188,
"loss": 0.3835,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 17.101289749145508,
"rewards/margins": 1.1824612617492676,
"rewards/rejected": 15.918828964233398,
"step": 60
},
{
"epoch": 0.05333333333333334,
"eval_logits/chosen": -0.5826543569564819,
"eval_logits/rejected": -0.5914276838302612,
"eval_logps/chosen": -3.5471787452697754,
"eval_logps/rejected": -16.51181983947754,
"eval_loss": 0.3286525011062622,
"eval_rewards/accuracies": 0.9280000925064087,
"eval_rewards/chosen": 17.148174285888672,
"eval_rewards/margins": 1.4386365413665771,
"eval_rewards/rejected": 15.709539413452148,
"eval_runtime": 372.0227,
"eval_samples_per_second": 2.688,
"eval_steps_per_second": 0.336,
"step": 60
},
{
"epoch": 0.06222222222222222,
"grad_norm": 5.098133563995361,
"learning_rate": 3.097345132743363e-06,
"logits/chosen": -0.5378152132034302,
"logits/rejected": -0.5494933724403381,
"logps/chosen": -1.5099802017211914,
"logps/rejected": -21.206321716308594,
"loss": 0.2931,
"rewards/accuracies": 0.9375,
"rewards/chosen": 17.083791732788086,
"rewards/margins": 1.5844331979751587,
"rewards/rejected": 15.499359130859375,
"step": 70
},
{
"epoch": 0.07111111111111111,
"grad_norm": 29.787437438964844,
"learning_rate": 3.539823008849558e-06,
"logits/chosen": -0.443774938583374,
"logits/rejected": -0.45571577548980713,
"logps/chosen": -1.5804342031478882,
"logps/rejected": -22.606929779052734,
"loss": 0.202,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": 17.302125930786133,
"rewards/margins": 2.174014091491699,
"rewards/rejected": 15.128110885620117,
"step": 80
},
{
"epoch": 0.08,
"grad_norm": 23.14398193359375,
"learning_rate": 3.982300884955752e-06,
"logits/chosen": -0.3626072406768799,
"logits/rejected": -0.3787815570831299,
"logps/chosen": -2.203828811645508,
"logps/rejected": -29.433551788330078,
"loss": 0.2123,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 17.00284194946289,
"rewards/margins": 2.320391893386841,
"rewards/rejected": 14.682450294494629,
"step": 90
},
{
"epoch": 0.08888888888888889,
"grad_norm": 29.672739028930664,
"learning_rate": 4.424778761061948e-06,
"logits/chosen": -0.3035663962364197,
"logits/rejected": -0.31762221455574036,
"logps/chosen": -3.433589458465576,
"logps/rejected": -29.9322509765625,
"loss": 0.2592,
"rewards/accuracies": 0.9375,
"rewards/chosen": 16.929956436157227,
"rewards/margins": 2.31272029876709,
"rewards/rejected": 14.617237091064453,
"step": 100
},
{
"epoch": 0.09777777777777778,
"grad_norm": 1.873722791671753,
"learning_rate": 4.867256637168142e-06,
"logits/chosen": -0.2679600715637207,
"logits/rejected": -0.2826440930366516,
"logps/chosen": -0.9653514623641968,
"logps/rejected": -30.235322952270508,
"loss": 0.1336,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 17.462385177612305,
"rewards/margins": 3.1994175910949707,
"rewards/rejected": 14.26296615600586,
"step": 110
},
{
"epoch": 0.10666666666666667,
"grad_norm": 1.6913721561431885,
"learning_rate": 4.999409761242696e-06,
"logits/chosen": -0.22222033143043518,
"logits/rejected": -0.23720571398735046,
"logps/chosen": -4.4953508377075195,
"logps/rejected": -34.074745178222656,
"loss": 0.2552,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 17.04866600036621,
"rewards/margins": 3.1014418601989746,
"rewards/rejected": 13.947224617004395,
"step": 120
},
{
"epoch": 0.10666666666666667,
"eval_logits/chosen": -0.206527978181839,
"eval_logits/rejected": -0.22178640961647034,
"eval_logps/chosen": -3.69442081451416,
"eval_logps/rejected": -36.072166442871094,
"eval_loss": 0.18996010720729828,
"eval_rewards/accuracies": 0.9320000410079956,
"eval_rewards/chosen": 17.133451461791992,
"eval_rewards/margins": 3.379946708679199,
"eval_rewards/rejected": 13.753504753112793,
"eval_runtime": 361.5279,
"eval_samples_per_second": 2.766,
"eval_steps_per_second": 0.346,
"step": 120
},
{
"epoch": 0.11555555555555555,
"grad_norm": 61.80262756347656,
"learning_rate": 4.996519466816778e-06,
"logits/chosen": -0.18473535776138306,
"logits/rejected": -0.1988501250743866,
"logps/chosen": -3.7009687423706055,
"logps/rejected": -39.289939880371094,
"loss": 0.1394,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": 17.106964111328125,
"rewards/margins": 3.633338212966919,
"rewards/rejected": 13.473625183105469,
"step": 130
},
{
"epoch": 0.12444444444444444,
"grad_norm": 1.6732702255249023,
"learning_rate": 4.9912234871722805e-06,
"logits/chosen": -0.16134041547775269,
"logits/rejected": -0.17547868192195892,
"logps/chosen": -3.0637736320495605,
"logps/rejected": -40.07548522949219,
"loss": 0.1408,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": 17.392223358154297,
"rewards/margins": 4.242353439331055,
"rewards/rejected": 13.149867057800293,
"step": 140
},
{
"epoch": 0.13333333333333333,
"grad_norm": 0.346453994512558,
"learning_rate": 4.98352692559805e-06,
"logits/chosen": -0.13797929883003235,
"logits/rejected": -0.15283086895942688,
"logps/chosen": -5.14492130279541,
"logps/rejected": -47.97212219238281,
"loss": 0.2153,
"rewards/accuracies": 0.9375,
"rewards/chosen": 16.896778106689453,
"rewards/margins": 4.227695465087891,
"rewards/rejected": 12.669081687927246,
"step": 150
},
{
"epoch": 0.14222222222222222,
"grad_norm": 0.21871662139892578,
"learning_rate": 4.973437198621237e-06,
"logits/chosen": -0.12396670132875443,
"logits/rejected": -0.13780555129051208,
"logps/chosen": -6.108860015869141,
"logps/rejected": -54.90739440917969,
"loss": 0.0388,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": 16.75935935974121,
"rewards/margins": 4.755282878875732,
"rewards/rejected": 12.004077911376953,
"step": 160
},
{
"epoch": 0.1511111111111111,
"grad_norm": 235.12429809570312,
"learning_rate": 4.960964028860621e-06,
"logits/chosen": -0.1140839159488678,
"logits/rejected": -0.1263057291507721,
"logps/chosen": -12.605452537536621,
"logps/rejected": -53.81230926513672,
"loss": 0.4651,
"rewards/accuracies": 0.875,
"rewards/chosen": 16.101238250732422,
"rewards/margins": 3.9864249229431152,
"rewards/rejected": 12.114812850952148,
"step": 170
},
{
"epoch": 0.16,
"grad_norm": 190.97048950195312,
"learning_rate": 4.946119435657738e-06,
"logits/chosen": -0.10746976733207703,
"logits/rejected": -0.11878640949726105,
"logps/chosen": -8.5105562210083,
"logps/rejected": -51.314781188964844,
"loss": 0.2362,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 16.719980239868164,
"rewards/margins": 4.549674034118652,
"rewards/rejected": 12.170306205749512,
"step": 180
},
{
"epoch": 0.16,
"eval_logits/chosen": -0.10870806127786636,
"eval_logits/rejected": -0.12223993986845016,
"eval_logps/chosen": -4.414996147155762,
"eval_logps/rejected": -53.885032653808594,
"eval_loss": 0.20236633718013763,
"eval_rewards/accuracies": 0.9510000944137573,
"eval_rewards/chosen": 17.06139373779297,
"eval_rewards/margins": 5.089176177978516,
"eval_rewards/rejected": 11.97221851348877,
"eval_runtime": 361.4355,
"eval_samples_per_second": 2.767,
"eval_steps_per_second": 0.346,
"step": 180
},
{
"epoch": 0.1688888888888889,
"grad_norm": 56.81266784667969,
"learning_rate": 4.928917723494854e-06,
"logits/chosen": -0.10682469606399536,
"logits/rejected": -0.12124393880367279,
"logps/chosen": -3.058413028717041,
"logps/rejected": -55.052528381347656,
"loss": 0.2442,
"rewards/accuracies": 0.9500000476837158,
"rewards/chosen": 17.058589935302734,
"rewards/margins": 5.056097984313965,
"rewards/rejected": 12.002490043640137,
"step": 190
},
{
"epoch": 0.17777777777777778,
"grad_norm": 175.06552124023438,
"learning_rate": 4.909375468210947e-06,
"logits/chosen": -0.10520349442958832,
"logits/rejected": -0.12018950283527374,
"logps/chosen": -4.114959716796875,
"logps/rejected": -55.9394645690918,
"loss": 0.1915,
"rewards/accuracies": 0.9500000476837158,
"rewards/chosen": 16.98603057861328,
"rewards/margins": 5.105838775634766,
"rewards/rejected": 11.880191802978516,
"step": 200
},
{
"epoch": 0.18666666666666668,
"grad_norm": 78.06558990478516,
"learning_rate": 4.8875115010289655e-06,
"logits/chosen": -0.10475558042526245,
"logits/rejected": -0.11949175596237183,
"logps/chosen": -6.760301113128662,
"logps/rejected": -53.91607666015625,
"loss": 0.2843,
"rewards/accuracies": 0.9375,
"rewards/chosen": 16.857545852661133,
"rewards/margins": 4.917357921600342,
"rewards/rejected": 11.94018840789795,
"step": 210
},
{
"epoch": 0.19555555555555557,
"grad_norm": 15.880486488342285,
"learning_rate": 4.863346890409768e-06,
"logits/chosen": -0.11213523149490356,
"logits/rejected": -0.12581588327884674,
"logps/chosen": -6.759585380554199,
"logps/rejected": -51.10936737060547,
"loss": 0.5104,
"rewards/accuracies": 0.875,
"rewards/chosen": 16.859071731567383,
"rewards/margins": 4.638372898101807,
"rewards/rejected": 12.220698356628418,
"step": 220
},
{
"epoch": 0.20444444444444446,
"grad_norm": 46.97845458984375,
"learning_rate": 4.836904921750224e-06,
"logits/chosen": -0.11947059631347656,
"logits/rejected": -0.1329912692308426,
"logps/chosen": -3.608184814453125,
"logps/rejected": -48.794761657714844,
"loss": 0.2134,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 17.235904693603516,
"rewards/margins": 4.859888076782227,
"rewards/rejected": 12.376014709472656,
"step": 230
},
{
"epoch": 0.21333333333333335,
"grad_norm": 24.032859802246094,
"learning_rate": 4.808211074945042e-06,
"logits/chosen": -0.1200513243675232,
"logits/rejected": -0.1333036869764328,
"logps/chosen": -3.7552154064178467,
"logps/rejected": -49.87453079223633,
"loss": 0.1781,
"rewards/accuracies": 0.9500000476837158,
"rewards/chosen": 17.094650268554688,
"rewards/margins": 4.68077278137207,
"rewards/rejected": 12.41387939453125,
"step": 240
},
{
"epoch": 0.21333333333333335,
"eval_logits/chosen": -0.12433278560638428,
"eval_logits/rejected": -0.13808581233024597,
"eval_logps/chosen": -4.408891201019287,
"eval_logps/rejected": -50.744781494140625,
"eval_loss": 0.1546352356672287,
"eval_rewards/accuracies": 0.9500000476837158,
"eval_rewards/chosen": 17.06200408935547,
"eval_rewards/margins": 4.775761604309082,
"eval_rewards/rejected": 12.286243438720703,
"eval_runtime": 361.4974,
"eval_samples_per_second": 2.766,
"eval_steps_per_second": 0.346,
"step": 240
},
{
"epoch": 0.2222222222222222,
"grad_norm": 0.25737640261650085,
"learning_rate": 4.7772929998339485e-06,
"logits/chosen": -0.12348780035972595,
"logits/rejected": -0.13704943656921387,
"logps/chosen": -4.4299187660217285,
"logps/rejected": -53.074607849121094,
"loss": 0.1373,
"rewards/accuracies": 0.9375,
"rewards/chosen": 17.087068557739258,
"rewards/margins": 5.06691837310791,
"rewards/rejected": 12.020149230957031,
"step": 250
},
{
"epoch": 0.2311111111111111,
"grad_norm": 0.1839389204978943,
"learning_rate": 4.744180489557859e-06,
"logits/chosen": -0.12177034467458725,
"logits/rejected": -0.1342695653438568,
"logps/chosen": -3.775188446044922,
"logps/rejected": -53.98720932006836,
"loss": 0.1896,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 17.12021255493164,
"rewards/margins": 5.148064613342285,
"rewards/rejected": 11.972146987915039,
"step": 260
},
{
"epoch": 0.24,
"grad_norm": 12.258485794067383,
"learning_rate": 4.708905451849754e-06,
"logits/chosen": -0.11067859083414078,
"logits/rejected": -0.12377731502056122,
"logps/chosen": -6.418317794799805,
"logps/rejected": -56.57402801513672,
"loss": 0.2315,
"rewards/accuracies": 0.9375,
"rewards/chosen": 16.738832473754883,
"rewards/margins": 4.884931564331055,
"rewards/rejected": 11.853900909423828,
"step": 270
},
{
"epoch": 0.24888888888888888,
"grad_norm": 77.56194305419922,
"learning_rate": 4.671501878287879e-06,
"logits/chosen": -0.1184445172548294,
"logits/rejected": -0.1339874565601349,
"logps/chosen": -10.12116527557373,
"logps/rejected": -53.403907775878906,
"loss": 0.5343,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": 16.458633422851562,
"rewards/margins": 4.402472496032715,
"rewards/rejected": 12.056160926818848,
"step": 280
},
{
"epoch": 0.2577777777777778,
"grad_norm": 67.53883361816406,
"learning_rate": 4.6320058115409295e-06,
"logits/chosen": -0.1448262631893158,
"logits/rejected": -0.15793387591838837,
"logps/chosen": -3.4666190147399902,
"logps/rejected": -48.79213333129883,
"loss": 0.5017,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 16.945899963378906,
"rewards/margins": 4.2686333656311035,
"rewards/rejected": 12.677268028259277,
"step": 290
},
{
"epoch": 0.26666666666666666,
"grad_norm": 0.17521341145038605,
"learning_rate": 4.590455310636778e-06,
"logits/chosen": -0.16128253936767578,
"logits/rejected": -0.17375555634498596,
"logps/chosen": -2.9032950401306152,
"logps/rejected": -47.69734191894531,
"loss": 0.265,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 17.18383026123047,
"rewards/margins": 4.541309356689453,
"rewards/rejected": 12.642518997192383,
"step": 300
},
{
"epoch": 0.26666666666666666,
"eval_logits/chosen": -0.17444846034049988,
"eval_logits/rejected": -0.18559777736663818,
"eval_logps/chosen": -2.535512924194336,
"eval_logps/rejected": -47.16367721557617,
"eval_loss": 0.15360687673091888,
"eval_rewards/accuracies": 0.9440000653266907,
"eval_rewards/chosen": 17.249343872070312,
"eval_rewards/margins": 4.604989051818848,
"eval_rewards/rejected": 12.644353866577148,
"eval_runtime": 361.4575,
"eval_samples_per_second": 2.767,
"eval_steps_per_second": 0.346,
"step": 300
},
{
"epoch": 0.27555555555555555,
"grad_norm": 0.5040452480316162,
"learning_rate": 4.54689041428819e-06,
"logits/chosen": -0.16974106431007385,
"logits/rejected": -0.1810058057308197,
"logps/chosen": -1.233938217163086,
"logps/rejected": -49.907745361328125,
"loss": 0.1132,
"rewards/accuracies": 0.9500000476837158,
"rewards/chosen": 17.34117889404297,
"rewards/margins": 4.934173583984375,
"rewards/rejected": 12.407005310058594,
"step": 310
},
{
"epoch": 0.28444444444444444,
"grad_norm": 100.02949523925781,
"learning_rate": 4.501353102310901e-06,
"logits/chosen": -0.15705889463424683,
"logits/rejected": -0.1695334017276764,
"logps/chosen": -1.0820492506027222,
"logps/rejected": -52.577110290527344,
"loss": 0.1194,
"rewards/accuracies": 0.9500000476837158,
"rewards/chosen": 17.33388900756836,
"rewards/margins": 5.154760837554932,
"rewards/rejected": 12.179126739501953,
"step": 320
},
{
"epoch": 0.29333333333333333,
"grad_norm": 0.2689219117164612,
"learning_rate": 4.453887255171206e-06,
"logits/chosen": -0.13849371671676636,
"logits/rejected": -0.14990833401679993,
"logps/chosen": -1.8435032367706299,
"logps/rejected": -54.79044723510742,
"loss": 0.0926,
"rewards/accuracies": 0.9500000476837158,
"rewards/chosen": 17.2423095703125,
"rewards/margins": 5.28987979888916,
"rewards/rejected": 11.952428817749023,
"step": 330
},
{
"epoch": 0.3022222222222222,
"grad_norm": 0.09305431693792343,
"learning_rate": 4.404538611702055e-06,
"logits/chosen": -0.12299702316522598,
"logits/rejected": -0.13453055918216705,
"logps/chosen": -2.9897143840789795,
"logps/rejected": -52.954498291015625,
"loss": 0.2873,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 17.17474365234375,
"rewards/margins": 5.071004867553711,
"rewards/rejected": 12.103739738464355,
"step": 340
},
{
"epoch": 0.3111111111111111,
"grad_norm": 59.282073974609375,
"learning_rate": 4.3533547250284015e-06,
"logits/chosen": -0.11913029849529266,
"logits/rejected": -0.12785324454307556,
"logps/chosen": -3.9456872940063477,
"logps/rejected": -48.68487548828125,
"loss": 0.4332,
"rewards/accuracies": 0.875,
"rewards/chosen": 17.12805938720703,
"rewards/margins": 4.669450283050537,
"rewards/rejected": 12.458610534667969,
"step": 350
},
{
"epoch": 0.32,
"grad_norm": 0.31101909279823303,
"learning_rate": 4.300384916744261e-06,
"logits/chosen": -0.11280188709497452,
"logits/rejected": -0.12300585210323334,
"logps/chosen": -2.1714723110198975,
"logps/rejected": -54.74174118041992,
"loss": 0.1605,
"rewards/accuracies": 0.9500000476837158,
"rewards/chosen": 17.326162338256836,
"rewards/margins": 5.467062473297119,
"rewards/rejected": 11.859098434448242,
"step": 360
},
{
"epoch": 0.32,
"eval_logits/chosen": -0.10620756447315216,
"eval_logits/rejected": -0.11727114766836166,
"eval_logps/chosen": -1.4165427684783936,
"eval_logps/rejected": -50.9525146484375,
"eval_loss": 0.3194349706172943,
"eval_rewards/accuracies": 0.9210000038146973,
"eval_rewards/chosen": 17.36124038696289,
"eval_rewards/margins": 5.095769882202148,
"eval_rewards/rejected": 12.26546859741211,
"eval_runtime": 361.5072,
"eval_samples_per_second": 2.766,
"eval_steps_per_second": 0.346,
"step": 360
},
{
"epoch": 0.3288888888888889,
"grad_norm": 6.1126532554626465,
"learning_rate": 4.24568022938566e-06,
"logits/chosen": -0.10354311764240265,
"logits/rejected": -0.11526636779308319,
"logps/chosen": -1.2935255765914917,
"logps/rejected": -55.57566833496094,
"loss": 0.1711,
"rewards/accuracies": 0.9500000476837158,
"rewards/chosen": 17.439346313476562,
"rewards/margins": 5.700921058654785,
"rewards/rejected": 11.738424301147461,
"step": 370
},
{
"epoch": 0.3377777777777778,
"grad_norm": 34.15927505493164,
"learning_rate": 4.189293377245241e-06,
"logits/chosen": -0.1029932051897049,
"logits/rejected": -0.11382515728473663,
"logps/chosen": -2.5132687091827393,
"logps/rejected": -55.50346374511719,
"loss": 0.4359,
"rewards/accuracies": 0.8875000476837158,
"rewards/chosen": 16.731037139892578,
"rewards/margins": 4.368172645568848,
"rewards/rejected": 12.362865447998047,
"step": 380
},
{
"epoch": 0.3466666666666667,
"grad_norm": 2.8422904014587402,
"learning_rate": 4.131278695575952e-06,
"logits/chosen": -0.10793520510196686,
"logits/rejected": -0.12109285593032837,
"logps/chosen": -3.014652729034424,
"logps/rejected": -53.98411560058594,
"loss": 0.2161,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 17.137393951416016,
"rewards/margins": 5.105995178222656,
"rewards/rejected": 12.03139877319336,
"step": 390
},
{
"epoch": 0.35555555555555557,
"grad_norm": 54.0329475402832,
"learning_rate": 4.071692088232743e-06,
"logits/chosen": -0.10393750667572021,
"logits/rejected": -0.11834606528282166,
"logps/chosen": -2.1508543491363525,
"logps/rejected": -45.60733413696289,
"loss": 0.2077,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 17.586124420166016,
"rewards/margins": 5.077212333679199,
"rewards/rejected": 12.5089111328125,
"step": 400
},
{
"epoch": 0.36444444444444446,
"grad_norm": 81.61144256591797,
"learning_rate": 4.010590973802737e-06,
"logits/chosen": -0.09564584493637085,
"logits/rejected": -0.10617707669734955,
"logps/chosen": -3.4572842121124268,
"logps/rejected": -50.92162322998047,
"loss": 0.2478,
"rewards/accuracies": 0.8875000476837158,
"rewards/chosen": 17.010910034179688,
"rewards/margins": 4.556198596954346,
"rewards/rejected": 12.454713821411133,
"step": 410
},
{
"epoch": 0.37333333333333335,
"grad_norm": 0.30974289774894714,
"learning_rate": 3.948034230275781e-06,
"logits/chosen": -0.09134417027235031,
"logits/rejected": -0.1020016297698021,
"logps/chosen": -5.046698570251465,
"logps/rejected": -48.908958435058594,
"loss": 0.2894,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 17.007888793945312,
"rewards/margins": 4.53641414642334,
"rewards/rejected": 12.471475601196289,
"step": 420
},
{
"epoch": 0.37333333333333335,
"eval_logits/chosen": -0.09054450690746307,
"eval_logits/rejected": -0.10264354199171066,
"eval_logps/chosen": -1.913105845451355,
"eval_logps/rejected": -51.11127471923828,
"eval_loss": 0.16789735853672028,
"eval_rewards/accuracies": 0.9450000524520874,
"eval_rewards/chosen": 17.311582565307617,
"eval_rewards/margins": 5.061989784240723,
"eval_rewards/rejected": 12.249593734741211,
"eval_runtime": 361.5337,
"eval_samples_per_second": 2.766,
"eval_steps_per_second": 0.346,
"step": 420
},
{
"epoch": 0.38222222222222224,
"grad_norm": 12.824393272399902,
"learning_rate": 3.884082138308699e-06,
"logits/chosen": -0.08666776865720749,
"logits/rejected": -0.0997733399271965,
"logps/chosen": -1.7306327819824219,
"logps/rejected": -54.273292541503906,
"loss": 0.2298,
"rewards/accuracies": 0.9500000476837158,
"rewards/chosen": 17.167621612548828,
"rewards/margins": 5.065673351287842,
"rewards/rejected": 12.101947784423828,
"step": 430
},
{
"epoch": 0.39111111111111113,
"grad_norm": 0.30713599920272827,
"learning_rate": 3.818796323137896e-06,
"logits/chosen": -0.09174907952547073,
"logits/rejected": -0.10376611351966858,
"logps/chosen": -1.489154577255249,
"logps/rejected": -54.580726623535156,
"loss": 0.2513,
"rewards/accuracies": 0.9375,
"rewards/chosen": 17.22280502319336,
"rewards/margins": 5.175349235534668,
"rewards/rejected": 12.047454833984375,
"step": 440
},
{
"epoch": 0.4,
"grad_norm": 87.4791488647461,
"learning_rate": 3.7522396951963303e-06,
"logits/chosen": -0.09688778221607208,
"logits/rejected": -0.10897806286811829,
"logps/chosen": -3.157695770263672,
"logps/rejected": -50.96417236328125,
"loss": 0.1758,
"rewards/accuracies": 0.9500000476837158,
"rewards/chosen": 17.345651626586914,
"rewards/margins": 5.245656967163086,
"rewards/rejected": 12.099993705749512,
"step": 450
},
{
"epoch": 0.4088888888888889,
"grad_norm": 146.2008056640625,
"learning_rate": 3.684476389492026e-06,
"logits/chosen": -0.09378582239151001,
"logits/rejected": -0.10475654900074005,
"logps/chosen": -0.5611928701400757,
"logps/rejected": -56.518890380859375,
"loss": 0.1981,
"rewards/accuracies": 0.9500000476837158,
"rewards/chosen": 17.113712310791016,
"rewards/margins": 5.068872928619385,
"rewards/rejected": 12.044839859008789,
"step": 460
},
{
"epoch": 0.4177777777777778,
"grad_norm": 1.9137721061706543,
"learning_rate": 3.6155717038065783e-06,
"logits/chosen": -0.08695463836193085,
"logits/rejected": -0.09596743434667587,
"logps/chosen": -1.5298550128936768,
"logps/rejected": -50.27445983886719,
"loss": 0.2066,
"rewards/accuracies": 0.9375,
"rewards/chosen": 17.35186004638672,
"rewards/margins": 5.014693260192871,
"rewards/rejected": 12.337167739868164,
"step": 470
},
{
"epoch": 0.4266666666666667,
"grad_norm": 84.80391693115234,
"learning_rate": 3.545592035773192e-06,
"logits/chosen": -0.0746893435716629,
"logits/rejected": -0.08653923869132996,
"logps/chosen": -2.0052125453948975,
"logps/rejected": -57.502811431884766,
"loss": 0.1149,
"rewards/accuracies": 0.9500000476837158,
"rewards/chosen": 17.14373016357422,
"rewards/margins": 5.360415935516357,
"rewards/rejected": 11.783313751220703,
"step": 480
},
{
"epoch": 0.4266666666666667,
"eval_logits/chosen": -0.07700399309396744,
"eval_logits/rejected": -0.08828537166118622,
"eval_logps/chosen": -4.48896598815918,
"eval_logps/rejected": -53.76282501220703,
"eval_loss": 0.29511645436286926,
"eval_rewards/accuracies": 0.9230000376701355,
"eval_rewards/chosen": 17.053997039794922,
"eval_rewards/margins": 5.069558143615723,
"eval_rewards/rejected": 11.984437942504883,
"eval_runtime": 361.5035,
"eval_samples_per_second": 2.766,
"eval_steps_per_second": 0.346,
"step": 480
},
{
"epoch": 0.43555555555555553,
"grad_norm": 82.9616470336914,
"learning_rate": 3.4746048188948806e-06,
"logits/chosen": -0.06675051152706146,
"logits/rejected": -0.07860895991325378,
"logps/chosen": -4.162237167358398,
"logps/rejected": -54.77789306640625,
"loss": 0.2979,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 17.047603607177734,
"rewards/margins": 5.138361930847168,
"rewards/rejected": 11.909242630004883,
"step": 490
},
{
"epoch": 0.4444444444444444,
"grad_norm": 0.04293210059404373,
"learning_rate": 3.4026784575644887e-06,
"logits/chosen": -0.06424491107463837,
"logits/rejected": -0.07567107677459717,
"logps/chosen": -2.05729603767395,
"logps/rejected": -56.646087646484375,
"loss": 0.4378,
"rewards/accuracies": 0.8875000476837158,
"rewards/chosen": 16.947803497314453,
"rewards/margins": 4.919981956481934,
"rewards/rejected": 12.02782154083252,
"step": 500
}
],
"logging_steps": 10,
"max_steps": 1125,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 6.372648598634496e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}