{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9968652037617555,
  "eval_steps": 500,
  "global_step": 159,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 3.125e-08,
      "logits/chosen": -2.5097975730895996,
      "logits/rejected": -2.419074058532715,
      "logps/chosen": -193.5599365234375,
      "logps/pi_response": -171.87442016601562,
      "logps/ref_response": -171.87442016601562,
      "logps/rejected": -224.4684295654297,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.06,
      "learning_rate": 3.1249999999999997e-07,
      "logits/chosen": -2.5170841217041016,
      "logits/rejected": -2.499955415725708,
      "logps/chosen": -268.5122375488281,
      "logps/pi_response": -200.62173461914062,
      "logps/ref_response": -200.7644805908203,
      "logps/rejected": -275.15576171875,
      "loss": 0.692,
      "rewards/accuracies": 0.4513888955116272,
      "rewards/chosen": 0.0004006973758805543,
      "rewards/margins": 0.0025936374440789223,
      "rewards/rejected": -0.0021929400973021984,
      "step": 10
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.990353313429303e-07,
      "logits/chosen": -2.5279006958007812,
      "logits/rejected": -2.48834228515625,
      "logps/chosen": -280.61029052734375,
      "logps/pi_response": -218.712158203125,
      "logps/ref_response": -220.61703491210938,
      "logps/rejected": -302.9790344238281,
      "loss": 0.6799,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": 0.0002802034723572433,
      "rewards/margins": 0.06487339735031128,
      "rewards/rejected": -0.06459320336580276,
      "step": 20
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.882681251368548e-07,
      "logits/chosen": -2.3786978721618652,
      "logits/rejected": -2.309803009033203,
      "logps/chosen": -268.9042053222656,
      "logps/pi_response": -195.0692901611328,
      "logps/ref_response": -189.48886108398438,
      "logps/rejected": -271.2804260253906,
      "loss": 0.6702,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": -0.04177286848425865,
      "rewards/margins": 0.0969323068857193,
      "rewards/rejected": -0.13870516419410706,
      "step": 30
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.6604720940421207e-07,
      "logits/chosen": -2.210906505584717,
      "logits/rejected": -2.181436061859131,
      "logps/chosen": -283.42681884765625,
      "logps/pi_response": -215.8210906982422,
      "logps/ref_response": -189.73727416992188,
      "logps/rejected": -321.3996276855469,
      "loss": 0.6576,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.26275861263275146,
      "rewards/margins": 0.13055923581123352,
      "rewards/rejected": -0.3933178782463074,
      "step": 40
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.3344075855595097e-07,
      "logits/chosen": -2.1676063537597656,
      "logits/rejected": -2.122574806213379,
      "logps/chosen": -321.0862121582031,
      "logps/pi_response": -257.51531982421875,
      "logps/ref_response": -199.2998504638672,
      "logps/rejected": -330.80474853515625,
      "loss": 0.6442,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.47685688734054565,
      "rewards/margins": 0.16487360000610352,
      "rewards/rejected": -0.6417304873466492,
      "step": 50
    },
    {
      "epoch": 0.38,
      "learning_rate": 3.920161866827889e-07,
      "logits/chosen": -2.14129900932312,
      "logits/rejected": -2.106346607208252,
      "logps/chosen": -291.825439453125,
      "logps/pi_response": -247.85244750976562,
      "logps/ref_response": -182.53189086914062,
      "logps/rejected": -323.88409423828125,
      "loss": 0.6403,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.41668352484703064,
      "rewards/margins": 0.3306669592857361,
      "rewards/rejected": -0.7473504543304443,
      "step": 60
    },
    {
      "epoch": 0.44,
      "learning_rate": 3.4376480090239047e-07,
      "logits/chosen": -2.1696386337280273,
      "logits/rejected": -2.1270456314086914,
      "logps/chosen": -331.155029296875,
      "logps/pi_response": -269.52734375,
      "logps/ref_response": -191.21128845214844,
      "logps/rejected": -335.54974365234375,
      "loss": 0.6304,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": -0.6724362373352051,
      "rewards/margins": 0.23178334534168243,
      "rewards/rejected": -0.9042196273803711,
      "step": 70
    },
    {
      "epoch": 0.5,
      "learning_rate": 2.910060778827554e-07,
      "logits/chosen": -2.203618049621582,
      "logits/rejected": -2.166483163833618,
      "logps/chosen": -287.53997802734375,
      "logps/pi_response": -254.5912322998047,
      "logps/ref_response": -190.11843872070312,
      "logps/rejected": -332.36590576171875,
      "loss": 0.6061,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.514179527759552,
      "rewards/margins": 0.23715217411518097,
      "rewards/rejected": -0.7513316869735718,
      "step": 80
    },
    {
      "epoch": 0.56,
      "learning_rate": 2.3627616503391812e-07,
      "logits/chosen": -2.278022527694702,
      "logits/rejected": -2.2521653175354004,
      "logps/chosen": -271.2624206542969,
      "logps/pi_response": -235.70266723632812,
      "logps/ref_response": -180.7279815673828,
      "logps/rejected": -326.7520751953125,
      "loss": 0.6169,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.3808743357658386,
      "rewards/margins": 0.2905920445919037,
      "rewards/rejected": -0.6714664101600647,
      "step": 90
    },
    {
      "epoch": 0.63,
      "learning_rate": 1.8220596619089573e-07,
      "logits/chosen": -2.2085721492767334,
      "logits/rejected": -2.194762706756592,
      "logps/chosen": -326.04888916015625,
      "logps/pi_response": -270.80810546875,
      "logps/ref_response": -195.6220703125,
      "logps/rejected": -389.5771179199219,
      "loss": 0.6167,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.691897988319397,
      "rewards/margins": 0.3410559296607971,
      "rewards/rejected": -1.0329539775848389,
      "step": 100
    },
    {
      "epoch": 0.69,
      "learning_rate": 1.3139467229135998e-07,
      "logits/chosen": -2.2901813983917236,
      "logits/rejected": -2.2825167179107666,
      "logps/chosen": -300.0601806640625,
      "logps/pi_response": -242.42221069335938,
      "logps/ref_response": -179.70770263671875,
      "logps/rejected": -329.3973083496094,
      "loss": 0.6131,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": -0.5147222280502319,
      "rewards/margins": 0.23834428191184998,
      "rewards/rejected": -0.7530665993690491,
      "step": 110
    },
    {
      "epoch": 0.75,
      "learning_rate": 8.628481651367875e-08,
      "logits/chosen": -2.2861225605010986,
      "logits/rejected": -2.2135860919952393,
      "logps/chosen": -316.3228454589844,
      "logps/pi_response": -282.2044982910156,
      "logps/ref_response": -202.98269653320312,
      "logps/rejected": -386.8232421875,
      "loss": 0.5922,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.6165128946304321,
      "rewards/margins": 0.42569026350975037,
      "rewards/rejected": -1.0422031879425049,
      "step": 120
    },
    {
      "epoch": 0.82,
      "learning_rate": 4.904486005914027e-08,
      "logits/chosen": -2.304694890975952,
      "logits/rejected": -2.2709462642669678,
      "logps/chosen": -332.1279602050781,
      "logps/pi_response": -276.42706298828125,
      "logps/ref_response": -195.01205444335938,
      "logps/rejected": -367.41204833984375,
      "loss": 0.6018,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.6682323217391968,
      "rewards/margins": 0.37029170989990234,
      "rewards/rejected": -1.0385239124298096,
      "step": 130
    },
    {
      "epoch": 0.88,
      "learning_rate": 2.1464952759020856e-08,
      "logits/chosen": -2.3100249767303467,
      "logits/rejected": -2.246779680252075,
      "logps/chosen": -328.9765930175781,
      "logps/pi_response": -279.7056579589844,
      "logps/ref_response": -195.75735473632812,
      "logps/rejected": -392.63818359375,
      "loss": 0.584,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": -0.7241796255111694,
      "rewards/margins": 0.34663844108581543,
      "rewards/rejected": -1.0708180665969849,
      "step": 140
    },
    {
      "epoch": 0.94,
      "learning_rate": 4.8708793644441086e-09,
      "logits/chosen": -2.2483038902282715,
      "logits/rejected": -2.229506254196167,
      "logps/chosen": -307.9349060058594,
      "logps/pi_response": -257.32733154296875,
      "logps/ref_response": -179.74029541015625,
      "logps/rejected": -353.04681396484375,
      "loss": 0.5882,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -0.6363255381584167,
      "rewards/margins": 0.36607229709625244,
      "rewards/rejected": -1.002397894859314,
      "step": 150
    },
    {
      "epoch": 1.0,
      "step": 159,
      "total_flos": 0.0,
      "train_loss": 0.6280672625175812,
      "train_runtime": 4264.6818,
      "train_samples_per_second": 4.778,
      "train_steps_per_second": 0.037
    }
  ],
  "logging_steps": 10,
  "max_steps": 159,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}