{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.99581589958159,
"eval_steps": 500,
"global_step": 119,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 4.166666666666666e-08,
"logits/chosen": -2.7076048851013184,
"logits/rejected": -2.5675482749938965,
"logps/chosen": -287.144287109375,
"logps/pi_response": -67.09939575195312,
"logps/ref_response": -67.09939575195312,
"logps/rejected": -200.97291564941406,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.08,
"learning_rate": 4.1666666666666667e-07,
"logits/chosen": -2.7254514694213867,
"logits/rejected": -2.6899900436401367,
"logps/chosen": -235.18109130859375,
"logps/pi_response": -75.69638061523438,
"logps/ref_response": -75.67623901367188,
"logps/rejected": -201.9483184814453,
"loss": 0.6924,
"rewards/accuracies": 0.4722222089767456,
"rewards/chosen": 0.0020254377741366625,
"rewards/margins": 0.0009842434665188193,
"rewards/rejected": 0.001041194424033165,
"step": 10
},
{
"epoch": 0.17,
"learning_rate": 4.931352528237397e-07,
"logits/chosen": -2.7493643760681152,
"logits/rejected": -2.706611394882202,
"logps/chosen": -246.18350219726562,
"logps/pi_response": -75.16463470458984,
"logps/ref_response": -72.46954345703125,
"logps/rejected": -174.53036499023438,
"loss": 0.6751,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": 0.035773951560258865,
"rewards/margins": 0.03667151927947998,
"rewards/rejected": -0.000897567137144506,
"step": 20
},
{
"epoch": 0.25,
"learning_rate": 4.658920803689553e-07,
"logits/chosen": -2.672675848007202,
"logits/rejected": -2.638225793838501,
"logps/chosen": -231.5210418701172,
"logps/pi_response": -95.05049896240234,
"logps/ref_response": -74.0744400024414,
"logps/rejected": -167.09555053710938,
"loss": 0.6405,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": -0.03551141917705536,
"rewards/margins": 0.12416237592697144,
"rewards/rejected": -0.15967382490634918,
"step": 30
},
{
"epoch": 0.33,
"learning_rate": 4.201712553872657e-07,
"logits/chosen": -2.661431074142456,
"logits/rejected": -2.6182944774627686,
"logps/chosen": -259.1197814941406,
"logps/pi_response": -129.29989624023438,
"logps/ref_response": -74.10218048095703,
"logps/rejected": -219.2566375732422,
"loss": 0.6059,
"rewards/accuracies": 0.71875,
"rewards/chosen": -0.1776684820652008,
"rewards/margins": 0.21845951676368713,
"rewards/rejected": -0.39612799882888794,
"step": 40
},
{
"epoch": 0.42,
"learning_rate": 3.598859066780754e-07,
"logits/chosen": -2.6624646186828613,
"logits/rejected": -2.627788543701172,
"logps/chosen": -299.5395812988281,
"logps/pi_response": -163.2670135498047,
"logps/ref_response": -79.56159973144531,
"logps/rejected": -232.27017211914062,
"loss": 0.5737,
"rewards/accuracies": 0.7562500238418579,
"rewards/chosen": -0.25120988488197327,
"rewards/margins": 0.41320866346359253,
"rewards/rejected": -0.6644185781478882,
"step": 50
},
{
"epoch": 0.5,
"learning_rate": 2.9019570347986706e-07,
"logits/chosen": -2.6411023139953613,
"logits/rejected": -2.6134071350097656,
"logps/chosen": -293.26239013671875,
"logps/pi_response": -176.7355499267578,
"logps/ref_response": -82.40287780761719,
"logps/rejected": -257.48724365234375,
"loss": 0.5494,
"rewards/accuracies": 0.8125,
"rewards/chosen": -0.33723509311676025,
"rewards/margins": 0.5918785929679871,
"rewards/rejected": -0.9291136860847473,
"step": 60
},
{
"epoch": 0.59,
"learning_rate": 2.1706525253979534e-07,
"logits/chosen": -2.6791365146636963,
"logits/rejected": -2.6405601501464844,
"logps/chosen": -286.8842468261719,
"logps/pi_response": -170.18603515625,
"logps/ref_response": -68.73023986816406,
"logps/rejected": -258.01019287109375,
"loss": 0.5384,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.49572473764419556,
"rewards/margins": 0.4329424798488617,
"rewards/rejected": -0.9286670684814453,
"step": 70
},
{
"epoch": 0.67,
"learning_rate": 1.4675360263490295e-07,
"logits/chosen": -2.645174026489258,
"logits/rejected": -2.6211788654327393,
"logps/chosen": -252.89706420898438,
"logps/pi_response": -170.83099365234375,
"logps/ref_response": -68.83003997802734,
"logps/rejected": -270.0607604980469,
"loss": 0.5174,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -0.5351428985595703,
"rewards/margins": 0.5067500472068787,
"rewards/rejected": -1.0418930053710938,
"step": 80
},
{
"epoch": 0.75,
"learning_rate": 8.527854855097224e-08,
"logits/chosen": -2.712350845336914,
"logits/rejected": -2.678652048110962,
"logps/chosen": -292.08087158203125,
"logps/pi_response": -186.6718292236328,
"logps/ref_response": -69.44734191894531,
"logps/rejected": -281.0624694824219,
"loss": 0.5078,
"rewards/accuracies": 0.78125,
"rewards/chosen": -0.5814527273178101,
"rewards/margins": 0.6152323484420776,
"rewards/rejected": -1.1966850757598877,
"step": 90
},
{
"epoch": 0.84,
"learning_rate": 3.790158337517127e-08,
"logits/chosen": -2.643378973007202,
"logits/rejected": -2.6130120754241943,
"logps/chosen": -328.4146728515625,
"logps/pi_response": -198.84283447265625,
"logps/ref_response": -70.62646484375,
"logps/rejected": -297.75482177734375,
"loss": 0.4993,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -0.6543205380439758,
"rewards/margins": 0.5759149789810181,
"rewards/rejected": -1.2302353382110596,
"step": 100
},
{
"epoch": 0.92,
"learning_rate": 8.677580722139671e-09,
"logits/chosen": -2.734649181365967,
"logits/rejected": -2.6601593494415283,
"logps/chosen": -329.1470642089844,
"logps/pi_response": -197.30343627929688,
"logps/ref_response": -80.19813537597656,
"logps/rejected": -300.39556884765625,
"loss": 0.5165,
"rewards/accuracies": 0.8125,
"rewards/chosen": -0.5252186059951782,
"rewards/margins": 0.7248367071151733,
"rewards/rejected": -1.2500553131103516,
"step": 110
},
{
"epoch": 1.0,
"step": 119,
"total_flos": 0.0,
"train_loss": 0.5675088297419187,
"train_runtime": 3570.6254,
"train_samples_per_second": 4.28,
"train_steps_per_second": 0.033
}
],
"logging_steps": 10,
"max_steps": 119,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
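
A minimal sketch (not part of the checkpoint itself) of how one might inspect the logged metrics above, assuming the JSON is saved locally as "trainer_state.json"; the field names are taken directly from the log_history entries.

import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    # The final entry holds aggregate run stats (train_loss, runtime),
    # not per-step metrics, so only print entries with reward margins.
    if "rewards/margins" in entry:
        print(f"step {entry['step']:>3}  "
              f"loss {entry['loss']:.4f}  "
              f"margin {entry['rewards/margins']:.4f}  "
              f"acc {entry['rewards/accuracies']:.3f}")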