{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.971563981042654,
"eval_steps": 100,
"global_step": 104,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"grad_norm": 141.4031591270893,
"learning_rate": 4.545454545454545e-08,
"logits/chosen": 111.1229019165039,
"logits/rejected": 86.78887939453125,
"logps/chosen": -328.8410339355469,
"logps/rejected": -329.2784118652344,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.19,
"grad_norm": 137.1330948401959,
"learning_rate": 4.545454545454545e-07,
"logits/chosen": 110.3634033203125,
"logits/rejected": 133.23895263671875,
"logps/chosen": -350.8841247558594,
"logps/rejected": -434.155029296875,
"loss": 0.7084,
"rewards/accuracies": 0.4722222089767456,
"rewards/chosen": 0.10759051889181137,
"rewards/margins": 0.03753471374511719,
"rewards/rejected": 0.07005578279495239,
"step": 10
},
{
"epoch": 0.38,
"grad_norm": 118.64244526415024,
"learning_rate": 4.885348141000122e-07,
"logits/chosen": 117.46419525146484,
"logits/rejected": 128.240234375,
"logps/chosen": -331.4848937988281,
"logps/rejected": -408.83056640625,
"loss": 0.6233,
"rewards/accuracies": 0.6499999761581421,
"rewards/chosen": 0.20835945010185242,
"rewards/margins": 0.7139766812324524,
"rewards/rejected": -0.5056172609329224,
"step": 20
},
{
"epoch": 0.57,
"grad_norm": 101.92099166540923,
"learning_rate": 4.5025027361734613e-07,
"logits/chosen": 115.0364761352539,
"logits/rejected": 119.716064453125,
"logps/chosen": -399.8323669433594,
"logps/rejected": -474.94384765625,
"loss": 0.6029,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": -1.7643420696258545,
"rewards/margins": 1.0587323904037476,
"rewards/rejected": -2.8230743408203125,
"step": 30
},
{
"epoch": 0.76,
"grad_norm": 107.76688802812863,
"learning_rate": 3.893311157806091e-07,
"logits/chosen": 117.44291687011719,
"logits/rejected": 112.09379577636719,
"logps/chosen": -424.4700622558594,
"logps/rejected": -459.14361572265625,
"loss": 0.5401,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -2.072272539138794,
"rewards/margins": 0.9466603398323059,
"rewards/rejected": -3.018933057785034,
"step": 40
},
{
"epoch": 0.95,
"grad_norm": 127.24884718202802,
"learning_rate": 3.126631330646801e-07,
"logits/chosen": 124.3499984741211,
"logits/rejected": 125.83009338378906,
"logps/chosen": -425.4100646972656,
"logps/rejected": -460.123046875,
"loss": 0.5226,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -1.6563717126846313,
"rewards/margins": 1.290310263633728,
"rewards/rejected": -2.9466819763183594,
"step": 50
},
{
"epoch": 1.14,
"grad_norm": 60.21781496264702,
"learning_rate": 2.2891223348923882e-07,
"logits/chosen": 122.9765625,
"logits/rejected": 126.6246566772461,
"logps/chosen": -393.0502014160156,
"logps/rejected": -447.2841796875,
"loss": 0.287,
"rewards/accuracies": 0.90625,
"rewards/chosen": -1.311488389968872,
"rewards/margins": 2.3555827140808105,
"rewards/rejected": -3.6670711040496826,
"step": 60
},
{
"epoch": 1.33,
"grad_norm": 56.930992725835345,
"learning_rate": 1.4754491880085317e-07,
"logits/chosen": 117.85044860839844,
"logits/rejected": 119.93992614746094,
"logps/chosen": -365.17333984375,
"logps/rejected": -490.39080810546875,
"loss": 0.1969,
"rewards/accuracies": 0.9375,
"rewards/chosen": -1.3022083044052124,
"rewards/margins": 2.6266796588897705,
"rewards/rejected": -3.9288878440856934,
"step": 70
},
{
"epoch": 1.52,
"grad_norm": 55.076942962446836,
"learning_rate": 7.775827023107834e-08,
"logits/chosen": 125.15724182128906,
"logits/rejected": 126.8897476196289,
"logps/chosen": -429.802978515625,
"logps/rejected": -526.1527099609375,
"loss": 0.1797,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": -1.4758400917053223,
"rewards/margins": 2.972074270248413,
"rewards/rejected": -4.4479146003723145,
"step": 80
},
{
"epoch": 1.71,
"grad_norm": 59.283310271361565,
"learning_rate": 2.7440387297912122e-08,
"logits/chosen": 108.48063659667969,
"logits/rejected": 113.1912612915039,
"logps/chosen": -411.52783203125,
"logps/rejected": -497.0039978027344,
"loss": 0.1734,
"rewards/accuracies": 0.9437500238418579,
"rewards/chosen": -2.0875043869018555,
"rewards/margins": 3.0847535133361816,
"rewards/rejected": -5.172257423400879,
"step": 90
},
{
"epoch": 1.9,
"grad_norm": 60.59000761849891,
"learning_rate": 2.27878296044029e-09,
"logits/chosen": 124.4859619140625,
"logits/rejected": 115.1314697265625,
"logps/chosen": -430.19573974609375,
"logps/rejected": -541.2686767578125,
"loss": 0.1884,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": -2.0935351848602295,
"rewards/margins": 3.198591709136963,
"rewards/rejected": -5.292126655578613,
"step": 100
},
{
"epoch": 1.9,
"eval_logits/chosen": 94.0592269897461,
"eval_logits/rejected": 88.1426773071289,
"eval_logps/chosen": -419.4271240234375,
"eval_logps/rejected": -448.1896057128906,
"eval_loss": 0.4704847037792206,
"eval_rewards/accuracies": 0.71875,
"eval_rewards/chosen": -2.870262861251831,
"eval_rewards/margins": 1.558273196220398,
"eval_rewards/rejected": -4.4285359382629395,
"eval_runtime": 61.9568,
"eval_samples_per_second": 12.105,
"eval_steps_per_second": 0.387,
"step": 100
},
{
"epoch": 1.97,
"step": 104,
"total_flos": 0.0,
"train_loss": 0.3924295770434233,
"train_runtime": 3265.4264,
"train_samples_per_second": 4.134,
"train_steps_per_second": 0.032
}
],
"logging_steps": 10,
"max_steps": 104,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"total_flos": 0.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}