zephyr-7b-gemma-dpo / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.971563981042654,
"eval_steps": 100,
"global_step": 104,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.018957345971563982,
"grad_norm": 133.64062565621384,
"learning_rate": 4.545454545454545e-08,
"logits/chosen": 119.0696792602539,
"logits/rejected": 120.28123474121094,
"logps/chosen": -394.1268310546875,
"logps/rejected": -419.3145446777344,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.1895734597156398,
"grad_norm": 130.60842697521545,
"learning_rate": 4.545454545454545e-07,
"logits/chosen": 133.6595001220703,
"logits/rejected": 136.7303466796875,
"logps/chosen": -410.0771484375,
"logps/rejected": -445.1907653808594,
"loss": 0.7019,
"rewards/accuracies": 0.46875,
"rewards/chosen": 0.020121444016695023,
"rewards/margins": 0.041466910392045975,
"rewards/rejected": -0.021345460787415504,
"step": 10
},
{
"epoch": 0.3791469194312796,
"grad_norm": 127.29787487076526,
"learning_rate": 4.885348141000122e-07,
"logits/chosen": 122.2022476196289,
"logits/rejected": 128.57586669921875,
"logps/chosen": -357.1582336425781,
"logps/rejected": -416.08087158203125,
"loss": 0.6346,
"rewards/accuracies": 0.653124988079071,
"rewards/chosen": 0.25420495867729187,
"rewards/margins": 0.4108888506889343,
"rewards/rejected": -0.15668384730815887,
"step": 20
},
{
"epoch": 0.5687203791469194,
"grad_norm": 110.05011163607695,
"learning_rate": 4.5025027361734613e-07,
"logits/chosen": 121.9586181640625,
"logits/rejected": 125.2878646850586,
"logps/chosen": -387.713134765625,
"logps/rejected": -442.55206298828125,
"loss": 0.5698,
"rewards/accuracies": 0.703125,
"rewards/chosen": -1.2848999500274658,
"rewards/margins": 0.9555079340934753,
"rewards/rejected": -2.240407943725586,
"step": 30
},
{
"epoch": 0.7582938388625592,
"grad_norm": 111.08969508053838,
"learning_rate": 3.893311157806091e-07,
"logits/chosen": 121.52265930175781,
"logits/rejected": 119.2688980102539,
"logps/chosen": -402.15716552734375,
"logps/rejected": -444.649169921875,
"loss": 0.5496,
"rewards/accuracies": 0.7593749761581421,
"rewards/chosen": -2.0494799613952637,
"rewards/margins": 1.070623517036438,
"rewards/rejected": -3.120103359222412,
"step": 40
},
{
"epoch": 0.9478672985781991,
"grad_norm": 122.82358054602282,
"learning_rate": 3.126631330646801e-07,
"logits/chosen": 128.3933868408203,
"logits/rejected": 133.44308471679688,
"logps/chosen": -431.0421447753906,
"logps/rejected": -497.99420166015625,
"loss": 0.4959,
"rewards/accuracies": 0.746874988079071,
"rewards/chosen": -2.2058186531066895,
"rewards/margins": 1.2984471321105957,
"rewards/rejected": -3.504265546798706,
"step": 50
},
{
"epoch": 1.1374407582938388,
"grad_norm": 65.56687198861316,
"learning_rate": 2.2891223348923882e-07,
"logits/chosen": 124.70857238769531,
"logits/rejected": 126.91219329833984,
"logps/chosen": -420.981201171875,
"logps/rejected": -505.5345153808594,
"loss": 0.307,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": -2.4674336910247803,
"rewards/margins": 2.305318832397461,
"rewards/rejected": -4.772752285003662,
"step": 60
},
{
"epoch": 1.3270142180094786,
"grad_norm": 56.106028687537446,
"learning_rate": 1.4754491880085317e-07,
"logits/chosen": 121.775146484375,
"logits/rejected": 125.95316314697266,
"logps/chosen": -425.7054138183594,
"logps/rejected": -518.8656005859375,
"loss": 0.1907,
"rewards/accuracies": 0.940625011920929,
"rewards/chosen": -2.623661518096924,
"rewards/margins": 2.869920253753662,
"rewards/rejected": -5.493582248687744,
"step": 70
},
{
"epoch": 1.5165876777251186,
"grad_norm": 50.43661058282089,
"learning_rate": 7.775827023107834e-08,
"logits/chosen": 114.5962142944336,
"logits/rejected": 126.1790771484375,
"logps/chosen": -426.8082580566406,
"logps/rejected": -527.3065185546875,
"loss": 0.1761,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": -2.979158401489258,
"rewards/margins": 3.0644469261169434,
"rewards/rejected": -6.043605804443359,
"step": 80
},
{
"epoch": 1.7061611374407581,
"grad_norm": 45.81843583580765,
"learning_rate": 2.7440387297912122e-08,
"logits/chosen": 117.46388244628906,
"logits/rejected": 123.80489349365234,
"logps/chosen": -449.65399169921875,
"logps/rejected": -544.6094970703125,
"loss": 0.1515,
"rewards/accuracies": 0.984375,
"rewards/chosen": -2.8478360176086426,
"rewards/margins": 3.247156858444214,
"rewards/rejected": -6.0949931144714355,
"step": 90
},
{
"epoch": 1.8957345971563981,
"grad_norm": 42.75820426735574,
"learning_rate": 2.27878296044029e-09,
"logits/chosen": 114.7729721069336,
"logits/rejected": 119.34477233886719,
"logps/chosen": -437.2296447753906,
"logps/rejected": -523.9191284179688,
"loss": 0.1578,
"rewards/accuracies": 0.9593750238418579,
"rewards/chosen": -2.8138155937194824,
"rewards/margins": 3.170293092727661,
"rewards/rejected": -5.984108924865723,
"step": 100
},
{
"epoch": 1.8957345971563981,
"eval_logits/chosen": 91.35408782958984,
"eval_logits/rejected": 94.07221221923828,
"eval_logps/chosen": -428.1683349609375,
"eval_logps/rejected": -515.7637939453125,
"eval_loss": 0.4643263816833496,
"eval_rewards/accuracies": 0.75,
"eval_rewards/chosen": -3.5909416675567627,
"eval_rewards/margins": 1.7481167316436768,
"eval_rewards/rejected": -5.339057922363281,
"eval_runtime": 88.3612,
"eval_samples_per_second": 8.488,
"eval_steps_per_second": 0.532,
"step": 100
},
{
"epoch": 1.971563981042654,
"step": 104,
"total_flos": 0.0,
"train_loss": 0.3883641087091886,
"train_runtime": 2802.2739,
"train_samples_per_second": 4.818,
"train_steps_per_second": 0.037
}
],
"logging_steps": 10,
"max_steps": 104,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"total_flos": 0.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
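
A minimal sketch of how this log can be inspected, assuming the JSON above is saved locally as trainer_state.json (the filename is the Trainer's default; adjust the path if yours differs). It uses only the Python standard library. Two properties of the logged values are worth checking: rewards/margins equals rewards/chosen minus rewards/rejected at every logged step (up to float rounding), and the step-1 loss of 0.6931 is -log(sigmoid(0)) = log(2), i.e. the DPO loss of a policy that still matches its reference model.

# Sketch: inspect the DPO training log recorded in trainer_state.json.
# Assumes the file sits in the current directory; standard library only.
import json
import math

with open("trainer_state.json") as f:
    state = json.load(f)

history = state["log_history"]

# Training entries carry "loss"; the evaluation entry carries "eval_loss";
# the final summary entry carries "train_loss" instead.
train_rows = [row for row in history if "loss" in row]
eval_rows = [row for row in history if "eval_loss" in row]

print(f"{'step':>5} {'loss':>8} {'accuracy':>9} {'margin':>8}")
for row in train_rows:
    print(f"{row['step']:>5} {row['loss']:>8.4f} "
          f"{row['rewards/accuracies']:>9.3f} {row['rewards/margins']:>8.3f}")

# Sanity check: margins = chosen - rejected rewards at every logged step.
for row in train_rows:
    assert math.isclose(
        row["rewards/margins"],
        row["rewards/chosen"] - row["rewards/rejected"],
        rel_tol=1e-4, abs_tol=1e-6,
    )

# At step 1 the policy still matches the reference model, so the DPO loss is
# -log(sigmoid(0)) = log(2) ~= 0.6931, the value logged above.
print("log(2) =", round(math.log(2), 4))

for row in eval_rows:
    print("eval_loss:", row["eval_loss"],
          "eval accuracy:", row["eval_rewards/accuracies"])

Run against this file, the table shows the training loss falling from 0.6931 to roughly 0.15-0.19 while reward accuracy climbs above 0.93, versus an eval loss of 0.464 and eval accuracy of 0.75 at step 100.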