{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9874476987447699,
"eval_steps": 500,
"global_step": 59,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.016736401673640166,
"grad_norm": 78.60790491305158,
"learning_rate": 8.333333333333333e-08,
"logits/chosen": -1.0250325202941895,
"logits/rejected": -0.7656487226486206,
"logps/chosen": -238.23211669921875,
"logps/pi_response": -67.60678100585938,
"logps/ref_response": -67.60678100585938,
"logps/rejected": -473.57257080078125,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.16736401673640167,
"grad_norm": 47.26665166784664,
"learning_rate": 4.930057285201027e-07,
"logits/chosen": -0.7590005397796631,
"logits/rejected": -0.534713625907898,
"logps/chosen": -269.3804931640625,
"logps/pi_response": -86.47183227539062,
"logps/ref_response": -85.30850982666016,
"logps/rejected": -577.9660034179688,
"loss": 0.6322,
"rewards/accuracies": 0.6284722089767456,
"rewards/chosen": -0.17737312614917755,
"rewards/margins": 0.3175060451030731,
"rewards/rejected": -0.49487918615341187,
"step": 10
},
{
"epoch": 0.33472803347280333,
"grad_norm": 78.10720741885179,
"learning_rate": 4.187457503795526e-07,
"logits/chosen": 0.7693876624107361,
"logits/rejected": 1.1501473188400269,
"logps/chosen": -397.8346862792969,
"logps/pi_response": -133.69357299804688,
"logps/ref_response": -84.12879943847656,
"logps/rejected": -852.2254028320312,
"loss": 0.6126,
"rewards/accuracies": 0.778124988079071,
"rewards/chosen": -1.3858518600463867,
"rewards/margins": 1.7604806423187256,
"rewards/rejected": -3.1463327407836914,
"step": 20
},
{
"epoch": 0.502092050209205,
"grad_norm": 44.300549506110464,
"learning_rate": 2.8691164100062034e-07,
"logits/chosen": 1.0299599170684814,
"logits/rejected": 1.5359565019607544,
"logps/chosen": -398.71575927734375,
"logps/pi_response": -156.10476684570312,
"logps/ref_response": -84.0820541381836,
"logps/rejected": -803.1231689453125,
"loss": 0.4797,
"rewards/accuracies": 0.731249988079071,
"rewards/chosen": -1.3773680925369263,
"rewards/margins": 1.4377108812332153,
"rewards/rejected": -2.8150792121887207,
"step": 30
},
{
"epoch": 0.6694560669456067,
"grad_norm": 40.9925395869338,
"learning_rate": 1.4248369943086995e-07,
"logits/chosen": 0.7738146781921387,
"logits/rejected": 1.347411036491394,
"logps/chosen": -327.626953125,
"logps/pi_response": -121.0374526977539,
"logps/ref_response": -77.38616943359375,
"logps/rejected": -800.9747314453125,
"loss": 0.4483,
"rewards/accuracies": 0.7718750238418579,
"rewards/chosen": -0.895713210105896,
"rewards/margins": 1.4477618932724,
"rewards/rejected": -2.343475103378296,
"step": 40
},
{
"epoch": 0.8368200836820083,
"grad_norm": 43.60271544881446,
"learning_rate": 3.473909705816111e-08,
"logits/chosen": 0.9320430755615234,
"logits/rejected": 1.5464847087860107,
"logps/chosen": -350.8163757324219,
"logps/pi_response": -132.66949462890625,
"logps/ref_response": -84.61216735839844,
"logps/rejected": -686.16015625,
"loss": 0.4596,
"rewards/accuracies": 0.7406250238418579,
"rewards/chosen": -0.8762038350105286,
"rewards/margins": 1.1485484838485718,
"rewards/rejected": -2.024752378463745,
"step": 50
},
{
"epoch": 0.9874476987447699,
"step": 59,
"total_flos": 0.0,
"train_loss": 0.5195278232380495,
"train_runtime": 2549.7708,
"train_samples_per_second": 5.994,
"train_steps_per_second": 0.023
}
],
"logging_steps": 10,
"max_steps": 59,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
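
A minimal sketch (not part of the original file) of how one might inspect this trainer state with only the Python standard library. It assumes the JSON above is saved as trainer_state.json; the file name and the printed fields are taken directly from the structure shown above.

import json

# Hypothetical path: wherever this trainer_state.json was saved alongside the checkpoint.
with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "loss" in entry:
        # Intermediate logging steps (every `logging_steps` = 10 steps, plus step 1).
        print(
            f"step {entry['step']:>3} | epoch {entry['epoch']:.3f} | "
            f"loss {entry['loss']:.4f} | "
            f"reward margin {entry['rewards/margins']:.3f} | "
            f"accuracy {entry['rewards/accuracies']:.3f}"
        )
    else:
        # Final summary entry with the averaged training loss and runtime stats.
        print(
            f"finished at step {entry['step']} | "
            f"train_loss {entry['train_loss']:.4f} | "
            f"runtime {entry['train_runtime']:.1f}s"
        )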