{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9874476987447699,
  "eval_steps": 500,
  "global_step": 59,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.016736401673640166,
      "grad_norm": 7.773357075045369,
      "learning_rate": 8.333333333333333e-08,
      "logits/chosen": -2.807276487350464,
      "logits/rejected": -2.7759768962860107,
      "logps/chosen": -315.42626953125,
      "logps/rejected": -227.5915985107422,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.16736401673640167,
      "grad_norm": 7.497914636567085,
      "learning_rate": 4.930057285201027e-07,
      "logits/chosen": -2.755561351776123,
      "logits/rejected": -2.7461330890655518,
      "logps/chosen": -271.9638366699219,
      "logps/rejected": -260.6882019042969,
      "loss": 0.6915,
      "rewards/accuracies": 0.4895833432674408,
      "rewards/chosen": 0.005060985218733549,
      "rewards/margins": 0.003333525499328971,
      "rewards/rejected": 0.0017274598358199,
      "step": 10
    },
    {
      "epoch": 0.33472803347280333,
      "grad_norm": 6.720411242540097,
      "learning_rate": 4.187457503795526e-07,
      "logits/chosen": -2.7870450019836426,
      "logits/rejected": -2.7681467533111572,
      "logps/chosen": -261.4424133300781,
      "logps/rejected": -250.8816375732422,
      "loss": 0.6745,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": 0.032392919063568115,
      "rewards/margins": 0.0407845601439476,
      "rewards/rejected": -0.008391635492444038,
      "step": 20
    },
    {
      "epoch": 0.502092050209205,
      "grad_norm": 7.440292572397188,
      "learning_rate": 2.8691164100062034e-07,
      "logits/chosen": -2.787830352783203,
      "logits/rejected": -2.768749713897705,
      "logps/chosen": -293.83428955078125,
      "logps/rejected": -254.20156860351562,
      "loss": 0.6486,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.023782286792993546,
      "rewards/margins": 0.12665286660194397,
      "rewards/rejected": -0.1504351645708084,
      "step": 30
    },
    {
      "epoch": 0.6694560669456067,
      "grad_norm": 8.125719320287144,
      "learning_rate": 1.4248369943086995e-07,
      "logits/chosen": -2.7605981826782227,
      "logits/rejected": -2.738039970397949,
      "logps/chosen": -264.20245361328125,
      "logps/rejected": -255.5854034423828,
      "loss": 0.6354,
      "rewards/accuracies": 0.684374988079071,
      "rewards/chosen": -0.11282990127801895,
      "rewards/margins": 0.1599176824092865,
      "rewards/rejected": -0.27274757623672485,
      "step": 40
    },
    {
      "epoch": 0.8368200836820083,
      "grad_norm": 8.480612858735153,
      "learning_rate": 3.473909705816111e-08,
      "logits/chosen": -2.7703018188476562,
      "logits/rejected": -2.7503485679626465,
      "logps/chosen": -278.990478515625,
      "logps/rejected": -287.1396789550781,
      "loss": 0.6202,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.2198183238506317,
      "rewards/margins": 0.13645336031913757,
      "rewards/rejected": -0.35627174377441406,
      "step": 50
    },
    {
      "epoch": 0.9874476987447699,
      "step": 59,
      "total_flos": 0.0,
      "train_loss": 0.6490245107877053,
      "train_runtime": 1631.3433,
      "train_samples_per_second": 9.368,
      "train_steps_per_second": 0.036
    }
  ],
  "logging_steps": 10,
  "max_steps": 59,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
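
The "log_history" list above follows the usual Hugging Face Trainer layout: one dictionary per logging step (here every 10 steps, plus step 1), and a final summary entry that carries run-level fields such as "train_loss" and "train_runtime" instead of a per-step loss. A minimal sketch of reading these metrics back, assuming the JSON above is saved locally as trainer_state.json:

import json

# Load the trainer state and pull out the per-step DPO metrics.
# The file name is an assumption; the Trainer writes it as
# trainer_state.json inside the output/checkpoint directory.
with open("trainer_state.json") as f:
    state = json.load(f)

# The last log_history entry is a run summary rather than a logging step,
# so keep only entries that carry a per-step loss.
logged = [e for e in state["log_history"] if "loss" in e]

for entry in logged:
    print(
        f"step {entry['step']:>3}  "
        f"loss {entry['loss']:.4f}  "
        f"reward margin {entry.get('rewards/margins', float('nan')):.4f}"
    )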