{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 313,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.003194888178913738,
"grad_norm": 32.14257471485931,
"learning_rate": 5.3191489361702125e-09,
"logits/chosen": -1.7265625,
"logits/rejected": -1.65625,
"logps/chosen": -249.0,
"logps/rejected": -242.0,
"loss": 0.6914,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.03194888178913738,
"grad_norm": 33.76096895998352,
"learning_rate": 5.3191489361702123e-08,
"logits/chosen": -1.765625,
"logits/rejected": -1.765625,
"logps/chosen": -240.0,
"logps/rejected": -244.0,
"loss": 0.6926,
"rewards/accuracies": 0.2083333283662796,
"rewards/chosen": 0.00555419921875,
"rewards/margins": 0.0,
"rewards/rejected": 0.00555419921875,
"step": 10
},
{
"epoch": 0.06389776357827476,
"grad_norm": 31.259900823289186,
"learning_rate": 1.0638297872340425e-07,
"logits/chosen": -1.71875,
"logits/rejected": -1.75,
"logps/chosen": -237.0,
"logps/rejected": -241.0,
"loss": 0.6916,
"rewards/accuracies": 0.26249998807907104,
"rewards/chosen": 0.0,
"rewards/margins": 0.006256103515625,
"rewards/rejected": -0.006256103515625,
"step": 20
},
{
"epoch": 0.09584664536741214,
"grad_norm": 32.366987695897166,
"learning_rate": 1.5957446808510638e-07,
"logits/chosen": -1.75,
"logits/rejected": -1.75,
"logps/chosen": -245.0,
"logps/rejected": -246.0,
"loss": 0.6946,
"rewards/accuracies": 0.22499999403953552,
"rewards/chosen": 6.109476089477539e-06,
"rewards/margins": -0.0012359619140625,
"rewards/rejected": 0.0012664794921875,
"step": 30
},
{
"epoch": 0.12779552715654952,
"grad_norm": 30.81764382136956,
"learning_rate": 2.127659574468085e-07,
"logits/chosen": -1.703125,
"logits/rejected": -1.734375,
"logps/chosen": -243.0,
"logps/rejected": -240.0,
"loss": 0.6818,
"rewards/accuracies": 0.4000000059604645,
"rewards/chosen": -0.003753662109375,
"rewards/margins": 0.032470703125,
"rewards/rejected": -0.036376953125,
"step": 40
},
{
"epoch": 0.1597444089456869,
"grad_norm": 31.214992328374773,
"learning_rate": 2.659574468085106e-07,
"logits/chosen": -1.7109375,
"logits/rejected": -1.7265625,
"logps/chosen": -242.0,
"logps/rejected": -243.0,
"loss": 0.673,
"rewards/accuracies": 0.4375,
"rewards/chosen": -0.0400390625,
"rewards/margins": 0.033935546875,
"rewards/rejected": -0.07373046875,
"step": 50
},
{
"epoch": 0.19169329073482427,
"grad_norm": 28.88257684001896,
"learning_rate": 3.1914893617021275e-07,
"logits/chosen": -1.7265625,
"logits/rejected": -1.75,
"logps/chosen": -240.0,
"logps/rejected": -243.0,
"loss": 0.6296,
"rewards/accuracies": 0.574999988079071,
"rewards/chosen": -0.0625,
"rewards/margins": 0.111328125,
"rewards/rejected": -0.173828125,
"step": 60
},
{
"epoch": 0.22364217252396165,
"grad_norm": 30.584634735099684,
"learning_rate": 3.7234042553191484e-07,
"logits/chosen": -1.7109375,
"logits/rejected": -1.71875,
"logps/chosen": -243.0,
"logps/rejected": -250.0,
"loss": 0.6439,
"rewards/accuracies": 0.6499999761581421,
"rewards/chosen": -0.080078125,
"rewards/margins": 0.19140625,
"rewards/rejected": -0.271484375,
"step": 70
},
{
"epoch": 0.25559105431309903,
"grad_norm": 29.751500863088314,
"learning_rate": 4.25531914893617e-07,
"logits/chosen": -1.703125,
"logits/rejected": -1.75,
"logps/chosen": -240.0,
"logps/rejected": -251.0,
"loss": 0.6266,
"rewards/accuracies": 0.6000000238418579,
"rewards/chosen": -0.2060546875,
"rewards/margins": 0.2099609375,
"rewards/rejected": -0.41796875,
"step": 80
},
{
"epoch": 0.28753993610223644,
"grad_norm": 32.267981389042255,
"learning_rate": 4.787234042553192e-07,
"logits/chosen": -1.7109375,
"logits/rejected": -1.765625,
"logps/chosen": -245.0,
"logps/rejected": -247.0,
"loss": 0.6458,
"rewards/accuracies": 0.574999988079071,
"rewards/chosen": -0.349609375,
"rewards/margins": 0.2060546875,
"rewards/rejected": -0.5546875,
"step": 90
},
{
"epoch": 0.3194888178913738,
"grad_norm": 31.916118030442767,
"learning_rate": 4.964497041420119e-07,
"logits/chosen": -1.6953125,
"logits/rejected": -1.7109375,
"logps/chosen": -246.0,
"logps/rejected": -243.0,
"loss": 0.6301,
"rewards/accuracies": 0.5375000238418579,
"rewards/chosen": -0.29296875,
"rewards/margins": 0.1767578125,
"rewards/rejected": -0.470703125,
"step": 100
},
{
"epoch": 0.3514376996805112,
"grad_norm": 30.563502769937987,
"learning_rate": 4.905325443786982e-07,
"logits/chosen": -1.6796875,
"logits/rejected": -1.734375,
"logps/chosen": -247.0,
"logps/rejected": -250.0,
"loss": 0.6481,
"rewards/accuracies": 0.6625000238418579,
"rewards/chosen": -0.23046875,
"rewards/margins": 0.2421875,
"rewards/rejected": -0.47265625,
"step": 110
},
{
"epoch": 0.38338658146964855,
"grad_norm": 26.847013088556583,
"learning_rate": 4.846153846153846e-07,
"logits/chosen": -1.7109375,
"logits/rejected": -1.7109375,
"logps/chosen": -243.0,
"logps/rejected": -251.0,
"loss": 0.6152,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.2734375,
"rewards/margins": 0.306640625,
"rewards/rejected": -0.578125,
"step": 120
},
{
"epoch": 0.41533546325878595,
"grad_norm": 31.074353820130803,
"learning_rate": 4.78698224852071e-07,
"logits/chosen": -1.703125,
"logits/rejected": -1.71875,
"logps/chosen": -240.0,
"logps/rejected": -260.0,
"loss": 0.604,
"rewards/accuracies": 0.5375000238418579,
"rewards/chosen": -0.47265625,
"rewards/margins": 0.291015625,
"rewards/rejected": -0.76171875,
"step": 130
},
{
"epoch": 0.4472843450479233,
"grad_norm": 32.192668717979046,
"learning_rate": 4.727810650887574e-07,
"logits/chosen": -1.6796875,
"logits/rejected": -1.6796875,
"logps/chosen": -243.0,
"logps/rejected": -254.0,
"loss": 0.5609,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -0.416015625,
"rewards/margins": 0.5234375,
"rewards/rejected": -0.9375,
"step": 140
},
{
"epoch": 0.4792332268370607,
"grad_norm": 32.155266134791546,
"learning_rate": 4.668639053254438e-07,
"logits/chosen": -1.6953125,
"logits/rejected": -1.671875,
"logps/chosen": -246.0,
"logps/rejected": -251.0,
"loss": 0.5987,
"rewards/accuracies": 0.637499988079071,
"rewards/chosen": -0.45703125,
"rewards/margins": 0.453125,
"rewards/rejected": -0.91015625,
"step": 150
},
{
"epoch": 0.5111821086261981,
"grad_norm": 30.837637394035802,
"learning_rate": 4.6094674556213014e-07,
"logits/chosen": -1.6875,
"logits/rejected": -1.703125,
"logps/chosen": -248.0,
"logps/rejected": -252.0,
"loss": 0.6033,
"rewards/accuracies": 0.637499988079071,
"rewards/chosen": -0.333984375,
"rewards/margins": 0.373046875,
"rewards/rejected": -0.70703125,
"step": 160
},
{
"epoch": 0.5431309904153354,
"grad_norm": 25.59680166981086,
"learning_rate": 4.5502958579881655e-07,
"logits/chosen": -1.671875,
"logits/rejected": -1.6796875,
"logps/chosen": -238.0,
"logps/rejected": -248.0,
"loss": 0.5817,
"rewards/accuracies": 0.6000000238418579,
"rewards/chosen": -0.322265625,
"rewards/margins": 0.30078125,
"rewards/rejected": -0.62109375,
"step": 170
},
{
"epoch": 0.5750798722044729,
"grad_norm": 29.648466125367857,
"learning_rate": 4.491124260355029e-07,
"logits/chosen": -1.6875,
"logits/rejected": -1.6875,
"logps/chosen": -245.0,
"logps/rejected": -251.0,
"loss": 0.5874,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.38671875,
"rewards/margins": 0.50390625,
"rewards/rejected": -0.890625,
"step": 180
},
{
"epoch": 0.6070287539936102,
"grad_norm": 28.442764104750204,
"learning_rate": 4.4319526627218936e-07,
"logits/chosen": -1.6875,
"logits/rejected": -1.6875,
"logps/chosen": -246.0,
"logps/rejected": -249.0,
"loss": 0.5737,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -0.294921875,
"rewards/margins": 0.5078125,
"rewards/rejected": -0.80078125,
"step": 190
},
{
"epoch": 0.6389776357827476,
"grad_norm": 26.370679072337964,
"learning_rate": 4.372781065088757e-07,
"logits/chosen": -1.71875,
"logits/rejected": -1.703125,
"logps/chosen": -240.0,
"logps/rejected": -256.0,
"loss": 0.5867,
"rewards/accuracies": 0.5874999761581421,
"rewards/chosen": -0.3828125,
"rewards/margins": 0.3671875,
"rewards/rejected": -0.75,
"step": 200
},
{
"epoch": 0.670926517571885,
"grad_norm": 24.556255560376357,
"learning_rate": 4.313609467455621e-07,
"logits/chosen": -1.703125,
"logits/rejected": -1.6953125,
"logps/chosen": -246.0,
"logps/rejected": -252.0,
"loss": 0.5853,
"rewards/accuracies": 0.6625000238418579,
"rewards/chosen": -0.349609375,
"rewards/margins": 0.3671875,
"rewards/rejected": -0.71875,
"step": 210
},
{
"epoch": 0.7028753993610224,
"grad_norm": 29.98817140815093,
"learning_rate": 4.2544378698224847e-07,
"logits/chosen": -1.6796875,
"logits/rejected": -1.71875,
"logps/chosen": -243.0,
"logps/rejected": -256.0,
"loss": 0.5872,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -0.39453125,
"rewards/margins": 0.55078125,
"rewards/rejected": -0.9453125,
"step": 220
},
{
"epoch": 0.7348242811501597,
"grad_norm": 27.478577985922634,
"learning_rate": 4.195266272189349e-07,
"logits/chosen": -1.671875,
"logits/rejected": -1.65625,
"logps/chosen": -240.0,
"logps/rejected": -249.0,
"loss": 0.5549,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.3203125,
"rewards/margins": 0.50390625,
"rewards/rejected": -0.82421875,
"step": 230
},
{
"epoch": 0.7667731629392971,
"grad_norm": 26.416091326825903,
"learning_rate": 4.1360946745562133e-07,
"logits/chosen": -1.703125,
"logits/rejected": -1.71875,
"logps/chosen": -240.0,
"logps/rejected": -262.0,
"loss": 0.538,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.32421875,
"rewards/margins": 0.55078125,
"rewards/rejected": -0.875,
"step": 240
},
{
"epoch": 0.7987220447284346,
"grad_norm": 28.30204123280793,
"learning_rate": 4.076923076923077e-07,
"logits/chosen": -1.6796875,
"logits/rejected": -1.6875,
"logps/chosen": -239.0,
"logps/rejected": -260.0,
"loss": 0.5507,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -0.38671875,
"rewards/margins": 0.5546875,
"rewards/rejected": -0.9453125,
"step": 250
},
{
"epoch": 0.8306709265175719,
"grad_norm": 33.17216428250153,
"learning_rate": 4.017751479289941e-07,
"logits/chosen": -1.6953125,
"logits/rejected": -1.7421875,
"logps/chosen": -244.0,
"logps/rejected": -256.0,
"loss": 0.5584,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": -0.27734375,
"rewards/margins": 0.78125,
"rewards/rejected": -1.0625,
"step": 260
},
{
"epoch": 0.8626198083067093,
"grad_norm": 27.41764842123467,
"learning_rate": 3.9585798816568044e-07,
"logits/chosen": -1.6640625,
"logits/rejected": -1.65625,
"logps/chosen": -238.0,
"logps/rejected": -252.0,
"loss": 0.5742,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": -0.24609375,
"rewards/margins": 0.6015625,
"rewards/rejected": -0.84765625,
"step": 270
},
{
"epoch": 0.8945686900958466,
"grad_norm": 34.061232014315166,
"learning_rate": 3.8994082840236685e-07,
"logits/chosen": -1.6953125,
"logits/rejected": -1.703125,
"logps/chosen": -242.0,
"logps/rejected": -251.0,
"loss": 0.5279,
"rewards/accuracies": 0.7875000238418579,
"rewards/chosen": -0.3203125,
"rewards/margins": 0.65234375,
"rewards/rejected": -0.97265625,
"step": 280
},
{
"epoch": 0.9265175718849841,
"grad_norm": 28.160658079870018,
"learning_rate": 3.840236686390532e-07,
"logits/chosen": -1.6796875,
"logits/rejected": -1.6953125,
"logps/chosen": -248.0,
"logps/rejected": -252.0,
"loss": 0.5804,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -0.4296875,
"rewards/margins": 0.45703125,
"rewards/rejected": -0.88671875,
"step": 290
},
{
"epoch": 0.9584664536741214,
"grad_norm": 29.60484241313221,
"learning_rate": 3.7810650887573966e-07,
"logits/chosen": -1.65625,
"logits/rejected": -1.671875,
"logps/chosen": -247.0,
"logps/rejected": -255.0,
"loss": 0.5635,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -0.35546875,
"rewards/margins": 0.400390625,
"rewards/rejected": -0.75390625,
"step": 300
},
{
"epoch": 0.9904153354632588,
"grad_norm": 33.368421615399974,
"learning_rate": 3.72189349112426e-07,
"logits/chosen": -1.7109375,
"logits/rejected": -1.703125,
"logps/chosen": -244.0,
"logps/rejected": -250.0,
"loss": 0.5843,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": -0.2578125,
"rewards/margins": 0.49609375,
"rewards/rejected": -0.75390625,
"step": 310
},
{
"epoch": 1.0,
"eval_logits/chosen": -1.6953125,
"eval_logits/rejected": -1.7109375,
"eval_logps/chosen": -246.0,
"eval_logps/rejected": -248.0,
"eval_loss": 0.6162499785423279,
"eval_rewards/accuracies": 0.5714285969734192,
"eval_rewards/chosen": -0.380859375,
"eval_rewards/margins": 0.296875,
"eval_rewards/rejected": -0.6796875,
"eval_runtime": 12.6011,
"eval_samples_per_second": 15.872,
"eval_steps_per_second": 0.556,
"step": 313
}
],
"logging_steps": 10,
"max_steps": 939,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}