{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9874476987447699,
  "eval_steps": 500,
  "global_step": 59,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.016736401673640166,
      "grad_norm": 7.772482099050406,
      "learning_rate": 8.333333333333333e-08,
      "logits/chosen": -2.807276487350464,
      "logits/rejected": -2.7759768962860107,
      "logps/chosen": -315.42626953125,
      "logps/rejected": -227.5915985107422,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.16736401673640167,
      "grad_norm": 7.4630481490893485,
      "learning_rate": 4.930057285201027e-07,
      "logits/chosen": -2.7555835247039795,
      "logits/rejected": -2.746177911758423,
      "logps/chosen": -271.9296569824219,
      "logps/rejected": -260.67138671875,
      "loss": 0.6914,
      "rewards/accuracies": 0.5347222089767456,
      "rewards/chosen": 0.0054025910794734955,
      "rewards/margins": 0.0035073086619377136,
      "rewards/rejected": 0.0018952824175357819,
      "step": 10
    },
    {
      "epoch": 0.33472803347280333,
      "grad_norm": 6.708300371266294,
      "learning_rate": 4.187457503795526e-07,
      "logits/chosen": -2.787097930908203,
      "logits/rejected": -2.7682840824127197,
      "logps/chosen": -261.45843505859375,
      "logps/rejected": -250.8958740234375,
      "loss": 0.6745,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.03223257511854172,
      "rewards/margins": 0.040766604244709015,
      "rewards/rejected": -0.008534022606909275,
      "step": 20
    },
    {
      "epoch": 0.502092050209205,
      "grad_norm": 7.409956341692702,
      "learning_rate": 2.8691164100062034e-07,
      "logits/chosen": -2.7880468368530273,
      "logits/rejected": -2.7689902782440186,
      "logps/chosen": -293.8247985839844,
      "logps/rejected": -254.217529296875,
      "loss": 0.6487,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.023687291890382767,
      "rewards/margins": 0.12690749764442444,
      "rewards/rejected": -0.1505947858095169,
      "step": 30
    },
    {
      "epoch": 0.6694560669456067,
      "grad_norm": 8.167332409118467,
      "learning_rate": 1.4248369943086995e-07,
      "logits/chosen": -2.760864734649658,
      "logits/rejected": -2.7383790016174316,
      "logps/chosen": -264.1250915527344,
      "logps/rejected": -255.5350799560547,
      "loss": 0.6354,
      "rewards/accuracies": 0.684374988079071,
      "rewards/chosen": -0.11205615103244781,
      "rewards/margins": 0.16018818318843842,
      "rewards/rejected": -0.27224433422088623,
      "step": 40
    },
    {
      "epoch": 0.8368200836820083,
      "grad_norm": 8.545155858110496,
      "learning_rate": 3.473909705816111e-08,
      "logits/chosen": -2.770779848098755,
      "logits/rejected": -2.750847339630127,
      "logps/chosen": -279.1131286621094,
      "logps/rejected": -287.27044677734375,
      "loss": 0.6202,
      "rewards/accuracies": 0.628125011920929,
      "rewards/chosen": -0.22104480862617493,
      "rewards/margins": 0.13653476536273956,
      "rewards/rejected": -0.3575795590877533,
      "step": 50
    },
    {
      "epoch": 0.9874476987447699,
      "step": 59,
      "total_flos": 0.0,
      "train_loss": 0.6490178754774191,
      "train_runtime": 1630.8514,
      "train_samples_per_second": 9.371,
      "train_steps_per_second": 0.036
    }
  ],
  "logging_steps": 10,
  "max_steps": 59,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}