{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9874476987447699,
  "eval_steps": 500,
  "global_step": 59,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.016736401673640166,
      "grad_norm": 7.775175087490474,
      "learning_rate": 8.333333333333333e-08,
      "logits/chosen": -2.807276487350464,
      "logits/rejected": -2.7759768962860107,
      "logps/chosen": -315.42626953125,
      "logps/rejected": -227.5915985107422,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.16736401673640167,
      "grad_norm": 7.448045779799194,
      "learning_rate": 4.930057285201027e-07,
      "logits/chosen": -2.7555572986602783,
      "logits/rejected": -2.746171474456787,
      "logps/chosen": -271.9293212890625,
      "logps/rejected": -260.67010498046875,
      "loss": 0.6915,
      "rewards/accuracies": 0.53125,
      "rewards/chosen": 0.005406347569078207,
      "rewards/margins": 0.0034980433993041515,
      "rewards/rejected": 0.0019083041697740555,
      "step": 10
    },
    {
      "epoch": 0.33472803347280333,
      "grad_norm": 6.734236245718351,
      "learning_rate": 4.187457503795526e-07,
      "logits/chosen": -2.7869458198547363,
      "logits/rejected": -2.768073320388794,
      "logps/chosen": -261.4452209472656,
      "logps/rejected": -250.89541625976562,
      "loss": 0.6745,
      "rewards/accuracies": 0.715624988079071,
      "rewards/chosen": 0.03236500173807144,
      "rewards/margins": 0.04089399054646492,
      "rewards/rejected": -0.008528990671038628,
      "step": 20
    },
    {
      "epoch": 0.502092050209205,
      "grad_norm": 7.437539615124712,
      "learning_rate": 2.8691164100062034e-07,
      "logits/chosen": -2.7878143787384033,
      "logits/rejected": -2.7688212394714355,
      "logps/chosen": -293.8128662109375,
      "logps/rejected": -254.24874877929688,
      "loss": 0.6485,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.02356739528477192,
      "rewards/margins": 0.12733949720859528,
      "rewards/rejected": -0.15090689063072205,
      "step": 30
    },
    {
      "epoch": 0.6694560669456067,
      "grad_norm": 8.137783776888698,
      "learning_rate": 1.4248369943086995e-07,
      "logits/chosen": -2.760474681854248,
      "logits/rejected": -2.7379024028778076,
      "logps/chosen": -264.1443176269531,
      "logps/rejected": -255.5771026611328,
      "loss": 0.6353,
      "rewards/accuracies": 0.684374988079071,
      "rewards/chosen": -0.11224844306707382,
      "rewards/margins": 0.16041621565818787,
      "rewards/rejected": -0.2726646959781647,
      "step": 40
    },
    {
      "epoch": 0.8368200836820083,
      "grad_norm": 8.560922020463133,
      "learning_rate": 3.473909705816111e-08,
      "logits/chosen": -2.7703192234039307,
      "logits/rejected": -2.7502920627593994,
      "logps/chosen": -279.1363525390625,
      "logps/rejected": -287.27764892578125,
      "loss": 0.6202,
      "rewards/accuracies": 0.628125011920929,
      "rewards/chosen": -0.22127707302570343,
      "rewards/margins": 0.13637453317642212,
      "rewards/rejected": -0.35765162110328674,
      "step": 50
    },
    {
      "epoch": 0.9874476987447699,
      "step": 59,
      "total_flos": 0.0,
      "train_loss": 0.6490362135030455,
      "train_runtime": 1628.642,
      "train_samples_per_second": 9.384,
      "train_steps_per_second": 0.036
    }
  ],
  "logging_steps": 10,
  "max_steps": 59,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}