{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9874476987447699,
  "eval_steps": 500,
  "global_step": 59,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 8.333333333333333e-08,
      "logits/chosen": -2.828038215637207,
      "logits/rejected": -2.728235960006714,
      "logps/chosen": -198.4669189453125,
      "logps/pi_response": -116.54592895507812,
      "logps/ref_response": -116.54592895507812,
      "logps/rejected": -205.3916015625,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.17,
      "learning_rate": 4.930057285201027e-07,
      "logits/chosen": -2.744847297668457,
      "logits/rejected": -2.7069268226623535,
      "logps/chosen": -220.64466857910156,
      "logps/pi_response": -125.66951751708984,
      "logps/ref_response": -125.32001495361328,
      "logps/rejected": -271.061279296875,
      "loss": 0.685,
      "rewards/accuracies": 0.5833333134651184,
      "rewards/chosen": -0.02577850967645645,
      "rewards/margins": 0.024903353303670883,
      "rewards/rejected": -0.050681862980127335,
      "step": 10
    },
    {
      "epoch": 0.33,
      "learning_rate": 4.187457503795526e-07,
      "logits/chosen": -2.7635605335235596,
      "logits/rejected": -2.719918727874756,
      "logps/chosen": -259.6158142089844,
      "logps/pi_response": -120.7113037109375,
      "logps/ref_response": -122.2673110961914,
      "logps/rejected": -314.64898681640625,
      "loss": 0.6284,
      "rewards/accuracies": 0.640625,
      "rewards/chosen": -0.23702558875083923,
      "rewards/margins": 0.28365251421928406,
      "rewards/rejected": -0.5206781625747681,
      "step": 20
    },
    {
      "epoch": 0.5,
      "learning_rate": 2.8691164100062034e-07,
      "logits/chosen": -2.7415413856506348,
      "logits/rejected": -2.70352840423584,
      "logps/chosen": -288.61761474609375,
      "logps/pi_response": -148.96580505371094,
      "logps/ref_response": -124.40946197509766,
      "logps/rejected": -359.23297119140625,
      "loss": 0.5876,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -0.5176891684532166,
      "rewards/margins": 0.4622374176979065,
      "rewards/rejected": -0.9799267053604126,
      "step": 30
    },
    {
      "epoch": 0.67,
      "learning_rate": 1.4248369943086995e-07,
      "logits/chosen": -2.661461353302002,
      "logits/rejected": -2.625527858734131,
      "logps/chosen": -286.5964660644531,
      "logps/pi_response": -156.09762573242188,
      "logps/ref_response": -118.28387451171875,
      "logps/rejected": -361.10809326171875,
      "loss": 0.5654,
      "rewards/accuracies": 0.653124988079071,
      "rewards/chosen": -0.5846043229103088,
      "rewards/margins": 0.482909619808197,
      "rewards/rejected": -1.0675138235092163,
      "step": 40
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.473909705816111e-08,
      "logits/chosen": -2.6372907161712646,
      "logits/rejected": -2.5853748321533203,
      "logps/chosen": -291.3328857421875,
      "logps/pi_response": -168.8394317626953,
      "logps/ref_response": -121.82298278808594,
      "logps/rejected": -364.73297119140625,
      "loss": 0.559,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -0.6193948984146118,
      "rewards/margins": 0.5188055038452148,
      "rewards/rejected": -1.1382004022598267,
      "step": 50
    },
    {
      "epoch": 0.99,
      "step": 59,
      "total_flos": 0.0,
      "train_loss": 0.5958063157938295,
      "train_runtime": 3589.2721,
      "train_samples_per_second": 4.258,
      "train_steps_per_second": 0.016
    }
  ],
  "logging_steps": 10,
  "max_steps": 59,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}