{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.971563981042654,
  "eval_steps": 100,
  "global_step": 104,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.018957345971563982,
      "grad_norm": 134.93724401851352,
      "learning_rate": 4.545454545454545e-08,
      "logits/chosen": 117.67350769042969,
      "logits/rejected": 126.90988159179688,
      "logps/chosen": -336.5020751953125,
      "logps/rejected": -438.0943298339844,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.1895734597156398,
      "grad_norm": 136.34386152980758,
      "learning_rate": 4.545454545454545e-07,
      "logits/chosen": 134.7221221923828,
      "logits/rejected": 138.1984100341797,
      "logps/chosen": -395.924560546875,
      "logps/rejected": -439.0712890625,
      "loss": 0.7055,
      "rewards/accuracies": 0.4097222089767456,
      "rewards/chosen": -0.003234411356970668,
      "rewards/margins": -0.03947298228740692,
      "rewards/rejected": 0.036238569766283035,
      "step": 10
    },
    {
      "epoch": 0.3791469194312796,
      "grad_norm": 135.65125923828373,
      "learning_rate": 4.885348141000122e-07,
      "logits/chosen": 121.62422180175781,
      "logits/rejected": 125.15272521972656,
      "logps/chosen": -369.32891845703125,
      "logps/rejected": -422.75689697265625,
      "loss": 0.6228,
      "rewards/accuracies": 0.581250011920929,
      "rewards/chosen": 0.170469269156456,
      "rewards/margins": 0.30520111322402954,
      "rewards/rejected": -0.13473184406757355,
      "step": 20
    },
    {
      "epoch": 0.5687203791469194,
      "grad_norm": 114.07857539812969,
      "learning_rate": 4.5025027361734613e-07,
      "logits/chosen": 141.53656005859375,
      "logits/rejected": 135.02110290527344,
      "logps/chosen": -418.7715759277344,
      "logps/rejected": -464.8955078125,
      "loss": 0.56,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -1.2944904565811157,
      "rewards/margins": 0.8948480486869812,
      "rewards/rejected": -2.189338445663452,
      "step": 30
    },
    {
      "epoch": 0.7582938388625592,
      "grad_norm": 103.49873893671008,
      "learning_rate": 3.893311157806091e-07,
      "logits/chosen": 123.42630767822266,
      "logits/rejected": 112.1140365600586,
      "logps/chosen": -401.87945556640625,
      "logps/rejected": -429.3699645996094,
      "loss": 0.5472,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -2.4228084087371826,
      "rewards/margins": 1.1271207332611084,
      "rewards/rejected": -3.54992938041687,
      "step": 40
    },
    {
      "epoch": 0.9478672985781991,
      "grad_norm": 120.25232045055446,
      "learning_rate": 3.126631330646801e-07,
      "logits/chosen": 138.51483154296875,
      "logits/rejected": 142.50746154785156,
      "logps/chosen": -464.8746643066406,
      "logps/rejected": -547.7520141601562,
      "loss": 0.5034,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -2.2957112789154053,
      "rewards/margins": 1.2795140743255615,
      "rewards/rejected": -3.575225353240967,
      "step": 50
    },
    {
      "epoch": 1.1374407582938388,
      "grad_norm": 60.01877502993502,
      "learning_rate": 2.2891223348923882e-07,
      "logits/chosen": 131.36839294433594,
      "logits/rejected": 134.95950317382812,
      "logps/chosen": -444.547607421875,
      "logps/rejected": -531.6339721679688,
      "loss": 0.3025,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -2.315918207168579,
      "rewards/margins": 2.439805269241333,
      "rewards/rejected": -4.755723476409912,
      "step": 60
    },
    {
      "epoch": 1.3270142180094786,
      "grad_norm": 47.79216068377628,
      "learning_rate": 1.4754491880085317e-07,
      "logits/chosen": 124.45259094238281,
      "logits/rejected": 126.58074951171875,
      "logps/chosen": -414.2548828125,
      "logps/rejected": -518.2446899414062,
      "loss": 0.1949,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -2.3579883575439453,
      "rewards/margins": 2.8935599327087402,
      "rewards/rejected": -5.251548767089844,
      "step": 70
    },
    {
      "epoch": 1.5165876777251186,
      "grad_norm": 55.81661982856631,
      "learning_rate": 7.775827023107834e-08,
      "logits/chosen": 110.53846740722656,
      "logits/rejected": 127.3997573852539,
      "logps/chosen": -416.94775390625,
      "logps/rejected": -536.0177001953125,
      "loss": 0.1738,
      "rewards/accuracies": 0.9312499761581421,
      "rewards/chosen": -3.1685705184936523,
      "rewards/margins": 2.967393159866333,
      "rewards/rejected": -6.135963439941406,
      "step": 80
    },
    {
      "epoch": 1.7061611374407581,
      "grad_norm": 46.35415773419707,
      "learning_rate": 2.7440387297912122e-08,
      "logits/chosen": 108.8740234375,
      "logits/rejected": 121.63133239746094,
      "logps/chosen": -452.612060546875,
      "logps/rejected": -570.0894775390625,
      "loss": 0.1585,
      "rewards/accuracies": 0.96875,
      "rewards/chosen": -3.32800030708313,
      "rewards/margins": 3.267381191253662,
      "rewards/rejected": -6.595381259918213,
      "step": 90
    },
    {
      "epoch": 1.8957345971563981,
      "grad_norm": 43.55253667946353,
      "learning_rate": 2.27878296044029e-09,
      "logits/chosen": 115.157958984375,
      "logits/rejected": 114.7303695678711,
      "logps/chosen": -443.5245056152344,
      "logps/rejected": -538.7801513671875,
      "loss": 0.1534,
      "rewards/accuracies": 0.956250011920929,
      "rewards/chosen": -3.0830078125,
      "rewards/margins": 3.0285675525665283,
      "rewards/rejected": -6.111575126647949,
      "step": 100
    },
    {
      "epoch": 1.8957345971563981,
      "eval_logits/chosen": 94.33853912353516,
      "eval_logits/rejected": 88.46772003173828,
      "eval_logps/chosen": -442.3636779785156,
      "eval_logps/rejected": -472.6589660644531,
      "eval_loss": 0.4630958139896393,
      "eval_rewards/accuracies": 0.7083333134651184,
      "eval_rewards/chosen": -3.9814913272857666,
      "eval_rewards/margins": 1.5538396835327148,
      "eval_rewards/rejected": -5.5353312492370605,
      "eval_runtime": 21.3945,
      "eval_samples_per_second": 35.056,
      "eval_steps_per_second": 1.122,
      "step": 100
    },
    {
      "epoch": 1.971563981042654,
      "step": 104,
      "total_flos": 0.0,
      "train_loss": 0.38818228932527393,
      "train_runtime": 1555.646,
      "train_samples_per_second": 8.678,
      "train_steps_per_second": 0.067
    }
  ],
  "logging_steps": 10,
  "max_steps": 104,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}