{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9992429977289932,
  "eval_steps": 500,
  "global_step": 165,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 2.941176470588235e-08,
      "logits/chosen": -0.36791229248046875,
      "logits/rejected": 1.366540551185608,
      "logps/chosen": -143.6042022705078,
      "logps/rejected": -273.9673156738281,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.06,
      "learning_rate": 2.941176470588235e-07,
      "logits/chosen": -0.5245670676231384,
      "logits/rejected": 1.248136281967163,
      "logps/chosen": -224.94821166992188,
      "logps/rejected": -359.60687255859375,
      "loss": 0.6689,
      "rewards/accuracies": 0.6597222089767456,
      "rewards/chosen": -0.021773571148514748,
      "rewards/margins": 0.06528930366039276,
      "rewards/rejected": -0.08706288039684296,
      "step": 10
    },
    {
      "epoch": 0.12,
      "learning_rate": 4.994932636402031e-07,
      "logits/chosen": -0.00770964939147234,
      "logits/rejected": 1.8876569271087646,
      "logps/chosen": -248.1709442138672,
      "logps/rejected": -470.3589782714844,
      "loss": 0.5867,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.5789338946342468,
      "rewards/margins": 0.8933883905410767,
      "rewards/rejected": -1.4723222255706787,
      "step": 20
    },
    {
      "epoch": 0.18,
      "learning_rate": 4.905416503522123e-07,
      "logits/chosen": -0.027714919298887253,
      "logits/rejected": 1.730603814125061,
      "logps/chosen": -320.3919677734375,
      "logps/rejected": -555.4063720703125,
      "loss": 0.5422,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -0.9962596893310547,
      "rewards/margins": 1.338209867477417,
      "rewards/rejected": -2.3344695568084717,
      "step": 30
    },
    {
      "epoch": 0.24,
      "learning_rate": 4.707922373336523e-07,
      "logits/chosen": 0.3444180190563202,
      "logits/rejected": 1.8734357357025146,
      "logps/chosen": -267.8757019042969,
      "logps/rejected": -508.5665588378906,
      "loss": 0.4963,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.6485141515731812,
      "rewards/margins": 1.0762808322906494,
      "rewards/rejected": -1.7247949838638306,
      "step": 40
    },
    {
      "epoch": 0.3,
      "learning_rate": 4.4113156629677313e-07,
      "logits/chosen": 0.37747180461883545,
      "logits/rejected": 2.4962353706359863,
      "logps/chosen": -263.87420654296875,
      "logps/rejected": -523.7318115234375,
      "loss": 0.4954,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -0.6324308514595032,
      "rewards/margins": 1.1557624340057373,
      "rewards/rejected": -1.7881933450698853,
      "step": 50
    },
    {
      "epoch": 0.36,
      "learning_rate": 4.0289109058972283e-07,
      "logits/chosen": 0.44958215951919556,
      "logits/rejected": 2.4981770515441895,
      "logps/chosen": -306.72894287109375,
      "logps/rejected": -533.3639526367188,
      "loss": 0.4826,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.7705734372138977,
      "rewards/margins": 1.0842384099960327,
      "rewards/rejected": -1.8548120260238647,
      "step": 60
    },
    {
      "epoch": 0.42,
      "learning_rate": 3.577874068920446e-07,
      "logits/chosen": 0.39437779784202576,
      "logits/rejected": 1.8338779211044312,
      "logps/chosen": -271.2483215332031,
      "logps/rejected": -511.545654296875,
      "loss": 0.4825,
      "rewards/accuracies": 0.8187500238418579,
      "rewards/chosen": -0.6353504657745361,
      "rewards/margins": 1.111212134361267,
      "rewards/rejected": -1.7465627193450928,
      "step": 70
    },
    {
      "epoch": 0.48,
      "learning_rate": 3.078451980100854e-07,
      "logits/chosen": 0.38897716999053955,
      "logits/rejected": 2.2546534538269043,
      "logps/chosen": -290.3913879394531,
      "logps/rejected": -523.7283935546875,
      "loss": 0.4871,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": -0.7725954055786133,
      "rewards/margins": 1.1881004571914673,
      "rewards/rejected": -1.9606958627700806,
      "step": 80
    },
    {
      "epoch": 0.55,
      "learning_rate": 2.553063458334059e-07,
      "logits/chosen": 0.24224960803985596,
      "logits/rejected": 2.18202543258667,
      "logps/chosen": -291.9999694824219,
      "logps/rejected": -528.9313354492188,
      "loss": 0.4659,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -0.6901915669441223,
      "rewards/margins": 1.1535792350769043,
      "rewards/rejected": -1.8437706232070923,
      "step": 90
    },
    {
      "epoch": 0.61,
      "learning_rate": 2.0252929432814287e-07,
      "logits/chosen": 0.29941219091415405,
      "logits/rejected": 2.375978708267212,
      "logps/chosen": -254.08950805664062,
      "logps/rejected": -499.0948181152344,
      "loss": 0.4401,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -0.6152492165565491,
      "rewards/margins": 1.0863755941390991,
      "rewards/rejected": -1.7016246318817139,
      "step": 100
    },
    {
      "epoch": 0.67,
      "learning_rate": 1.5188318011445906e-07,
      "logits/chosen": 0.29582899808883667,
      "logits/rejected": 2.588685989379883,
      "logps/chosen": -301.9359436035156,
      "logps/rejected": -512.3338012695312,
      "loss": 0.4591,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.8254504203796387,
      "rewards/margins": 1.0966691970825195,
      "rewards/rejected": -1.9221197366714478,
      "step": 110
    },
    {
      "epoch": 0.73,
      "learning_rate": 1.0564148305586295e-07,
      "logits/chosen": 0.34316009283065796,
      "logits/rejected": 2.3965556621551514,
      "logps/chosen": -301.0910339355469,
      "logps/rejected": -507.458251953125,
      "loss": 0.4652,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.8790184855461121,
      "rewards/margins": 0.9550568461418152,
      "rewards/rejected": -1.8340752124786377,
      "step": 120
    },
    {
      "epoch": 0.79,
      "learning_rate": 6.587997083462196e-08,
      "logits/chosen": 0.7106651067733765,
      "logits/rejected": 2.231572389602661,
      "logps/chosen": -312.9052734375,
      "logps/rejected": -524.6278686523438,
      "loss": 0.4696,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -0.8491412401199341,
      "rewards/margins": 1.0410358905792236,
      "rewards/rejected": -1.8901771306991577,
      "step": 130
    },
    {
      "epoch": 0.85,
      "learning_rate": 3.438351873250492e-08,
      "logits/chosen": 0.49447330832481384,
      "logits/rejected": 2.810368537902832,
      "logps/chosen": -259.0260925292969,
      "logps/rejected": -522.2190551757812,
      "loss": 0.4652,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -0.7394121885299683,
      "rewards/margins": 1.2198739051818848,
      "rewards/rejected": -1.959286093711853,
      "step": 140
    },
    {
      "epoch": 0.91,
      "learning_rate": 1.256598743236703e-08,
      "logits/chosen": 0.6611243486404419,
      "logits/rejected": 2.3112754821777344,
      "logps/chosen": -326.75994873046875,
      "logps/rejected": -526.3076171875,
      "loss": 0.4862,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.9679144620895386,
      "rewards/margins": 1.0426340103149414,
      "rewards/rejected": -2.0105483531951904,
      "step": 150
    },
    {
      "epoch": 0.97,
      "learning_rate": 1.406755487774386e-09,
      "logits/chosen": 0.5118433833122253,
      "logits/rejected": 2.144733428955078,
      "logps/chosen": -298.64678955078125,
      "logps/rejected": -506.86346435546875,
      "loss": 0.4578,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.8880035281181335,
      "rewards/margins": 0.9844219088554382,
      "rewards/rejected": -1.8724254369735718,
      "step": 160
    },
    {
      "epoch": 1.0,
      "step": 165,
      "total_flos": 0.0,
      "train_loss": 0.49529894770997945,
      "train_runtime": 10046.2235,
      "train_samples_per_second": 2.104,
      "train_steps_per_second": 0.016
    }
  ],
  "logging_steps": 10,
  "max_steps": 165,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}