{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9984,
  "eval_steps": 500,
  "global_step": 156,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "grad_norm": 63.1178072888425,
      "learning_rate": 3.125e-08,
      "logits/chosen": -1.482092022895813,
      "logits/rejected": 0.7751112580299377,
      "logps/chosen": -254.50701904296875,
      "logps/rejected": -67.8974609375,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.06,
      "grad_norm": 21.62022121482692,
      "learning_rate": 3.1249999999999997e-07,
      "logits/chosen": -1.8638765811920166,
      "logits/rejected": 0.6638774275779724,
      "logps/chosen": -329.24737548828125,
      "logps/rejected": -65.78995513916016,
      "loss": 0.6723,
      "rewards/accuracies": 0.7256944179534912,
      "rewards/chosen": 0.039733994752168655,
      "rewards/margins": 0.0414588563144207,
      "rewards/rejected": -0.0017248644726350904,
      "step": 10
    },
    {
      "epoch": 0.13,
      "grad_norm": 9.802449200015984,
      "learning_rate": 4.989935734988097e-07,
      "logits/chosen": -2.5045132637023926,
      "logits/rejected": -0.7919284701347351,
      "logps/chosen": -281.2373962402344,
      "logps/rejected": -69.94824981689453,
      "loss": 0.5442,
      "rewards/accuracies": 0.984375,
      "rewards/chosen": 0.3721862733364105,
      "rewards/margins": 0.40567564964294434,
      "rewards/rejected": -0.03348935395479202,
      "step": 20
    },
    {
      "epoch": 0.19,
      "grad_norm": 6.344038188899024,
      "learning_rate": 4.877641290737883e-07,
      "logits/chosen": -2.8063747882843018,
      "logits/rejected": -2.0033249855041504,
      "logps/chosen": -273.3968505859375,
      "logps/rejected": -83.54493713378906,
      "loss": 0.43,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.5773380398750305,
      "rewards/margins": 0.7373520135879517,
      "rewards/rejected": -0.16001388430595398,
      "step": 30
    },
    {
      "epoch": 0.26,
      "grad_norm": 4.802832110120652,
      "learning_rate": 4.646121984004665e-07,
      "logits/chosen": -2.8680758476257324,
      "logits/rejected": -2.3258016109466553,
      "logps/chosen": -263.2470397949219,
      "logps/rejected": -92.8746109008789,
      "loss": 0.3813,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.5783931612968445,
      "rewards/margins": 0.836522102355957,
      "rewards/rejected": -0.25812894105911255,
      "step": 40
    },
    {
      "epoch": 0.32,
      "grad_norm": 3.9555813235365616,
      "learning_rate": 4.3069871595684787e-07,
      "logits/chosen": -2.8306097984313965,
      "logits/rejected": -2.3546886444091797,
      "logps/chosen": -258.2146301269531,
      "logps/rejected": -98.2398452758789,
      "loss": 0.3589,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.6665330529212952,
      "rewards/margins": 0.9788433313369751,
      "rewards/rejected": -0.31231021881103516,
      "step": 50
    },
    {
      "epoch": 0.38,
      "grad_norm": 4.001436828366304,
      "learning_rate": 3.877242453630256e-07,
      "logits/chosen": -2.819143772125244,
      "logits/rejected": -2.3954625129699707,
      "logps/chosen": -278.05682373046875,
      "logps/rejected": -101.1539077758789,
      "loss": 0.3482,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.6159041523933411,
      "rewards/margins": 0.9530438184738159,
      "rewards/rejected": -0.33713972568511963,
      "step": 60
    },
    {
      "epoch": 0.45,
      "grad_norm": 3.7138660870592304,
      "learning_rate": 3.378437060203357e-07,
      "logits/chosen": -2.806363344192505,
      "logits/rejected": -2.4681384563446045,
      "logps/chosen": -264.37713623046875,
      "logps/rejected": -102.52363586425781,
      "loss": 0.3343,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.6885555386543274,
      "rewards/margins": 1.0429760217666626,
      "rewards/rejected": -0.3544204831123352,
      "step": 70
    },
    {
      "epoch": 0.51,
      "grad_norm": 3.603973508900059,
      "learning_rate": 2.8355831645441387e-07,
      "logits/chosen": -2.8176445960998535,
      "logits/rejected": -2.5400137901306152,
      "logps/chosen": -258.44287109375,
      "logps/rejected": -103.3671875,
      "loss": 0.329,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.7031112909317017,
      "rewards/margins": 1.0713307857513428,
      "rewards/rejected": -0.3682195544242859,
      "step": 80
    },
    {
      "epoch": 0.58,
      "grad_norm": 3.227257986644031,
      "learning_rate": 2.2759017277414164e-07,
      "logits/chosen": -2.844313859939575,
      "logits/rejected": -2.568953514099121,
      "logps/chosen": -261.044189453125,
      "logps/rejected": -103.6786880493164,
      "loss": 0.3265,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.6580973863601685,
      "rewards/margins": 1.024906873703003,
      "rewards/rejected": -0.36680951714515686,
      "step": 90
    },
    {
      "epoch": 0.64,
      "grad_norm": 3.783887447902626,
      "learning_rate": 1.7274575140626315e-07,
      "logits/chosen": -2.835691452026367,
      "logits/rejected": -2.5570685863494873,
      "logps/chosen": -263.1366271972656,
      "logps/rejected": -104.17109680175781,
      "loss": 0.3324,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.716522216796875,
      "rewards/margins": 1.0835347175598145,
      "rewards/rejected": -0.36701256036758423,
      "step": 100
    },
    {
      "epoch": 0.7,
      "grad_norm": 3.730463342852827,
      "learning_rate": 1.2177518064852348e-07,
      "logits/chosen": -2.8178272247314453,
      "logits/rejected": -2.5992181301116943,
      "logps/chosen": -265.06048583984375,
      "logps/rejected": -104.28125,
      "loss": 0.33,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.6356825232505798,
      "rewards/margins": 1.0085790157318115,
      "rewards/rejected": -0.3728964924812317,
      "step": 110
    },
    {
      "epoch": 0.77,
      "grad_norm": 3.3151932122118555,
      "learning_rate": 7.723433775328384e-08,
      "logits/chosen": -2.8309969902038574,
      "logits/rejected": -2.600473403930664,
      "logps/chosen": -269.7681884765625,
      "logps/rejected": -104.23124694824219,
      "loss": 0.327,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.6922739148139954,
      "rewards/margins": 1.0601446628570557,
      "rewards/rejected": -0.36787083745002747,
      "step": 120
    },
    {
      "epoch": 0.83,
      "grad_norm": 3.337566282002191,
      "learning_rate": 4.1356686569674335e-08,
      "logits/chosen": -2.793952465057373,
      "logits/rejected": -2.5505855083465576,
      "logps/chosen": -240.2477264404297,
      "logps/rejected": -104.47578430175781,
      "loss": 0.3267,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.6651207208633423,
      "rewards/margins": 1.041259527206421,
      "rewards/rejected": -0.37613874673843384,
      "step": 130
    },
    {
      "epoch": 0.9,
      "grad_norm": 3.379211511743711,
      "learning_rate": 1.5941282340065697e-08,
      "logits/chosen": -2.860114097595215,
      "logits/rejected": -2.6306686401367188,
      "logps/chosen": -263.01873779296875,
      "logps/rejected": -104.28123474121094,
      "loss": 0.324,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.6528992652893066,
      "rewards/margins": 1.0218257904052734,
      "rewards/rejected": -0.3689264953136444,
      "step": 140
    },
    {
      "epoch": 0.96,
      "grad_norm": 3.2760679534529755,
      "learning_rate": 2.2625595580163247e-09,
      "logits/chosen": -2.7721505165100098,
      "logits/rejected": -2.5658388137817383,
      "logps/chosen": -233.78543090820312,
      "logps/rejected": -104.24223327636719,
      "loss": 0.3254,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.6700480580329895,
      "rewards/margins": 1.0418356657028198,
      "rewards/rejected": -0.3717876970767975,
      "step": 150
    },
    {
      "epoch": 1.0,
      "step": 156,
      "total_flos": 0.0,
      "train_loss": 0.37798528717114377,
      "train_runtime": 17606.5611,
      "train_samples_per_second": 1.136,
      "train_steps_per_second": 0.009
    }
  ],
  "logging_steps": 10,
  "max_steps": 156,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 200,
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}