phi3m0128-cds-0.8-kendall-onof-decrease-corr-max-2-simpo-max1500-default/checkpoint-200/trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.17189514396218306,
  "eval_steps": 50,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.008594757198109154,
      "grad_norm": 0.06708361208438873,
      "learning_rate": 4.999451708687114e-06,
      "logits/chosen": 14.524938583374023,
      "logits/rejected": 14.82593822479248,
      "logps/chosen": -0.31433865427970886,
      "logps/rejected": -0.32406437397003174,
      "loss": 0.9442,
      "rewards/accuracies": 0.4124999940395355,
      "rewards/chosen": -0.4715079367160797,
      "rewards/margins": 0.014588532969355583,
      "rewards/rejected": -0.48609647154808044,
      "step": 10
    },
    {
      "epoch": 0.017189514396218308,
      "grad_norm": 0.056814808398485184,
      "learning_rate": 4.997807075247147e-06,
      "logits/chosen": 14.309213638305664,
      "logits/rejected": 14.978128433227539,
      "logps/chosen": -0.31283506751060486,
      "logps/rejected": -0.3911947011947632,
      "loss": 0.928,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.46925264596939087,
      "rewards/margins": 0.1175394207239151,
      "rewards/rejected": -0.5867919921875,
      "step": 20
    },
    {
      "epoch": 0.02578427159432746,
      "grad_norm": 0.061199307441711426,
      "learning_rate": 4.9950668210706795e-06,
      "logits/chosen": 14.68384075164795,
      "logits/rejected": 15.338122367858887,
      "logps/chosen": -0.3007296621799469,
      "logps/rejected": -0.3204456865787506,
      "loss": 0.9439,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.45109447836875916,
      "rewards/margins": 0.029573997482657433,
      "rewards/rejected": -0.48066848516464233,
      "step": 30
    },
    {
      "epoch": 0.034379028792436615,
      "grad_norm": 0.08423774689435959,
      "learning_rate": 4.9912321481237616e-06,
      "logits/chosen": 14.39265251159668,
      "logits/rejected": 15.059102058410645,
      "logps/chosen": -0.28216058015823364,
      "logps/rejected": -0.33495840430259705,
      "loss": 0.9184,
      "rewards/accuracies": 0.4124999940395355,
      "rewards/chosen": -0.42324090003967285,
      "rewards/margins": 0.07919676601886749,
      "rewards/rejected": -0.5024376511573792,
      "step": 40
    },
    {
      "epoch": 0.042973785990545764,
      "grad_norm": 0.06052614375948906,
      "learning_rate": 4.986304738420684e-06,
      "logits/chosen": 14.383735656738281,
      "logits/rejected": 15.029413223266602,
      "logps/chosen": -0.27970507740974426,
      "logps/rejected": -0.33213528990745544,
      "loss": 0.9317,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.4195576310157776,
      "rewards/margins": 0.07864536345005035,
      "rewards/rejected": -0.49820294976234436,
      "step": 50
    },
    {
      "epoch": 0.042973785990545764,
      "eval_logits/chosen": 14.424538612365723,
      "eval_logits/rejected": 15.006633758544922,
      "eval_logps/chosen": -0.2923925220966339,
      "eval_logps/rejected": -0.3531996011734009,
      "eval_loss": 0.9324354529380798,
      "eval_rewards/accuracies": 0.5052631497383118,
      "eval_rewards/chosen": -0.43858882784843445,
      "eval_rewards/margins": 0.09121060371398926,
      "eval_rewards/rejected": -0.5297994017601013,
      "eval_runtime": 26.3759,
      "eval_samples_per_second": 28.549,
      "eval_steps_per_second": 3.602,
      "step": 50
    },
    {
      "epoch": 0.05156854318865492,
      "grad_norm": 0.06899414211511612,
      "learning_rate": 4.980286753286196e-06,
      "logits/chosen": 14.888933181762695,
      "logits/rejected": 15.33955192565918,
      "logps/chosen": -0.2886829972267151,
      "logps/rejected": -0.34016504883766174,
      "loss": 0.9323,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.43302449584007263,
      "rewards/margins": 0.07722309231758118,
      "rewards/rejected": -0.5102475881576538,
      "step": 60
    },
    {
      "epoch": 0.060163300386764075,
      "grad_norm": 0.06679105013608932,
      "learning_rate": 4.973180832407471e-06,
      "logits/chosen": 14.721624374389648,
      "logits/rejected": 15.614666938781738,
      "logps/chosen": -0.29435139894485474,
      "logps/rejected": -0.38699784874916077,
      "loss": 0.9172,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.4415270686149597,
      "rewards/margins": 0.13896968960762024,
      "rewards/rejected": -0.5804967880249023,
      "step": 70
    },
    {
      "epoch": 0.06875805758487323,
      "grad_norm": 0.07169903814792633,
      "learning_rate": 4.964990092676263e-06,
      "logits/chosen": 13.848808288574219,
      "logits/rejected": 14.609800338745117,
      "logps/chosen": -0.26156893372535706,
      "logps/rejected": -0.33030644059181213,
      "loss": 0.9245,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.3923533856868744,
      "rewards/margins": 0.10310628265142441,
      "rewards/rejected": -0.495459645986557,
      "step": 80
    },
    {
      "epoch": 0.07735281478298238,
      "grad_norm": 0.06593246012926102,
      "learning_rate": 4.9557181268217225e-06,
      "logits/chosen": 14.603567123413086,
      "logits/rejected": 14.994171142578125,
      "logps/chosen": -0.3191321790218353,
      "logps/rejected": -0.3477073311805725,
      "loss": 0.9359,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.4786983132362366,
      "rewards/margins": 0.042862698435783386,
      "rewards/rejected": -0.5215609669685364,
      "step": 90
    },
    {
      "epoch": 0.08594757198109153,
      "grad_norm": 0.0718066617846489,
      "learning_rate": 4.9453690018345144e-06,
      "logits/chosen": 13.928094863891602,
      "logits/rejected": 14.792709350585938,
      "logps/chosen": -0.24115696549415588,
      "logps/rejected": -0.3537539839744568,
      "loss": 0.9066,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.3617354929447174,
      "rewards/margins": 0.16889554262161255,
      "rewards/rejected": -0.5306310653686523,
      "step": 100
    },
    {
      "epoch": 0.08594757198109153,
      "eval_logits/chosen": 14.40036392211914,
      "eval_logits/rejected": 14.97786808013916,
      "eval_logps/chosen": -0.2777771055698395,
      "eval_logps/rejected": -0.3516874611377716,
      "eval_loss": 0.9236211180686951,
      "eval_rewards/accuracies": 0.5052631497383118,
      "eval_rewards/chosen": -0.4166657328605652,
      "eval_rewards/margins": 0.11086549609899521,
      "eval_rewards/rejected": -0.5275312066078186,
      "eval_runtime": 25.8056,
      "eval_samples_per_second": 29.18,
      "eval_steps_per_second": 3.681,
      "step": 100
    },
    {
      "epoch": 0.09454232917920069,
      "grad_norm": 0.06681054830551147,
      "learning_rate": 4.933947257182901e-06,
      "logits/chosen": 14.76116943359375,
      "logits/rejected": 15.001077651977539,
      "logps/chosen": -0.297056645154953,
      "logps/rejected": -0.3221590518951416,
      "loss": 0.929,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": -0.4455850124359131,
      "rewards/margins": 0.03765357658267021,
      "rewards/rejected": -0.4832385182380676,
      "step": 110
    },
    {
      "epoch": 0.10313708637730984,
      "grad_norm": 0.10024584829807281,
      "learning_rate": 4.921457902821578e-06,
      "logits/chosen": 14.405306816101074,
      "logits/rejected": 15.084524154663086,
      "logps/chosen": -0.2726767361164093,
      "logps/rejected": -0.3543504774570465,
      "loss": 0.9299,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.40901508927345276,
      "rewards/margins": 0.12251058965921402,
      "rewards/rejected": -0.531525731086731,
      "step": 120
    },
    {
      "epoch": 0.11173184357541899,
      "grad_norm": 0.08629737794399261,
      "learning_rate": 4.907906416994146e-06,
      "logits/chosen": 14.073992729187012,
      "logits/rejected": 14.882128715515137,
      "logps/chosen": -0.2827032506465912,
      "logps/rejected": -0.369393527507782,
      "loss": 0.9109,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.42405492067337036,
      "rewards/margins": 0.13003548979759216,
      "rewards/rejected": -0.5540903806686401,
      "step": 130
    },
    {
      "epoch": 0.12032660077352815,
      "grad_norm": 0.07973086833953857,
      "learning_rate": 4.893298743830168e-06,
      "logits/chosen": 13.96656322479248,
      "logits/rejected": 14.639463424682617,
      "logps/chosen": -0.28426361083984375,
      "logps/rejected": -0.3899250030517578,
      "loss": 0.9138,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.4263954162597656,
      "rewards/margins": 0.1584920585155487,
      "rewards/rejected": -0.5848874449729919,
      "step": 140
    },
    {
      "epoch": 0.1289213579716373,
      "grad_norm": 0.08767445385456085,
      "learning_rate": 4.8776412907378845e-06,
      "logits/chosen": 13.705177307128906,
      "logits/rejected": 14.19865608215332,
      "logps/chosen": -0.26735779643058777,
      "logps/rejected": -0.34726911783218384,
      "loss": 0.9157,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.40103667974472046,
      "rewards/margins": 0.1198669821023941,
      "rewards/rejected": -0.5209037065505981,
      "step": 150
    },
    {
      "epoch": 0.1289213579716373,
      "eval_logits/chosen": 13.20260238647461,
      "eval_logits/rejected": 13.959339141845703,
      "eval_logps/chosen": -0.27623170614242554,
      "eval_logps/rejected": -0.3724917769432068,
      "eval_loss": 0.909102737903595,
      "eval_rewards/accuracies": 0.557894766330719,
      "eval_rewards/chosen": -0.4143475592136383,
      "eval_rewards/margins": 0.14439010620117188,
      "eval_rewards/rejected": -0.5587376356124878,
      "eval_runtime": 25.7839,
      "eval_samples_per_second": 29.204,
      "eval_steps_per_second": 3.684,
      "step": 150
    },
    {
      "epoch": 0.13751611516974646,
      "grad_norm": 0.09749539196491241,
      "learning_rate": 4.860940925593703e-06,
      "logits/chosen": 13.301411628723145,
      "logits/rejected": 14.054819107055664,
      "logps/chosen": -0.2808162569999695,
      "logps/rejected": -0.39500662684440613,
      "loss": 0.9,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.4212244153022766,
      "rewards/margins": 0.17128555476665497,
      "rewards/rejected": -0.592509925365448,
      "step": 160
    },
    {
      "epoch": 0.1461108723678556,
      "grad_norm": 0.14965052902698517,
      "learning_rate": 4.84320497372973e-06,
      "logits/chosen": 12.261284828186035,
      "logits/rejected": 13.0617036819458,
      "logps/chosen": -0.29266461730003357,
      "logps/rejected": -0.4265298843383789,
      "loss": 0.896,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.43899694085121155,
      "rewards/margins": 0.20079784095287323,
      "rewards/rejected": -0.6397948265075684,
      "step": 170
    },
    {
      "epoch": 0.15470562956596476,
      "grad_norm": 0.13044072687625885,
      "learning_rate": 4.824441214720629e-06,
      "logits/chosen": 11.509119033813477,
      "logits/rejected": 12.31033706665039,
      "logps/chosen": -0.27384257316589355,
      "logps/rejected": -0.3920982778072357,
      "loss": 0.8911,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.4107638895511627,
      "rewards/margins": 0.17738358676433563,
      "rewards/rejected": -0.5881474018096924,
      "step": 180
    },
    {
      "epoch": 0.1633003867640739,
      "grad_norm": 0.16182811558246613,
      "learning_rate": 4.804657878971252e-06,
      "logits/chosen": 10.68933391571045,
      "logits/rejected": 11.632065773010254,
      "logps/chosen": -0.292975515127182,
      "logps/rejected": -0.42257896065711975,
      "loss": 0.9002,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.4394632875919342,
      "rewards/margins": 0.19440510869026184,
      "rewards/rejected": -0.633868396282196,
      "step": 190
    },
    {
      "epoch": 0.17189514396218306,
      "grad_norm": 0.181160107254982,
      "learning_rate": 4.783863644106502e-06,
      "logits/chosen": 10.593437194824219,
      "logits/rejected": 11.435877799987793,
      "logps/chosen": -0.32495418190956116,
      "logps/rejected": -0.4480825364589691,
      "loss": 0.8773,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.48743128776550293,
      "rewards/margins": 0.18469250202178955,
      "rewards/rejected": -0.6721237897872925,
      "step": 200
    },
    {
      "epoch": 0.17189514396218306,
      "eval_logits/chosen": 9.299257278442383,
      "eval_logits/rejected": 10.055145263671875,
      "eval_logps/chosen": -0.31059205532073975,
      "eval_logps/rejected": -0.47102925181388855,
      "eval_loss": 0.8721462488174438,
      "eval_rewards/accuracies": 0.6105263233184814,
      "eval_rewards/chosen": -0.4658880829811096,
      "eval_rewards/margins": 0.24065588414669037,
      "eval_rewards/rejected": -0.7065439224243164,
      "eval_runtime": 25.78,
      "eval_samples_per_second": 29.209,
      "eval_steps_per_second": 3.685,
      "step": 200
    }
  ],
  "logging_steps": 10,
  "max_steps": 1500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4.569984252510208e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}