{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 100,
  "global_step": 283,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "completion_length": 384.0463357925415,
      "epoch": 0.0176678445229682,
      "grad_norm": 0.6529819382356065,
      "kl": 0.0007791996002197265,
      "learning_rate": 3.448275862068966e-06,
      "loss": 0.0,
      "reward": 0.640290205180645,
      "reward_std": 0.43957694210112097,
      "rewards/accuracy_reward": 0.14720982844009994,
      "rewards/format_reward": 0.4930803783237934,
      "step": 5
    },
    {
      "completion_length": 211.52143774032592,
      "epoch": 0.0353356890459364,
      "grad_norm": 8.069932901761003,
      "kl": 0.6061183929443359,
      "learning_rate": 6.896551724137932e-06,
      "loss": 0.0242,
      "reward": 0.9496652163565159,
      "reward_std": 0.264773327531293,
      "rewards/accuracy_reward": 0.07377232476137578,
      "rewards/format_reward": 0.8758928958326578,
      "step": 10
    },
    {
      "completion_length": 146.1306981086731,
      "epoch": 0.053003533568904596,
      "grad_norm": 0.43113546878909836,
      "kl": 0.077935791015625,
      "learning_rate": 1.0344827586206898e-05,
      "loss": 0.0031,
      "reward": 1.0457589790225028,
      "reward_std": 0.20272804964333774,
      "rewards/accuracy_reward": 0.08671875351574272,
      "rewards/format_reward": 0.9590402156114578,
      "step": 15
    },
    {
      "completion_length": 99.81663384437562,
      "epoch": 0.0706713780918728,
      "grad_norm": 0.4019533843400798,
      "kl": 0.19752197265625,
      "learning_rate": 1.3793103448275863e-05,
      "loss": 0.0079,
      "reward": 1.0904018372297286,
      "reward_std": 0.21298125991597772,
      "rewards/accuracy_reward": 0.12154018429573625,
      "rewards/format_reward": 0.968861648440361,
      "step": 20
    },
    {
      "completion_length": 152.5420826435089,
      "epoch": 0.08833922261484099,
      "grad_norm": 2409.3058850659404,
      "kl": 4.647747802734375,
      "learning_rate": 1.7241379310344828e-05,
      "loss": 0.187,
      "reward": 1.1687500491738319,
      "reward_std": 0.28142741243354974,
      "rewards/accuracy_reward": 0.20290179601870478,
      "rewards/format_reward": 0.9658482521772385,
      "step": 25
    },
    {
      "completion_length": 214.08862552642822,
      "epoch": 0.10600706713780919,
      "grad_norm": 0.2986315582789042,
      "kl": 0.159442138671875,
      "learning_rate": 1.999923511388017e-05,
      "loss": 0.0064,
      "reward": 1.1717634417116642,
      "reward_std": 0.3338914422318339,
      "rewards/accuracy_reward": 0.23303572479635476,
      "rewards/format_reward": 0.9387277208268643,
      "step": 30
    },
    {
      "completion_length": 149.01953802108764,
      "epoch": 0.12367491166077739,
      "grad_norm": 0.2289227294262673,
      "kl": 0.17880859375,
      "learning_rate": 1.9972476383747748e-05,
      "loss": 0.0072,
      "reward": 1.2422991648316384,
      "reward_std": 0.2768098399043083,
      "rewards/accuracy_reward": 0.26517858244478704,
      "rewards/format_reward": 0.9771205797791481,
      "step": 35
    },
    {
      "completion_length": 247.47110538482667,
      "epoch": 0.1413427561837456,
      "grad_norm": 370.97596271434315,
      "kl": 5.921478271484375,
      "learning_rate": 1.9907590277344582e-05,
      "loss": 0.2362,
      "reward": 1.2344866633415221,
      "reward_std": 0.320804962515831,
      "rewards/accuracy_reward": 0.27901786835864184,
      "rewards/format_reward": 0.9554687894880771,
      "step": 40
    },
    {
      "completion_length": 251.62902946472167,
      "epoch": 0.15901060070671377,
      "grad_norm": 58.23088108276416,
      "kl": 1.127880859375,
      "learning_rate": 1.9804824871166254e-05,
      "loss": 0.045,
      "reward": 1.1912946954369545,
      "reward_std": 0.31524865701794624,
      "rewards/accuracy_reward": 0.2500000115483999,
      "rewards/format_reward": 0.9412946835160255,
      "step": 45
    },
    {
      "completion_length": 176.58136978149415,
      "epoch": 0.17667844522968199,
      "grad_norm": 1.2616830529166947,
      "kl": 0.3770751953125,
      "learning_rate": 1.9664573064143604e-05,
      "loss": 0.0151,
      "reward": 1.1544643394649028,
      "reward_std": 0.35408149342983963,
      "rewards/accuracy_reward": 0.24129465334117411,
      "rewards/format_reward": 0.9131696835160256,
      "step": 50
    },
    {
      "completion_length": 122.38192529678345,
      "epoch": 0.19434628975265017,
      "grad_norm": 0.2806316603874717,
      "kl": 0.38321533203125,
      "learning_rate": 1.948737107548771e-05,
      "loss": 0.0153,
      "reward": 1.2000000566244124,
      "reward_std": 0.2845297777093947,
      "rewards/accuracy_reward": 0.24564733263105154,
      "rewards/format_reward": 0.9543527215719223,
      "step": 55
    },
    {
      "completion_length": 134.8651843070984,
      "epoch": 0.21201413427561838,
      "grad_norm": 2.6512614670552606,
      "kl": 0.39910888671875,
      "learning_rate": 1.9273896394584103e-05,
      "loss": 0.016,
      "reward": 1.2407366573810577,
      "reward_std": 0.2907262988388538,
      "rewards/accuracy_reward": 0.2858259055763483,
      "rewards/format_reward": 0.9549107551574707,
      "step": 60
    },
    {
      "completion_length": 136.180140209198,
      "epoch": 0.22968197879858657,
      "grad_norm": 1.338921430387086,
      "kl": 2.08179931640625,
      "learning_rate": 1.9024965190774262e-05,
      "loss": 0.0833,
      "reward": 1.275223271548748,
      "reward_std": 0.26917654536664487,
      "rewards/accuracy_reward": 0.3123884086497128,
      "rewards/format_reward": 0.9628348648548126,
      "step": 65
    },
    {
      "completion_length": 305.0924233436584,
      "epoch": 0.24734982332155478,
      "grad_norm": 2.4885869901417887,
      "kl": 2.4799072265625,
      "learning_rate": 1.8741529192927528e-05,
      "loss": 0.0992,
      "reward": 0.9402902211993933,
      "reward_std": 0.42617332013323905,
      "rewards/accuracy_reward": 0.2555803691968322,
      "rewards/format_reward": 0.6847098521888256,
      "step": 70
    },
    {
      "completion_length": 273.31150884628295,
      "epoch": 0.26501766784452296,
      "grad_norm": 0.42249549223947436,
      "kl": 0.40208740234375,
      "learning_rate": 1.8424672050733577e-05,
      "loss": 0.0161,
      "reward": 1.0387277275323867,
      "reward_std": 0.46621278002858163,
      "rewards/accuracy_reward": 0.2751116196624935,
      "rewards/format_reward": 0.7636161059141159,
      "step": 75
    },
    {
      "completion_length": 122.03739376068116,
      "epoch": 0.2826855123674912,
      "grad_norm": 0.5350338358000201,
      "kl": 0.6455322265625,
      "learning_rate": 1.8075605191627242e-05,
      "loss": 0.0258,
      "reward": 1.2546875521540641,
      "reward_std": 0.26898800805211065,
      "rewards/accuracy_reward": 0.2853794780559838,
      "rewards/format_reward": 0.9693080767989158,
      "step": 80
    },
    {
      "completion_length": 152.50893516540526,
      "epoch": 0.3003533568904594,
      "grad_norm": 1.6918463013914555,
      "kl": 0.884033203125,
      "learning_rate": 1.7695663189185703e-05,
      "loss": 0.0354,
      "reward": 1.260156300663948,
      "reward_std": 0.2852775551378727,
      "rewards/accuracy_reward": 0.3037946572527289,
      "rewards/format_reward": 0.956361646950245,
      "step": 85
    },
    {
      "completion_length": 186.635165309906,
      "epoch": 0.31802120141342755,
      "grad_norm": 3.0673822516554643,
      "kl": 1.114306640625,
      "learning_rate": 1.7286298660705877e-05,
      "loss": 0.0446,
      "reward": 1.2143973752856254,
      "reward_std": 0.3637195309624076,
      "rewards/accuracy_reward": 0.301004477776587,
      "rewards/format_reward": 0.9133929021656513,
      "step": 90
    },
    {
      "completion_length": 184.8044722557068,
      "epoch": 0.33568904593639576,
      "grad_norm": 0.8799900343158491,
      "kl": 1.20673828125,
      "learning_rate": 1.6849076713469914e-05,
      "loss": 0.0483,
      "reward": 1.1883929029107094,
      "reward_std": 0.352879635989666,
      "rewards/accuracy_reward": 0.2754464400932193,
      "rewards/format_reward": 0.9129464730620385,
      "step": 95
    },
    {
      "completion_length": 459.28539962768554,
      "epoch": 0.35335689045936397,
      "grad_norm": 1.4660921102611524,
      "kl": 2.501123046875,
      "learning_rate": 1.6385668960932143e-05,
      "loss": 0.1,
      "reward": 0.7824777118861675,
      "reward_std": 0.5532245114445686,
      "rewards/accuracy_reward": 0.1621651851804927,
      "rewards/format_reward": 0.6203125301748514,
      "step": 100
    },
    {
      "epoch": 0.35335689045936397,
      "eval_completion_length": 471.5725402832031,
      "eval_kl": 2.90625,
      "eval_loss": 0.11904626339673996,
      "eval_reward": 0.690848246216774,
      "eval_reward_std": 0.6071737855672836,
      "eval_rewards/accuracy_reward": 0.1261160746216774,
      "eval_rewards/format_reward": 0.564732164144516,
      "eval_runtime": 46.7851,
      "eval_samples_per_second": 2.116,
      "eval_steps_per_second": 0.021,
      "step": 100
    },
    {
      "completion_length": 432.415310382843,
      "epoch": 0.3710247349823322,
      "grad_norm": 0.48111406831320014,
      "kl": 1.6375,
      "learning_rate": 1.5897847131705194e-05,
      "loss": 0.0655,
      "reward": 0.7103794921189547,
      "reward_std": 0.4915090943686664,
      "rewards/accuracy_reward": 0.1224330407101661,
      "rewards/format_reward": 0.5879464514553547,
      "step": 105
    },
    {
      "completion_length": 44.55658693313599,
      "epoch": 0.38869257950530034,
      "grad_norm": 0.4829508094562178,
      "kl": 1.22216796875,
      "learning_rate": 1.5387476295779737e-05,
      "loss": 0.0489,
      "reward": 1.173437552154064,
      "reward_std": 0.16121841138228774,
      "rewards/accuracy_reward": 0.19732143701985477,
      "rewards/format_reward": 0.9761160992085933,
      "step": 110
    },
    {
      "completion_length": 102.14576349258422,
      "epoch": 0.40636042402826855,
      "grad_norm": 13.346352830691668,
      "kl": 0.862548828125,
      "learning_rate": 1.4856507733875837e-05,
      "loss": 0.0345,
      "reward": 1.1206473730504514,
      "reward_std": 0.30396630289033055,
      "rewards/accuracy_reward": 0.20636161593720317,
      "rewards/format_reward": 0.9142857596278191,
      "step": 115
    },
    {
      "completion_length": 102.4949821472168,
      "epoch": 0.42402826855123676,
      "grad_norm": 0.2561020453511286,
      "kl": 0.561376953125,
      "learning_rate": 1.4306971477188223e-05,
      "loss": 0.0224,
      "reward": 1.201450951397419,
      "reward_std": 0.22256765561178327,
      "rewards/accuracy_reward": 0.22254465399309992,
      "rewards/format_reward": 0.9789062902331352,
      "step": 120
    },
    {
      "completion_length": 238.2740068435669,
      "epoch": 0.4416961130742049,
      "grad_norm": 0.265018502031295,
      "kl": 0.1888427734375,
      "learning_rate": 1.3740968546047935e-05,
      "loss": 0.0076,
      "reward": 1.191964340209961,
      "reward_std": 0.323291926831007,
      "rewards/accuracy_reward": 0.2588169751688838,
      "rewards/format_reward": 0.9331473611295223,
      "step": 125
    },
    {
      "completion_length": 310.94075145721433,
      "epoch": 0.45936395759717313,
      "grad_norm": 0.17518365102004282,
      "kl": 0.142608642578125,
      "learning_rate": 1.3160662917174045e-05,
      "loss": 0.0057,
      "reward": 1.2094866700470448,
      "reward_std": 0.41736746579408646,
      "rewards/accuracy_reward": 0.33258930016309024,
      "rewards/format_reward": 0.8768973611295223,
      "step": 130
    },
    {
      "completion_length": 316.3401933670044,
      "epoch": 0.47703180212014135,
      "grad_norm": 0.17370338211422123,
      "kl": 0.132843017578125,
      "learning_rate": 1.2568273250226681e-05,
      "loss": 0.0053,
      "reward": 1.191852729022503,
      "reward_std": 0.43695004992187025,
      "rewards/accuracy_reward": 0.34654019474983216,
      "rewards/format_reward": 0.8453125394880772,
      "step": 135
    },
    {
      "completion_length": 416.6068260192871,
      "epoch": 0.49469964664310956,
      "grad_norm": 0.20545441542706755,
      "kl": 0.15596923828125,
      "learning_rate": 1.1966064405292887e-05,
      "loss": 0.0062,
      "reward": 1.046428620815277,
      "reward_std": 0.525279750674963,
      "rewards/accuracy_reward": 0.3165178705006838,
      "rewards/format_reward": 0.7299107514321804,
      "step": 140
    },
    {
      "completion_length": 242.95715389251708,
      "epoch": 0.5123674911660777,
      "grad_norm": 0.3073638444949766,
      "kl": 0.17847900390625,
      "learning_rate": 1.1356338783736256e-05,
      "loss": 0.0071,
      "reward": 1.2570313096046448,
      "reward_std": 0.36572824846953156,
      "rewards/accuracy_reward": 0.350446443259716,
      "rewards/format_reward": 0.9065848618745804,
      "step": 145
    },
    {
      "completion_length": 210.5078224182129,
      "epoch": 0.5300353356890459,
      "grad_norm": 0.8165939885649788,
      "kl": 0.1881591796875,
      "learning_rate": 1.0741427525516463e-05,
      "loss": 0.0075,
      "reward": 1.293192020058632,
      "reward_std": 0.3428498702123761,
      "rewards/accuracy_reward": 0.3569196606054902,
      "rewards/format_reward": 0.9362723641097546,
      "step": 150
    },
    {
      "completion_length": 261.776350402832,
      "epoch": 0.5477031802120141,
      "grad_norm": 0.1582208660243009,
      "kl": 0.1717041015625,
      "learning_rate": 1.012368159663363e-05,
      "loss": 0.0069,
      "reward": 1.243973270058632,
      "reward_std": 0.39117210209369657,
      "rewards/accuracy_reward": 0.3363839453086257,
      "rewards/format_reward": 0.9075893312692642,
      "step": 155
    },
    {
      "completion_length": 239.85179653167725,
      "epoch": 0.5653710247349824,
      "grad_norm": 0.1704685274719256,
      "kl": 0.14776611328125,
      "learning_rate": 9.505462800772612e-06,
      "loss": 0.0059,
      "reward": 1.2680804081261159,
      "reward_std": 0.35588377732783555,
      "rewards/accuracy_reward": 0.335825908370316,
      "rewards/format_reward": 0.9322545066475868,
      "step": 160
    },
    {
      "completion_length": 229.55492095947267,
      "epoch": 0.5830388692579506,
      "grad_norm": 0.15800892653159143,
      "kl": 0.138787841796875,
      "learning_rate": 8.889134749511956e-06,
      "loss": 0.0055,
      "reward": 1.2492188036441803,
      "reward_std": 0.3586545692756772,
      "rewards/accuracy_reward": 0.31674108635634185,
      "rewards/format_reward": 0.9324777208268642,
      "step": 165
    },
    {
      "completion_length": 249.42702083587648,
      "epoch": 0.6007067137809188,
      "grad_norm": 0.15989747781862085,
      "kl": 0.13619384765625,
      "learning_rate": 8.277053825620836e-06,
      "loss": 0.0054,
      "reward": 1.234040232002735,
      "reward_std": 0.3690490124747157,
      "rewards/accuracy_reward": 0.315625012665987,
      "rewards/format_reward": 0.9184152215719223,
      "step": 170
    },
    {
      "completion_length": 266.64677543640136,
      "epoch": 0.6183745583038869,
      "grad_norm": 0.1728727033658703,
      "kl": 0.143048095703125,
      "learning_rate": 7.671560173993588e-06,
      "loss": 0.0057,
      "reward": 1.2549107685685157,
      "reward_std": 0.3902056057006121,
      "rewards/accuracy_reward": 0.3504464441910386,
      "rewards/format_reward": 0.90446432903409,
      "step": 175
    },
    {
      "completion_length": 267.99766731262207,
      "epoch": 0.6360424028268551,
      "grad_norm": 0.16828899543606332,
      "kl": 0.157781982421875,
      "learning_rate": 7.07496875466589e-06,
      "loss": 0.0063,
      "reward": 1.2652902334928513,
      "reward_std": 0.394162866845727,
      "rewards/accuracy_reward": 0.36138394605368374,
      "rewards/format_reward": 0.9039062932133675,
      "step": 180
    },
    {
      "completion_length": 251.41809253692628,
      "epoch": 0.6537102473498233,
      "grad_norm": 0.21026602400413624,
      "kl": 0.2133056640625,
      "learning_rate": 6.489560492119225e-06,
      "loss": 0.0085,
      "reward": 1.237165230512619,
      "reward_std": 0.3938104841858149,
      "rewards/accuracy_reward": 0.3395089440047741,
      "rewards/format_reward": 0.8976562954485416,
      "step": 185
    },
    {
      "completion_length": 214.81686115264893,
      "epoch": 0.6713780918727915,
      "grad_norm": 0.18932563174791797,
      "kl": 0.2186767578125,
      "learning_rate": 5.9175735547120975e-06,
      "loss": 0.0087,
      "reward": 1.2207589730620385,
      "reward_std": 0.41313072480261326,
      "rewards/accuracy_reward": 0.3296875163912773,
      "rewards/format_reward": 0.8910714708268642,
      "step": 190
    },
    {
      "completion_length": 244.51027965545654,
      "epoch": 0.6890459363957597,
      "grad_norm": 0.2257367270272996,
      "kl": 0.21044921875,
      "learning_rate": 5.361194797579108e-06,
      "loss": 0.0084,
      "reward": 1.2325893357396125,
      "reward_std": 0.40438184086233375,
      "rewards/accuracy_reward": 0.34832590743899344,
      "rewards/format_reward": 0.8842634335160255,
      "step": 195
    },
    {
      "completion_length": 229.27824668884278,
      "epoch": 0.7067137809187279,
      "grad_norm": 0.19283868985883293,
      "kl": 0.20889892578125,
      "learning_rate": 4.8225514017138205e-06,
      "loss": 0.0084,
      "reward": 1.2724330857396127,
      "reward_std": 0.37237234245985745,
      "rewards/accuracy_reward": 0.358370553702116,
      "rewards/format_reward": 0.9140625432133674,
      "step": 200
    },
    {
      "epoch": 0.7067137809187279,
      "eval_completion_length": 220.90455627441406,
      "eval_kl": 0.200927734375,
      "eval_loss": 0.008105829358100891,
      "eval_reward": 1.3314732909202576,
      "eval_reward_std": 0.33083294332027435,
      "eval_rewards/accuracy_reward": 0.3984375223517418,
      "eval_rewards/format_reward": 0.933035746216774,
      "eval_runtime": 31.5112,
      "eval_samples_per_second": 3.142,
      "eval_steps_per_second": 0.032,
      "step": 200
    },
    {
      "completion_length": 248.72456493377686,
      "epoch": 0.7243816254416962,
      "grad_norm": 0.1784321065416981,
      "kl": 0.2092529296875,
      "learning_rate": 4.303702741201431e-06,
      "loss": 0.0084,
      "reward": 1.2589286297559739,
      "reward_std": 0.38014698009938,
      "rewards/accuracy_reward": 0.34765626564621926,
      "rewards/format_reward": 0.9112723655998707,
      "step": 205
    },
    {
      "completion_length": 260.51217727661134,
      "epoch": 0.7420494699646644,
      "grad_norm": 0.1683054026863793,
      "kl": 0.2004150390625,
      "learning_rate": 3.8066325096949153e-06,
      "loss": 0.008,
      "reward": 1.255580408871174,
      "reward_std": 0.40174516644328834,
      "rewards/accuracy_reward": 0.35680805090814827,
      "rewards/format_reward": 0.8987723641097546,
      "step": 210
    },
    {
      "completion_length": 268.3051458358765,
      "epoch": 0.7597173144876325,
      "grad_norm": 0.19491423500924795,
      "kl": 0.21324462890625,
      "learning_rate": 3.3332411362372063e-06,
      "loss": 0.0085,
      "reward": 1.222433079779148,
      "reward_std": 0.4209536049515009,
      "rewards/accuracy_reward": 0.33761162161827085,
      "rewards/format_reward": 0.8848214723169804,
      "step": 215
    },
    {
      "completion_length": 242.99431762695312,
      "epoch": 0.7773851590106007,
      "grad_norm": 0.1827115506822177,
      "kl": 0.2093017578125,
      "learning_rate": 2.8853385194256677e-06,
      "loss": 0.0084,
      "reward": 1.256361658871174,
      "reward_std": 0.3803428867831826,
      "rewards/accuracy_reward": 0.34787948057055473,
      "rewards/format_reward": 0.9084821864962578,
      "step": 220
    },
    {
      "completion_length": 229.23405113220215,
      "epoch": 0.7950530035335689,
      "grad_norm": 0.19240194787621448,
      "kl": 0.21724853515625,
      "learning_rate": 2.464637107698046e-06,
      "loss": 0.0087,
      "reward": 1.2588170126080513,
      "reward_std": 0.37087023109197614,
      "rewards/accuracy_reward": 0.3417410867288709,
      "rewards/format_reward": 0.917075938731432,
      "step": 225
    },
    {
      "completion_length": 243.72713241577148,
      "epoch": 0.8127208480565371,
      "grad_norm": 0.1741043475951591,
      "kl": 0.2067626953125,
      "learning_rate": 2.072745352195794e-06,
      "loss": 0.0083,
      "reward": 1.243750050663948,
      "reward_std": 0.39028936978429557,
      "rewards/accuracy_reward": 0.3424107315018773,
      "rewards/format_reward": 0.901339328289032,
      "step": 230
    },
    {
      "completion_length": 245.68751201629638,
      "epoch": 0.8303886925795053,
      "grad_norm": 0.24505417166133048,
      "kl": 0.21961669921875,
      "learning_rate": 1.7111615572361628e-06,
      "loss": 0.0088,
      "reward": 1.2446429148316382,
      "reward_std": 0.4114550109952688,
      "rewards/accuracy_reward": 0.35022323187440635,
      "rewards/format_reward": 0.8944196850061417,
      "step": 235
    },
    {
      "completion_length": 241.52579193115236,
      "epoch": 0.8480565371024735,
      "grad_norm": 0.20757665190714725,
      "kl": 0.21859130859375,
      "learning_rate": 1.381268151904298e-06,
      "loss": 0.0087,
      "reward": 1.2525670200586319,
      "reward_std": 0.3901967225596309,
      "rewards/accuracy_reward": 0.3515625149011612,
      "rewards/format_reward": 0.9010045081377029,
      "step": 240
    },
    {
      "completion_length": 244.1225549697876,
      "epoch": 0.8657243816254417,
      "grad_norm": 0.17183432547369637,
      "kl": 0.21815185546875,
      "learning_rate": 1.0843264046665558e-06,
      "loss": 0.0087,
      "reward": 1.249665230512619,
      "reward_std": 0.3750341447070241,
      "rewards/accuracy_reward": 0.3444196585565805,
      "rewards/format_reward": 0.9052455805242061,
      "step": 245
    },
    {
      "completion_length": 243.7822650909424,
      "epoch": 0.8833922261484098,
      "grad_norm": 0.1916692184784476,
      "kl": 0.21383056640625,
      "learning_rate": 8.214716012124491e-07,
      "loss": 0.0086,
      "reward": 1.24776791036129,
      "reward_std": 0.3710861327126622,
      "rewards/accuracy_reward": 0.33995537385344504,
      "rewards/format_reward": 0.9078125409781933,
      "step": 250
    },
    {
      "completion_length": 235.8559259414673,
      "epoch": 0.901060070671378,
      "grad_norm": 0.23346599184542174,
      "kl": 0.22025146484375,
      "learning_rate": 5.937087039615619e-07,
      "loss": 0.0088,
      "reward": 1.2597098812460898,
      "reward_std": 0.3739591669291258,
      "rewards/accuracy_reward": 0.34542412087321284,
      "rewards/format_reward": 0.9142857551574707,
      "step": 255
    },
    {
      "completion_length": 251.72690830230712,
      "epoch": 0.9187279151943463,
      "grad_norm": 0.20314456248803323,
      "kl": 0.2180908203125,
      "learning_rate": 4.019085098303077e-07,
      "loss": 0.0087,
      "reward": 1.2440848767757415,
      "reward_std": 0.39818168375641105,
      "rewards/accuracy_reward": 0.3501116232946515,
      "rewards/format_reward": 0.8939732573926449,
      "step": 260
    },
    {
      "completion_length": 248.0124002456665,
      "epoch": 0.9363957597173145,
      "grad_norm": 0.1993341399262892,
      "kl": 0.22598876953125,
      "learning_rate": 2.4680432094837394e-07,
      "loss": 0.009,
      "reward": 1.2393973782658576,
      "reward_std": 0.3911540800705552,
      "rewards/accuracy_reward": 0.34441965762525795,
      "rewards/format_reward": 0.8949777200818062,
      "step": 265
    },
    {
      "completion_length": 249.1703239440918,
      "epoch": 0.9540636042402827,
      "grad_norm": 0.2591538960467843,
      "kl": 0.2145263671875,
      "learning_rate": 1.289891410535593e-07,
      "loss": 0.0086,
      "reward": 1.247767911851406,
      "reward_std": 0.40619117505848407,
      "rewards/accuracy_reward": 0.34966519437730315,
      "rewards/format_reward": 0.8981027238070964,
      "step": 270
    },
    {
      "completion_length": 245.0108373641968,
      "epoch": 0.9717314487632509,
      "grad_norm": 0.18045259609614844,
      "kl": 0.22490234375,
      "learning_rate": 4.8913408283934874e-08,
      "loss": 0.009,
      "reward": 1.24698666036129,
      "reward_std": 0.3904744828119874,
      "rewards/accuracy_reward": 0.3506696572527289,
      "rewards/format_reward": 0.8963170073926449,
      "step": 275
    },
    {
      "completion_length": 248.61061344146728,
      "epoch": 0.9893992932862191,
      "grad_norm": 0.1910344572252932,
      "kl": 0.2626220703125,
      "learning_rate": 6.883273035447335e-09,
      "loss": 0.0105,
      "reward": 1.2524554118514062,
      "reward_std": 0.4037581391632557,
      "rewards/accuracy_reward": 0.3559151943773031,
      "rewards/format_reward": 0.8965402223169804,
      "step": 280
    },
    {
      "completion_length": 249.9445120493571,
      "epoch": 1.0,
      "kl": 0.21577962239583334,
      "reward": 1.2481399315098922,
      "reward_std": 0.4004522568235795,
      "rewards/accuracy_reward": 0.35435269710918266,
      "rewards/format_reward": 0.8937872474392256,
      "step": 283,
      "total_flos": 0.0,
      "train_loss": 0.02524358952622005,
      "train_runtime": 29066.0481,
      "train_samples_per_second": 2.492,
      "train_steps_per_second": 0.01
    }
  ],
  "logging_steps": 5,
  "max_steps": 283,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}