{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 100,
  "global_step": 283,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "completion_length": 383.0911985397339,
      "epoch": 0.0176678445229682,
      "grad_norm": 0.9267308712005615,
      "kl": 0.0008915185928344727,
      "learning_rate": 3.448275862068966e-06,
      "loss": 0.0,
      "reward": 0.6448660973459482,
      "reward_std": 0.43812552504241464,
      "rewards/accuracy_reward": 0.1502232206054032,
      "rewards/format_reward": 0.49464287869632245,
      "step": 5
    },
    {
      "completion_length": 224.4988929748535,
      "epoch": 0.0353356890459364,
      "grad_norm": 0.44213607907295227,
      "kl": 0.31944580078125,
      "learning_rate": 6.896551724137932e-06,
      "loss": 0.0128,
      "reward": 0.9520089730620385,
      "reward_std": 0.26545318868011236,
      "rewards/accuracy_reward": 0.07265625306172296,
      "rewards/format_reward": 0.8793527193367481,
      "step": 10
    },
    {
      "completion_length": 157.38460483551026,
      "epoch": 0.053003533568904596,
      "grad_norm": 0.2661096155643463,
      "kl": 0.05403594970703125,
      "learning_rate": 1.0344827586206898e-05,
      "loss": 0.0022,
      "reward": 1.0422991596162319,
      "reward_std": 0.21440245490521193,
      "rewards/accuracy_reward": 0.09095982516882942,
      "rewards/format_reward": 0.9513393238186836,
      "step": 15
    },
    {
      "completion_length": 96.58973650932312,
      "epoch": 0.0706713780918728,
      "grad_norm": 0.39351069927215576,
      "kl": 0.15865478515625,
      "learning_rate": 1.3793103448275863e-05,
      "loss": 0.0063,
      "reward": 1.1008929066359996,
      "reward_std": 0.20822038110345603,
      "rewards/accuracy_reward": 0.12767857781145722,
      "rewards/format_reward": 0.9732143238186837,
      "step": 20
    },
    {
      "completion_length": 107.32266120910644,
      "epoch": 0.08833922261484099,
      "grad_norm": 0.661626398563385,
      "kl": 0.35902099609375,
      "learning_rate": 1.7241379310344828e-05,
      "loss": 0.0144,
      "reward": 1.128459869325161,
      "reward_std": 0.2679577572271228,
      "rewards/accuracy_reward": 0.17399554383009672,
      "rewards/format_reward": 0.9544643260538578,
      "step": 25
    },
    {
      "completion_length": 47.275559878349306,
      "epoch": 0.10600706713780919,
      "grad_norm": 0.5010063052177429,
      "kl": 0.5186767578125,
      "learning_rate": 1.999923511388017e-05,
      "loss": 0.0208,
      "reward": 1.1618304058909417,
      "reward_std": 0.210414170473814,
      "rewards/accuracy_reward": 0.18805804364383222,
      "rewards/format_reward": 0.9737723633646965,
      "step": 30
    },
    {
      "completion_length": 239.45213136672973,
      "epoch": 0.12367491166077739,
      "grad_norm": 0.5395922660827637,
      "kl": 0.2320068359375,
      "learning_rate": 1.9972476383747748e-05,
      "loss": 0.0093,
      "reward": 1.1856027349829674,
      "reward_std": 0.2784878654405475,
      "rewards/accuracy_reward": 0.2165178671013564,
      "rewards/format_reward": 0.9690848588943481,
      "step": 35
    },
    {
      "completion_length": 170.19040813446045,
      "epoch": 0.1413427561837456,
      "grad_norm": 0.393367201089859,
      "kl": 0.2654296875,
      "learning_rate": 1.9907590277344582e-05,
      "loss": 0.0106,
      "reward": 1.1880580872297286,
      "reward_std": 0.25068697994574907,
      "rewards/accuracy_reward": 0.20792411724105478,
      "rewards/format_reward": 0.9801339648663998,
      "step": 40
    },
    {
      "completion_length": 132.1880645751953,
      "epoch": 0.15901060070671377,
      "grad_norm": 0.2796829342842102,
      "kl": 0.3013916015625,
      "learning_rate": 1.9804824871166254e-05,
      "loss": 0.0121,
      "reward": 1.2225446954369545,
      "reward_std": 0.24187104841694235,
      "rewards/accuracy_reward": 0.24073661854490638,
      "rewards/format_reward": 0.9818080730736256,
      "step": 45
    },
    {
      "completion_length": 166.0952097892761,
      "epoch": 0.17667844522968199,
      "grad_norm": 0.3038596510887146,
      "kl": 0.27999267578125,
      "learning_rate": 1.9664573064143604e-05,
      "loss": 0.0112,
      "reward": 1.243750052154064,
      "reward_std": 0.2760961548425257,
      "rewards/accuracy_reward": 0.27399554969742895,
      "rewards/format_reward": 0.9697545059025288,
      "step": 50
    },
    {
      "completion_length": 146.12824268341063,
      "epoch": 0.19434628975265017,
      "grad_norm": 0.3787432312965393,
      "kl": 0.40203857421875,
      "learning_rate": 1.948737107548771e-05,
      "loss": 0.0161,
      "reward": 1.200334867835045,
      "reward_std": 0.30924737490713594,
      "rewards/accuracy_reward": 0.264620547182858,
      "rewards/format_reward": 0.9357143260538578,
      "step": 55
    },
    {
      "completion_length": 63.74587321281433,
      "epoch": 0.21201413427561838,
      "grad_norm": 0.6207670569419861,
      "kl": 0.74423828125,
      "learning_rate": 1.9273896394584103e-05,
      "loss": 0.0298,
      "reward": 1.1473214782774448,
      "reward_std": 0.2868926800787449,
      "rewards/accuracy_reward": 0.22734376145526766,
      "rewards/format_reward": 0.9199777208268642,
      "step": 60
    },
    {
      "completion_length": 110.70368802547455,
      "epoch": 0.22968197879858657,
      "grad_norm": 52.001220703125,
      "kl": 1.921142578125,
      "learning_rate": 1.9024965190774262e-05,
      "loss": 0.0769,
      "reward": 1.1512277290225028,
      "reward_std": 0.27448821542784574,
      "rewards/accuracy_reward": 0.23549108249135314,
      "rewards/format_reward": 0.9157366491854191,
      "step": 65
    },
    {
      "completion_length": 226.51317901611327,
      "epoch": 0.24734982332155478,
      "grad_norm": 3.3317148685455322,
      "kl": 1.24912109375,
      "learning_rate": 1.8741529192927528e-05,
      "loss": 0.05,
      "reward": 1.0268973752856254,
      "reward_std": 0.4129971068352461,
      "rewards/accuracy_reward": 0.20267858097795396,
      "rewards/format_reward": 0.824218786507845,
      "step": 70
    },
    {
      "completion_length": 128.79185905456544,
      "epoch": 0.26501766784452296,
      "grad_norm": 14.126457214355469,
      "kl": 1.390869140625,
      "learning_rate": 1.8424672050733577e-05,
      "loss": 0.0556,
      "reward": 1.1761161178350448,
      "reward_std": 0.30296447649598124,
      "rewards/accuracy_reward": 0.2532366174273193,
      "rewards/format_reward": 0.9228795051574707,
      "step": 75
    },
    {
      "completion_length": 130.28717198371888,
      "epoch": 0.2826855123674912,
      "grad_norm": 12.251070976257324,
      "kl": 4.2921875,
      "learning_rate": 1.8075605191627242e-05,
      "loss": 0.1717,
      "reward": 1.15781254991889,
      "reward_std": 0.31116877496242523,
      "rewards/accuracy_reward": 0.2357142967171967,
      "rewards/format_reward": 0.9220982573926448,
      "step": 80
    },
    {
      "completion_length": 166.793869304657,
      "epoch": 0.3003533568904594,
      "grad_norm": 8.950257301330566,
      "kl": 3.652392578125,
      "learning_rate": 1.7695663189185703e-05,
      "loss": 0.1461,
      "reward": 1.1366071909666062,
      "reward_std": 0.37287568356841805,
      "rewards/accuracy_reward": 0.24709822554141284,
      "rewards/format_reward": 0.8895089700818062,
      "step": 85
    },
    {
      "completion_length": 127.73795194625855,
      "epoch": 0.31802120141342755,
      "grad_norm": 2.8447344303131104,
      "kl": 2.64189453125,
      "learning_rate": 1.7286298660705877e-05,
      "loss": 0.1056,
      "reward": 1.1922991588711738,
      "reward_std": 0.31104280035942794,
      "rewards/accuracy_reward": 0.25502233263105156,
      "rewards/format_reward": 0.93727682903409,
      "step": 90
    },
    {
      "completion_length": 113.19252805709839,
      "epoch": 0.33568904593639576,
      "grad_norm": 1.4655897617340088,
      "kl": 1.2039306640625,
      "learning_rate": 1.6849076713469914e-05,
      "loss": 0.0481,
      "reward": 1.2165179058909417,
      "reward_std": 0.29555373433977367,
      "rewards/accuracy_reward": 0.2671875121071935,
      "rewards/format_reward": 0.9493304006755352,
      "step": 95
    },
    {
      "completion_length": 86.61194581985474,
      "epoch": 0.35335689045936397,
      "grad_norm": 0.3733413517475128,
      "kl": 0.6366943359375,
      "learning_rate": 1.6385668960932143e-05,
      "loss": 0.0255,
      "reward": 1.1428571932017804,
      "reward_std": 0.2961823304183781,
      "rewards/accuracy_reward": 0.24252233356237413,
      "rewards/format_reward": 0.9003348629921675,
      "step": 100
    },
    {
      "epoch": 0.35335689045936397,
      "eval_completion_length": 124.61478996276855,
      "eval_kl": 0.7275390625,
      "eval_loss": 0.02880026213824749,
      "eval_reward": 1.2354911267757416,
      "eval_reward_std": 0.30917802825570107,
      "eval_rewards/accuracy_reward": 0.2946428656578064,
      "eval_rewards/format_reward": 0.940848246216774,
      "eval_runtime": 36.0891,
      "eval_samples_per_second": 2.743,
      "eval_steps_per_second": 0.028,
      "step": 100
    },
    {
      "completion_length": 183.94967460632324,
      "epoch": 0.3710247349823322,
      "grad_norm": 0.29940587282180786,
      "kl": 0.660888671875,
      "learning_rate": 1.5897847131705194e-05,
      "loss": 0.0264,
      "reward": 1.2210938051342963,
      "reward_std": 0.3049804212525487,
      "rewards/accuracy_reward": 0.27265626164153217,
      "rewards/format_reward": 0.9484375402331352,
      "step": 105
    },
    {
      "completion_length": 231.64610500335692,
      "epoch": 0.38869257950530034,
      "grad_norm": 1.9811211824417114,
      "kl": 0.8390380859375,
      "learning_rate": 1.5387476295779737e-05,
      "loss": 0.0336,
      "reward": 1.2466518417000771,
      "reward_std": 0.3104502111673355,
      "rewards/accuracy_reward": 0.3001116213388741,
      "rewards/format_reward": 0.9465402208268643,
      "step": 110
    },
    {
      "completion_length": 193.47433891296387,
      "epoch": 0.40636042402826855,
      "grad_norm": 1.2147762775421143,
      "kl": 1.2443359375,
      "learning_rate": 1.4856507733875837e-05,
      "loss": 0.0498,
      "reward": 1.2349330991506577,
      "reward_std": 0.30721699353307486,
      "rewards/accuracy_reward": 0.2930803688708693,
      "rewards/format_reward": 0.941852717846632,
      "step": 115
    },
    {
      "completion_length": 153.606591796875,
      "epoch": 0.42402826855123676,
      "grad_norm": 0.3809666931629181,
      "kl": 0.4972412109375,
      "learning_rate": 1.4306971477188223e-05,
      "loss": 0.0199,
      "reward": 1.2648438036441803,
      "reward_std": 0.290036397613585,
      "rewards/accuracy_reward": 0.3108259065076709,
      "rewards/format_reward": 0.9540178947150707,
      "step": 120
    },
    {
      "completion_length": 145.02165756225585,
      "epoch": 0.4416961130742049,
      "grad_norm": 0.4991630017757416,
      "kl": 0.4145751953125,
      "learning_rate": 1.3740968546047935e-05,
      "loss": 0.0166,
      "reward": 1.2626116588711738,
      "reward_std": 0.26841245898976923,
      "rewards/accuracy_reward": 0.3021205497905612,
      "rewards/format_reward": 0.9604911148548126,
      "step": 125
    },
    {
      "completion_length": 163.11552076339723,
      "epoch": 0.45936395759717313,
      "grad_norm": 0.31778135895729065,
      "kl": 0.36231689453125,
      "learning_rate": 1.3160662917174045e-05,
      "loss": 0.0145,
      "reward": 1.285379521548748,
      "reward_std": 0.2959928734228015,
      "rewards/accuracy_reward": 0.3406250147148967,
      "rewards/format_reward": 0.9447545073926449,
      "step": 130
    },
    {
      "completion_length": 141.67891292572023,
      "epoch": 0.47703180212014135,
      "grad_norm": 0.34590408205986023,
      "kl": 0.4644775390625,
      "learning_rate": 1.2568273250226681e-05,
      "loss": 0.0186,
      "reward": 1.2870536252856255,
      "reward_std": 0.26789645981043575,
      "rewards/accuracy_reward": 0.33683037385344505,
      "rewards/format_reward": 0.9502232536673546,
      "step": 135
    },
    {
      "completion_length": 144.7002299308777,
      "epoch": 0.49469964664310956,
      "grad_norm": 0.44773581624031067,
      "kl": 0.4588134765625,
      "learning_rate": 1.1966064405292887e-05,
      "loss": 0.0184,
      "reward": 1.2602679148316382,
      "reward_std": 0.2673138614743948,
      "rewards/accuracy_reward": 0.3029018001630902,
      "rewards/format_reward": 0.9573661126196384,
      "step": 140
    },
    {
      "completion_length": 160.48873472213745,
      "epoch": 0.5123674911660777,
      "grad_norm": 0.3488762080669403,
      "kl": 0.369140625,
      "learning_rate": 1.1356338783736256e-05,
      "loss": 0.0148,
      "reward": 1.2993304133415222,
      "reward_std": 0.27455081362277267,
      "rewards/accuracy_reward": 0.3327009085565805,
      "rewards/format_reward": 0.9666295036673546,
      "step": 145
    },
    {
      "completion_length": 171.54386882781984,
      "epoch": 0.5300353356890459,
      "grad_norm": 0.5754287242889404,
      "kl": 0.3918701171875,
      "learning_rate": 1.0741427525516463e-05,
      "loss": 0.0157,
      "reward": 1.3091518446803092,
      "reward_std": 0.26976782493293283,
      "rewards/accuracy_reward": 0.34665180258452893,
      "rewards/format_reward": 0.9625000424683094,
      "step": 150
    },
    {
      "completion_length": 163.8817042350769,
      "epoch": 0.5477031802120141,
      "grad_norm": 0.24734921753406525,
      "kl": 0.4376220703125,
      "learning_rate": 1.012368159663363e-05,
      "loss": 0.0175,
      "reward": 1.3056920334696769,
      "reward_std": 0.2626989148557186,
      "rewards/accuracy_reward": 0.33872769456356766,
      "rewards/format_reward": 0.9669643260538578,
      "step": 155
    },
    {
      "completion_length": 214.48315677642822,
      "epoch": 0.5653710247349824,
      "grad_norm": 0.39896252751350403,
      "kl": 0.36552734375,
      "learning_rate": 9.505462800772612e-06,
      "loss": 0.0146,
      "reward": 1.2992188043892383,
      "reward_std": 0.3003792591392994,
      "rewards/accuracy_reward": 0.34944198122248055,
      "rewards/format_reward": 0.9497768260538578,
      "step": 160
    },
    {
      "completion_length": 212.60224132537843,
      "epoch": 0.5830388692579506,
      "grad_norm": 0.3877353072166443,
      "kl": 0.3916259765625,
      "learning_rate": 8.889134749511956e-06,
      "loss": 0.0157,
      "reward": 1.2678571984171867,
      "reward_std": 0.33237045761197803,
      "rewards/accuracy_reward": 0.329464303329587,
      "rewards/format_reward": 0.9383928969502449,
      "step": 165
    },
    {
      "completion_length": 193.6290256500244,
      "epoch": 0.6007067137809188,
      "grad_norm": 0.2638029456138611,
      "kl": 0.4411865234375,
      "learning_rate": 8.277053825620836e-06,
      "loss": 0.0176,
      "reward": 1.2617187976837159,
      "reward_std": 0.3131700936704874,
      "rewards/accuracy_reward": 0.32912948057055474,
      "rewards/format_reward": 0.9325893305242061,
      "step": 170
    },
    {
      "completion_length": 163.14465045928955,
      "epoch": 0.6183745583038869,
      "grad_norm": 0.3375968933105469,
      "kl": 0.792431640625,
      "learning_rate": 7.671560173993588e-06,
      "loss": 0.0317,
      "reward": 1.3142857745289802,
      "reward_std": 0.2933783018961549,
      "rewards/accuracy_reward": 0.3618303745985031,
      "rewards/format_reward": 0.9524553991854191,
      "step": 175
    },
    {
      "completion_length": 168.05692710876465,
      "epoch": 0.6360424028268551,
      "grad_norm": 0.3316074311733246,
      "kl": 0.4262451171875,
      "learning_rate": 7.07496875466589e-06,
      "loss": 0.0171,
      "reward": 1.2792411267757415,
      "reward_std": 0.32045796802267434,
      "rewards/accuracy_reward": 0.3372768020257354,
      "rewards/format_reward": 0.9419643253087997,
      "step": 180
    },
    {
      "completion_length": 162.42344417572022,
      "epoch": 0.6537102473498233,
      "grad_norm": 0.3786272704601288,
      "kl": 0.5138427734375,
      "learning_rate": 6.489560492119225e-06,
      "loss": 0.0206,
      "reward": 1.2774554193019867,
      "reward_std": 0.31533648930490016,
      "rewards/accuracy_reward": 0.33761162012815477,
      "rewards/format_reward": 0.9398437954485417,
      "step": 185
    },
    {
      "completion_length": 138.84576539993287,
      "epoch": 0.6713780918727915,
      "grad_norm": 0.4081542491912842,
      "kl": 0.4474365234375,
      "learning_rate": 5.9175735547120975e-06,
      "loss": 0.0179,
      "reward": 1.3568080931901931,
      "reward_std": 0.2501720578409731,
      "rewards/accuracy_reward": 0.38180805277079344,
      "rewards/format_reward": 0.9750000454485417,
      "step": 190
    },
    {
      "completion_length": 187.03762016296386,
      "epoch": 0.6890459363957597,
      "grad_norm": 0.4072677493095398,
      "kl": 0.454052734375,
      "learning_rate": 5.361194797579108e-06,
      "loss": 0.0182,
      "reward": 1.306808091700077,
      "reward_std": 0.3165676988661289,
      "rewards/accuracy_reward": 0.3659598369151354,
      "rewards/format_reward": 0.9408482536673546,
      "step": 195
    },
    {
      "completion_length": 196.41183795928956,
      "epoch": 0.7067137809187279,
      "grad_norm": 0.2528441846370697,
      "kl": 0.410107421875,
      "learning_rate": 4.8225514017138205e-06,
      "loss": 0.0164,
      "reward": 1.3114955961704253,
      "reward_std": 0.324984360858798,
      "rewards/accuracy_reward": 0.36796876452863214,
      "rewards/format_reward": 0.9435268275439739,
      "step": 200
    },
    {
      "epoch": 0.7067137809187279,
      "eval_completion_length": 190.1671905517578,
      "eval_kl": 0.3974609375,
      "eval_loss": 0.013840513303875923,
      "eval_reward": 1.3738839626312256,
      "eval_reward_std": 0.3043372705578804,
      "eval_rewards/accuracy_reward": 0.4107143059372902,
      "eval_rewards/format_reward": 0.9631696939468384,
      "eval_runtime": 41.1198,
      "eval_samples_per_second": 2.408,
      "eval_steps_per_second": 0.024,
      "step": 200
    },
    {
      "completion_length": 187.42009782791138,
      "epoch": 0.7243816254416962,
      "grad_norm": 0.5058914422988892,
      "kl": 0.42244873046875,
      "learning_rate": 4.303702741201431e-06,
      "loss": 0.0169,
      "reward": 1.3109375566244126,
      "reward_std": 0.309923352021724,
      "rewards/accuracy_reward": 0.35669644493609665,
      "rewards/format_reward": 0.9542411133646965,
      "step": 205
    },
    {
      "completion_length": 196.0847183227539,
      "epoch": 0.7420494699646644,
      "grad_norm": 0.4528452157974243,
      "kl": 0.459375,
      "learning_rate": 3.8066325096949153e-06,
      "loss": 0.0184,
      "reward": 1.2954241663217545,
      "reward_std": 0.3209618354216218,
      "rewards/accuracy_reward": 0.354799123480916,
      "rewards/format_reward": 0.9406250424683094,
      "step": 210
    },
    {
      "completion_length": 182.6784683227539,
      "epoch": 0.7597173144876325,
      "grad_norm": 0.5112515687942505,
      "kl": 0.433740234375,
      "learning_rate": 3.3332411362372063e-06,
      "loss": 0.0174,
      "reward": 1.31026791036129,
      "reward_std": 0.32117727063596246,
      "rewards/accuracy_reward": 0.3594866218045354,
      "rewards/format_reward": 0.9507812909781933,
      "step": 215
    },
    {
      "completion_length": 200.36418285369874,
      "epoch": 0.7773851590106007,
      "grad_norm": 0.31226980686187744,
      "kl": 0.4846923828125,
      "learning_rate": 2.8853385194256677e-06,
      "loss": 0.0194,
      "reward": 1.273549161851406,
      "reward_std": 0.334391163662076,
      "rewards/accuracy_reward": 0.3455357301980257,
      "rewards/format_reward": 0.9280134335160255,
      "step": 220
    },
    {
      "completion_length": 183.47835721969605,
      "epoch": 0.7950530035335689,
      "grad_norm": 0.437847375869751,
      "kl": 0.418212890625,
      "learning_rate": 2.464637107698046e-06,
      "loss": 0.0167,
      "reward": 1.3132812976837158,
      "reward_std": 0.3066121691837907,
      "rewards/accuracy_reward": 0.36618305146694186,
      "rewards/format_reward": 0.9470982573926449,
      "step": 225
    },
    {
      "completion_length": 199.54431734085082,
      "epoch": 0.8127208480565371,
      "grad_norm": 0.35982805490493774,
      "kl": 0.4347900390625,
      "learning_rate": 2.072745352195794e-06,
      "loss": 0.0174,
      "reward": 1.286272370815277,
      "reward_std": 0.333691646438092,
      "rewards/accuracy_reward": 0.3571428755298257,
      "rewards/format_reward": 0.9291295044124126,
      "step": 230
    },
    {
      "completion_length": 201.60123605728148,
      "epoch": 0.8303886925795053,
      "grad_norm": 0.4822141230106354,
      "kl": 0.4885009765625,
      "learning_rate": 1.7111615572361628e-06,
      "loss": 0.0195,
      "reward": 1.281696480512619,
      "reward_std": 0.36514539532363416,
      "rewards/accuracy_reward": 0.36071430388838055,
      "rewards/format_reward": 0.9209821879863739,
      "step": 235
    },
    {
      "completion_length": 191.80257606506348,
      "epoch": 0.8480565371024735,
      "grad_norm": 0.3629634380340576,
      "kl": 0.4655517578125,
      "learning_rate": 1.381268151904298e-06,
      "loss": 0.0186,
      "reward": 1.3007813036441802,
      "reward_std": 0.33475553449243306,
      "rewards/accuracy_reward": 0.3681919816881418,
      "rewards/format_reward": 0.9325893260538578,
      "step": 240
    },
    {
      "completion_length": 194.34130392074584,
      "epoch": 0.8657243816254417,
      "grad_norm": 0.42891520261764526,
      "kl": 0.462939453125,
      "learning_rate": 1.0843264046665558e-06,
      "loss": 0.0185,
      "reward": 1.302232201397419,
      "reward_std": 0.3392474535852671,
      "rewards/accuracy_reward": 0.3680803723633289,
      "rewards/format_reward": 0.9341518305242061,
      "step": 245
    },
    {
      "completion_length": 200.85045433044434,
      "epoch": 0.8833922261484098,
      "grad_norm": 0.4686770737171173,
      "kl": 0.4640380859375,
      "learning_rate": 8.214716012124491e-07,
      "loss": 0.0186,
      "reward": 1.3125000596046448,
      "reward_std": 0.3251642185263336,
      "rewards/accuracy_reward": 0.3733259098604321,
      "rewards/format_reward": 0.9391741506755352,
      "step": 250
    },
    {
      "completion_length": 194.45402736663817,
      "epoch": 0.901060070671378,
      "grad_norm": 0.854424774646759,
      "kl": 0.4887451171875,
      "learning_rate": 5.937087039615619e-07,
      "loss": 0.0196,
      "reward": 1.3106027349829674,
      "reward_std": 0.32200891636312007,
      "rewards/accuracy_reward": 0.3715401954948902,
      "rewards/format_reward": 0.9390625439584255,
      "step": 255
    },
    {
      "completion_length": 201.23203868865966,
      "epoch": 0.9187279151943463,
      "grad_norm": 0.4338059723377228,
      "kl": 0.49970703125,
      "learning_rate": 4.019085098303077e-07,
      "loss": 0.02,
      "reward": 1.3261161267757415,
      "reward_std": 0.33655967731028796,
      "rewards/accuracy_reward": 0.39051341228187086,
      "rewards/format_reward": 0.9356027200818062,
      "step": 260
    },
    {
      "completion_length": 204.1974422454834,
      "epoch": 0.9363957597173145,
      "grad_norm": 0.5758671164512634,
      "kl": 0.49468994140625,
      "learning_rate": 2.4680432094837394e-07,
      "loss": 0.0198,
      "reward": 1.300446479022503,
      "reward_std": 0.3384882753714919,
      "rewards/accuracy_reward": 0.3676339445635676,
      "rewards/format_reward": 0.9328125424683094,
      "step": 265
    },
    {
      "completion_length": 199.08661632537843,
      "epoch": 0.9540636042402827,
      "grad_norm": 0.5108721852302551,
      "kl": 0.45732421875,
      "learning_rate": 1.289891410535593e-07,
      "loss": 0.0183,
      "reward": 1.3210938066244124,
      "reward_std": 0.33634756207466127,
      "rewards/accuracy_reward": 0.3814732311293483,
      "rewards/format_reward": 0.939620578289032,
      "step": 270
    },
    {
      "completion_length": 200.83203945159912,
      "epoch": 0.9717314487632509,
      "grad_norm": 0.40297839045524597,
      "kl": 0.4919921875,
      "learning_rate": 4.8913408283934874e-08,
      "loss": 0.0197,
      "reward": 1.302455411851406,
      "reward_std": 0.33796050902456043,
      "rewards/accuracy_reward": 0.37109376695007085,
      "rewards/format_reward": 0.9313616514205932,
      "step": 275
    },
    {
      "completion_length": 205.692866897583,
      "epoch": 0.9893992932862191,
      "grad_norm": 0.4729168713092804,
      "kl": 0.4669677734375,
      "learning_rate": 6.883273035447335e-09,
      "loss": 0.0187,
      "reward": 1.3194196969270706,
      "reward_std": 0.3483607778325677,
      "rewards/accuracy_reward": 0.38526787627488374,
      "rewards/format_reward": 0.9341518260538578,
      "step": 280
    },
    {
      "completion_length": 200.4055379231771,
      "epoch": 1.0,
      "kl": 0.4570719401041667,
      "reward": 1.3089658319950104,
      "reward_std": 0.32773972923556965,
      "rewards/accuracy_reward": 0.3723958491658171,
      "rewards/format_reward": 0.9365699800352255,
      "step": 283,
      "total_flos": 0.0,
      "train_loss": 0.027384539475202127,
      "train_runtime": 37232.0457,
      "train_samples_per_second": 1.946,
      "train_steps_per_second": 0.008
    }
  ],
  "logging_steps": 5,
  "max_steps": 283,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}