|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.4946695095948828,
  "eval_steps": 500,
  "global_step": 87,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "completion_length": 383.43863677978516,
      "epoch": 0.017057569296375266,
      "grad_norm": 0.7056748867034912,
      "kl": 0.0,
      "learning_rate": 3.333333333333333e-07,
      "loss": 0.1106,
      "reward": 0.712053619325161,
      "reward_std": 0.39283478632569313,
      "rewards/accuracy_reward": 0.7087053954601288,
      "rewards/format_reward": 0.003348214435391128,
      "step": 1
    },
    {
      "completion_length": 417.2608995437622,
      "epoch": 0.08528784648187633,
      "grad_norm": 10.000102996826172,
      "kl": 0.0006268918514251709,
      "learning_rate": 1.6666666666666669e-06,
      "loss": 0.0585,
      "reward": 0.7357701230794191,
      "reward_std": 0.37447383999824524,
      "rewards/accuracy_reward": 0.7313058394938707,
      "rewards/format_reward": 0.004464285942958668,
      "step": 5
    },
    {
      "completion_length": 497.51051940917966,
      "epoch": 0.17057569296375266,
      "grad_norm": 0.3063008487224579,
      "kl": 0.0017390489578247071,
      "learning_rate": 2.9987834972573546e-06,
      "loss": 0.0784,
      "reward": 0.7703125357627869,
      "reward_std": 0.3321108963340521,
      "rewards/accuracy_reward": 0.7689732491970063,
      "rewards/format_reward": 0.0013392857974395156,
      "step": 10
    },
    {
      "completion_length": 500.5500213623047,
      "epoch": 0.255863539445629,
      "grad_norm": 2.702301263809204,
      "kl": 0.010482597351074218,
      "learning_rate": 2.956412726139078e-06,
      "loss": 0.1039,
      "reward": 0.8607143312692642,
      "reward_std": 0.2081299727782607,
      "rewards/accuracy_reward": 0.8604911163449287,
      "rewards/format_reward": 0.00022321429569274187,
      "step": 15
    },
    {
      "completion_length": 579.4114112854004,
      "epoch": 0.3411513859275053,
      "grad_norm": 0.1238444522023201,
      "kl": 0.002919578552246094,
      "learning_rate": 2.8551756519155732e-06,
      "loss": 0.0479,
      "reward": 0.8312500357627869,
      "reward_std": 0.18435155414044857,
      "rewards/accuracy_reward": 0.8312500357627869,
      "rewards/format_reward": 0.0,
      "step": 20
    },
    {
      "completion_length": 549.7174324035644,
      "epoch": 0.42643923240938164,
      "grad_norm": 0.8831402659416199,
      "kl": 0.004978370666503906,
      "learning_rate": 2.699164145105252e-06,
      "loss": 0.049,
      "reward": 0.8495536148548126,
      "reward_std": 0.16553675523027778,
      "rewards/accuracy_reward": 0.8495536148548126,
      "rewards/format_reward": 0.0,
      "step": 25
    },
    {
      "completion_length": 592.7422164916992,
      "epoch": 0.511727078891258,
      "grad_norm": 0.1488523930311203,
      "kl": 0.0038507461547851564,
      "learning_rate": 2.4946839873611927e-06,
      "loss": 0.0382,
      "reward": 0.8165178954601288,
      "reward_std": 0.17473283503204584,
      "rewards/accuracy_reward": 0.8165178954601288,
      "rewards/format_reward": 0.0,
      "step": 30
    },
    {
      "completion_length": 622.1555999755859,
      "epoch": 0.5970149253731343,
      "grad_norm": 0.15904287993907928,
      "kl": 0.003652000427246094,
      "learning_rate": 2.25e-06,
      "loss": 0.0396,
      "reward": 0.7752232491970062,
      "reward_std": 0.2052750363945961,
      "rewards/accuracy_reward": 0.7752232491970062,
      "rewards/format_reward": 0.0,
      "step": 35
    },
    {
      "completion_length": 623.6841766357422,
      "epoch": 0.6823027718550106,
      "grad_norm": 0.11450017243623734,
      "kl": 0.004055023193359375,
      "learning_rate": 1.975001990702209e-06,
      "loss": 0.0393,
      "reward": 0.7602678909897804,
      "reward_std": 0.2107524886727333,
      "rewards/accuracy_reward": 0.7602678909897804,
      "rewards/format_reward": 0.0,
      "step": 40
    },
    {
      "completion_length": 709.4147674560547,
      "epoch": 0.767590618336887,
      "grad_norm": 0.08205878734588623,
      "kl": 0.011851119995117187,
      "learning_rate": 1.6808050203829845e-06,
      "loss": 0.0359,
      "reward": 0.5767857410013676,
      "reward_std": 0.23020651061087846,
      "rewards/accuracy_reward": 0.5767857410013676,
      "rewards/format_reward": 0.0,
      "step": 45
    },
    {
      "completion_length": 716.3203491210937,
      "epoch": 0.8528784648187633,
      "grad_norm": 0.11587467789649963,
      "kl": 0.00437164306640625,
      "learning_rate": 1.3793001469249112e-06,
      "loss": 0.0316,
      "reward": 0.574553594738245,
      "reward_std": 0.24477371014654636,
      "rewards/accuracy_reward": 0.574553594738245,
      "rewards/format_reward": 0.0,
      "step": 50
    },
    {
      "completion_length": 724.092886352539,
      "epoch": 0.9381663113006397,
      "grad_norm": 0.09563813358545303,
      "kl": 0.003982162475585938,
      "learning_rate": 1.0826738041253211e-06,
      "loss": 0.0343,
      "reward": 0.586830385029316,
      "reward_std": 0.25132143292576076,
      "rewards/accuracy_reward": 0.586830385029316,
      "rewards/format_reward": 0.0,
      "step": 55
    },
    {
      "completion_length": 572.2068037553267,
      "epoch": 1.0341151385927505,
      "grad_norm": 2.0511133670806885,
      "kl": 0.006678494540127841,
      "learning_rate": 8.029152419343472e-07,
      "loss": 0.0443,
      "reward": 0.7159091213887389,
      "reward_std": 0.1854671297002245,
      "rewards/accuracy_reward": 0.7159091213887389,
      "rewards/format_reward": 0.0,
      "step": 60
    },
    {
      "completion_length": 398.4535873413086,
      "epoch": 1.1194029850746268,
      "grad_norm": 0.10762660950422287,
      "kl": 0.007398223876953125,
      "learning_rate": 5.513319366069343e-07,
      "loss": 0.0402,
      "reward": 0.9424107536673546,
      "reward_std": 0.08303849077783525,
      "rewards/accuracy_reward": 0.9424107536673546,
      "rewards/format_reward": 0.0,
      "step": 65
    },
    {
      "completion_length": 435.78461837768555,
      "epoch": 1.2046908315565032,
      "grad_norm": 0.2290273904800415,
      "kl": 0.022534942626953124,
      "learning_rate": 3.380925572585183e-07,
      "loss": 0.0382,
      "reward": 0.9220982596278191,
      "reward_std": 0.10309098572470247,
      "rewards/accuracy_reward": 0.9220982596278191,
      "rewards/format_reward": 0.0,
      "step": 70
    },
    {
      "completion_length": 480.5415397644043,
      "epoch": 1.2899786780383795,
      "grad_norm": 0.15823833644390106,
      "kl": 0.005892181396484375,
      "learning_rate": 1.718159615201853e-07,
      "loss": 0.043,
      "reward": 0.8839286133646965,
      "reward_std": 0.1372868578415364,
      "rewards/accuracy_reward": 0.883705398440361,
      "rewards/format_reward": 0.00022321429569274187,
      "step": 75
    },
    {
      "completion_length": 517.8933265686035,
      "epoch": 1.375266524520256,
      "grad_norm": 0.12397471815347672,
      "kl": 0.005947113037109375,
      "learning_rate": 5.922283255294164e-08,
      "loss": 0.0319,
      "reward": 0.8825893267989159,
      "reward_std": 0.14030799297615887,
      "rewards/accuracy_reward": 0.8825893267989159,
      "rewards/format_reward": 0.0,
      "step": 80
    },
    {
      "completion_length": 516.631275177002,
      "epoch": 1.4605543710021323,
      "grad_norm": 0.13666246831417084,
      "kl": 0.006908416748046875,
      "learning_rate": 4.864037798685106e-09,
      "loss": 0.0268,
      "reward": 0.8665178909897804,
      "reward_std": 0.14118066830560566,
      "rewards/accuracy_reward": 0.8662946775555611,
      "rewards/format_reward": 0.00022321429569274187,
      "step": 85
    },
    {
      "completion_length": 589.9185562133789,
      "epoch": 1.4946695095948828,
      "kl": 0.005696296691894531,
      "reward": 0.7940848618745804,
      "reward_std": 0.18597882147878408,
      "rewards/accuracy_reward": 0.7940848618745804,
      "rewards/format_reward": 0.0,
      "step": 87,
      "total_flos": 0.0,
      "train_loss": 0.04595949243882607,
      "train_runtime": 10042.601,
      "train_samples_per_second": 1.12,
      "train_steps_per_second": 0.009
    }
  ],
  "logging_steps": 5,
  "max_steps": 87,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}
|
|