{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9893390191897654,
  "eval_steps": 100,
  "global_step": 58,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "completion_length": 613.9678833007813,
      "epoch": 0.08528784648187633,
      "grad_norm": 1.6863154172897339,
      "kl": 0.0001811981201171875,
      "learning_rate": 2.5e-06,
      "loss": 0.0,
      "reward": 0.6475446730852127,
      "reward_std": 0.32297179140150545,
      "rewards/accuracy_reward": 0.646651816368103,
      "rewards/format_reward": 0.0008928571827709675,
      "step": 5
    },
    {
      "completion_length": 623.497127532959,
      "epoch": 0.17057569296375266,
      "grad_norm": 5.361064910888672,
      "kl": 4.185117244720459,
      "learning_rate": 2.956412726139078e-06,
      "loss": 0.1668,
      "reward": 0.695758955180645,
      "reward_std": 0.2824364464730024,
      "rewards/accuracy_reward": 0.6948660999536515,
      "rewards/format_reward": 0.0008928571827709675,
      "step": 10
    },
    {
      "completion_length": 621.0348518371582,
      "epoch": 0.255863539445629,
      "grad_norm": 0.34819459915161133,
      "kl": 0.006930732727050781,
      "learning_rate": 2.7836719084521715e-06,
      "loss": 0.0003,
      "reward": 0.7484375357627868,
      "reward_std": 0.23940655626356602,
      "rewards/accuracy_reward": 0.7484375357627868,
      "rewards/format_reward": 0.0,
      "step": 15
    },
    {
      "completion_length": 617.9591751098633,
      "epoch": 0.3411513859275053,
      "grad_norm": 5.891634941101074,
      "kl": 0.007678604125976563,
      "learning_rate": 2.4946839873611927e-06,
      "loss": 0.0003,
      "reward": 0.7546875327825546,
      "reward_std": 0.23243394643068313,
      "rewards/accuracy_reward": 0.7546875327825546,
      "rewards/format_reward": 0.0,
      "step": 20
    },
    {
      "completion_length": 625.6734642028808,
      "epoch": 0.42643923240938164,
      "grad_norm": 0.4985528588294983,
      "kl": 0.06998424530029297,
      "learning_rate": 2.1156192081791355e-06,
      "loss": 0.0028,
      "reward": 0.7497768148779869,
      "reward_std": 0.20579003393650055,
      "rewards/accuracy_reward": 0.7497768148779869,
      "rewards/format_reward": 0.0,
      "step": 25
    },
    {
      "completion_length": 621.3915466308594,
      "epoch": 0.511727078891258,
      "grad_norm": 0.4441167712211609,
      "kl": 0.0034616470336914064,
      "learning_rate": 1.6808050203829845e-06,
      "loss": 0.0001,
      "reward": 0.7517857521772384,
      "reward_std": 0.19103028811514378,
      "rewards/accuracy_reward": 0.7517857521772384,
      "rewards/format_reward": 0.0,
      "step": 30
    },
    {
      "completion_length": 611.6134216308594,
      "epoch": 0.5970149253731343,
      "grad_norm": 0.45489344000816345,
      "kl": 0.003511810302734375,
      "learning_rate": 1.2296174432791415e-06,
      "loss": 0.0001,
      "reward": 0.7323660954833031,
      "reward_std": 0.19216552414000035,
      "rewards/accuracy_reward": 0.7323660954833031,
      "rewards/format_reward": 0.0,
      "step": 35
    },
    {
      "completion_length": 594.996004486084,
      "epoch": 0.6823027718550106,
      "grad_norm": 0.38299915194511414,
      "kl": 0.0036407470703125,
      "learning_rate": 8.029152419343472e-07,
      "loss": 0.0001,
      "reward": 0.7618303939700126,
      "reward_std": 0.182381122559309,
      "rewards/accuracy_reward": 0.7618303939700126,
      "rewards/format_reward": 0.0,
      "step": 40
    },
    {
      "completion_length": 607.5631973266602,
      "epoch": 0.767590618336887,
      "grad_norm": 0.1558612883090973,
      "kl": 0.003850555419921875,
      "learning_rate": 4.3933982822017883e-07,
      "loss": 0.0002,
      "reward": 0.7508928924798965,
      "reward_std": 0.19622449725866317,
      "rewards/accuracy_reward": 0.7508928924798965,
      "rewards/format_reward": 0.0,
      "step": 45
    },
    {
      "completion_length": 610.1696670532226,
      "epoch": 0.8528784648187633,
      "grad_norm": 0.11370094120502472,
      "kl": 0.004032135009765625,
      "learning_rate": 1.718159615201853e-07,
      "loss": 0.0002,
      "reward": 0.7486607491970062,
      "reward_std": 0.17984699215739966,
      "rewards/accuracy_reward": 0.7486607491970062,
      "rewards/format_reward": 0.0,
      "step": 50
    },
    {
      "completion_length": 603.7953384399414,
      "epoch": 0.9381663113006397,
      "grad_norm": 0.09367287904024124,
      "kl": 0.0048553466796875,
      "learning_rate": 2.4570139579284723e-08,
      "loss": 0.0002,
      "reward": 0.7750000357627869,
      "reward_std": 0.18225797163322568,
      "rewards/accuracy_reward": 0.7750000357627869,
      "rewards/format_reward": 0.0,
      "step": 55
    },
    {
      "completion_length": 598.5974960327148,
      "epoch": 0.9893390191897654,
      "kl": 0.003758112589518229,
      "reward": 0.7678571765621504,
      "reward_std": 0.18772160820662975,
      "rewards/accuracy_reward": 0.7678571765621504,
      "rewards/format_reward": 0.0,
      "step": 58,
      "total_flos": 0.0,
      "train_loss": 0.01571170037346621,
      "train_runtime": 7483.8014,
      "train_samples_per_second": 1.002,
      "train_steps_per_second": 0.008
    }
  ],
  "logging_steps": 5,
  "max_steps": 58,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}