{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.7739526199089743,
  "eval_steps": 100,
  "global_step": 1900,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09335978527249388,
      "grad_norm": 5.347364902496338,
      "learning_rate": 9.897551417342852e-05,
      "loss": 4.6888,
      "step": 100
    },
    {
      "epoch": 0.09335978527249388,
      "eval_loss": 0.3825870752334595,
      "eval_runtime": 23.2535,
      "eval_samples_per_second": 3.741,
      "eval_steps_per_second": 3.741,
      "step": 100
    },
    {
      "epoch": 0.18671957054498775,
      "grad_norm": 5.412732124328613,
      "learning_rate": 9.374809314115647e-05,
      "loss": 2.3316,
      "step": 200
    },
    {
      "epoch": 0.18671957054498775,
      "eval_loss": 0.25006037950515747,
      "eval_runtime": 23.4002,
      "eval_samples_per_second": 3.718,
      "eval_steps_per_second": 3.718,
      "step": 200
    },
    {
      "epoch": 0.2800793558174816,
      "grad_norm": 2.9451260566711426,
      "learning_rate": 8.454375586546417e-05,
      "loss": 1.5881,
      "step": 300
    },
    {
      "epoch": 0.2800793558174816,
      "eval_loss": 0.1890459656715393,
      "eval_runtime": 23.2549,
      "eval_samples_per_second": 3.741,
      "eval_steps_per_second": 3.741,
      "step": 300
    },
    {
      "epoch": 0.3734391410899755,
      "grad_norm": 5.892703533172607,
      "learning_rate": 7.219922175040683e-05,
      "loss": 1.2221,
      "step": 400
    },
    {
      "epoch": 0.3734391410899755,
      "eval_loss": 0.14931949973106384,
      "eval_runtime": 23.3305,
      "eval_samples_per_second": 3.729,
      "eval_steps_per_second": 3.729,
      "step": 400
    },
    {
      "epoch": 0.4667989263624694,
      "grad_norm": 3.341991424560547,
      "learning_rate": 5.783666949486448e-05,
      "loss": 1.038,
      "step": 500
    },
    {
      "epoch": 0.4667989263624694,
      "eval_loss": 0.13619396090507507,
      "eval_runtime": 23.3865,
      "eval_samples_per_second": 3.72,
      "eval_steps_per_second": 3.72,
      "step": 500
    },
    {
      "epoch": 0.5601587116349632,
      "grad_norm": 2.6955649852752686,
      "learning_rate": 4.276172554471998e-05,
      "loss": 0.9644,
      "step": 600
    },
    {
      "epoch": 0.5601587116349632,
      "eval_loss": 0.1279439479112625,
      "eval_runtime": 23.3534,
      "eval_samples_per_second": 3.725,
      "eval_steps_per_second": 3.725,
      "step": 600
    },
    {
      "epoch": 0.6535184969074571,
      "grad_norm": 3.1168127059936523,
      "learning_rate": 2.834477624505443e-05,
      "loss": 0.8806,
      "step": 700
    },
    {
      "epoch": 0.6535184969074571,
      "eval_loss": 0.12419606000185013,
      "eval_runtime": 23.2939,
      "eval_samples_per_second": 3.735,
      "eval_steps_per_second": 3.735,
      "step": 700
    },
    {
      "epoch": 0.746878282179951,
      "grad_norm": 2.7166848182678223,
      "learning_rate": 1.5896392999869652e-05,
      "loss": 0.8333,
      "step": 800
    },
    {
      "epoch": 0.746878282179951,
      "eval_loss": 0.1232759952545166,
      "eval_runtime": 23.4216,
      "eval_samples_per_second": 3.715,
      "eval_steps_per_second": 3.715,
      "step": 800
    },
    {
      "epoch": 0.8402380674524449,
      "grad_norm": 2.7149593830108643,
      "learning_rate": 6.548194903336408e-06,
      "loss": 0.8307,
      "step": 900
    },
    {
      "epoch": 0.8402380674524449,
      "eval_loss": 0.12131741642951965,
      "eval_runtime": 23.2806,
      "eval_samples_per_second": 3.737,
      "eval_steps_per_second": 3.737,
      "step": 900
    },
    {
      "epoch": 0.9335978527249388,
      "grad_norm": 2.5703158378601074,
      "learning_rate": 1.149979013737007e-06,
      "loss": 0.8138,
      "step": 1000
    },
    {
      "epoch": 0.9335978527249388,
      "eval_loss": 0.12097638100385666,
      "eval_runtime": 23.3058,
      "eval_samples_per_second": 3.733,
      "eval_steps_per_second": 3.733,
      "step": 1000
    },
    {
      "epoch": 1.0270743377290232,
      "grad_norm": 1.5639070272445679,
      "learning_rate": 5.02646972298292e-05,
      "loss": 0.4182,
      "step": 1100
    },
    {
      "epoch": 1.0270743377290232,
      "eval_loss": 0.12452684342861176,
      "eval_runtime": 11.3284,
      "eval_samples_per_second": 7.68,
      "eval_steps_per_second": 7.68,
      "step": 1100
    },
    {
      "epoch": 1.1204341230015171,
      "grad_norm": 1.3367255926132202,
      "learning_rate": 4.2727771933821015e-05,
      "loss": 0.807,
      "step": 1200
    },
    {
      "epoch": 1.1204341230015171,
      "eval_loss": 0.12348207086324692,
      "eval_runtime": 11.3327,
      "eval_samples_per_second": 7.677,
      "eval_steps_per_second": 7.677,
      "step": 1200
    },
    {
      "epoch": 1.213793908274011,
      "grad_norm": 1.3575553894042969,
      "learning_rate": 3.535690704015874e-05,
      "loss": 0.7986,
      "step": 1300
    },
    {
      "epoch": 1.213793908274011,
      "eval_loss": 0.12156575173139572,
      "eval_runtime": 11.3167,
      "eval_samples_per_second": 7.688,
      "eval_steps_per_second": 7.688,
      "step": 1300
    },
    {
      "epoch": 1.307153693546505,
      "grad_norm": 1.1323909759521484,
      "learning_rate": 2.8320415310590165e-05,
      "loss": 0.769,
      "step": 1400
    },
    {
      "epoch": 1.307153693546505,
      "eval_loss": 0.11894583702087402,
      "eval_runtime": 11.3324,
      "eval_samples_per_second": 7.677,
      "eval_steps_per_second": 7.677,
      "step": 1400
    },
    {
      "epoch": 1.4005134788189988,
      "grad_norm": 0.9585688710212708,
      "learning_rate": 2.1778974138217168e-05,
      "loss": 0.7456,
      "step": 1500
    },
    {
      "epoch": 1.4005134788189988,
      "eval_loss": 0.11771341413259506,
      "eval_runtime": 11.3495,
      "eval_samples_per_second": 7.666,
      "eval_steps_per_second": 7.666,
      "step": 1500
    },
    {
      "epoch": 1.4938732640914925,
      "grad_norm": 1.1619073152542114,
      "learning_rate": 1.58819564996737e-05,
      "loss": 0.7581,
      "step": 1600
    },
    {
      "epoch": 1.4938732640914925,
      "eval_loss": 0.11598959565162659,
      "eval_runtime": 11.3352,
      "eval_samples_per_second": 7.675,
      "eval_steps_per_second": 7.675,
      "step": 1600
    },
    {
      "epoch": 1.5872330493639866,
      "grad_norm": 0.9958471655845642,
      "learning_rate": 1.0764020042215306e-05,
      "loss": 0.7593,
      "step": 1700
    },
    {
      "epoch": 1.5872330493639866,
      "eval_loss": 0.1154765784740448,
      "eval_runtime": 11.381,
      "eval_samples_per_second": 7.644,
      "eval_steps_per_second": 7.644,
      "step": 1700
    },
    {
      "epoch": 1.6805928346364802,
      "grad_norm": 1.3259460926055908,
      "learning_rate": 6.54203219348205e-06,
      "loss": 0.7625,
      "step": 1800
    },
    {
      "epoch": 1.6805928346364802,
      "eval_loss": 0.11468129605054855,
      "eval_runtime": 11.3548,
      "eval_samples_per_second": 7.662,
      "eval_steps_per_second": 7.662,
      "step": 1800
    },
    {
      "epoch": 1.7739526199089743,
      "grad_norm": 1.9127427339553833,
      "learning_rate": 3.3124015086622216e-06,
      "loss": 0.7506,
      "step": 1900
    },
    {
      "epoch": 1.7739526199089743,
      "eval_loss": 0.11443212628364563,
      "eval_runtime": 11.3509,
      "eval_samples_per_second": 7.665,
      "eval_steps_per_second": 7.665,
      "step": 1900
    }
  ],
  "logging_steps": 100,
  "max_steps": 2142,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.5106809033990144e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}