{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.6207497820401047,
  "eval_steps": 50,
  "global_step": 750,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.17436791630340018,
      "grad_norm": 0.31028223037719727,
      "learning_rate": 0.0002,
      "loss": 1.7522,
      "mean_token_accuracy": 0.6208396552503109,
      "step": 50
    },
    {
      "epoch": 0.17436791630340018,
      "eval_loss": 1.5662453174591064,
      "eval_mean_token_accuracy": 0.6466238599197537,
      "eval_runtime": 216.8159,
      "eval_samples_per_second": 9.404,
      "eval_steps_per_second": 1.176,
      "step": 50
    },
    {
      "epoch": 0.34873583260680036,
      "grad_norm": 0.20454441010951996,
      "learning_rate": 0.0002,
      "loss": 1.4597,
      "mean_token_accuracy": 0.6637434582412243,
      "step": 100
    },
    {
      "epoch": 0.34873583260680036,
      "eval_loss": 1.4656527042388916,
      "eval_mean_token_accuracy": 0.6612562953257093,
      "eval_runtime": 219.0107,
      "eval_samples_per_second": 9.31,
      "eval_steps_per_second": 1.164,
      "step": 100
    },
    {
      "epoch": 0.5231037489102005,
      "grad_norm": 0.13001485168933868,
      "learning_rate": 0.0002,
      "loss": 1.4281,
      "mean_token_accuracy": 0.6697324779629708,
      "step": 150
    },
    {
      "epoch": 0.5231037489102005,
      "eval_loss": 1.4386416673660278,
      "eval_mean_token_accuracy": 0.6682294864280551,
      "eval_runtime": 218.6278,
      "eval_samples_per_second": 9.326,
      "eval_steps_per_second": 1.166,
      "step": 150
    },
    {
      "epoch": 0.6974716652136007,
      "grad_norm": 0.2524537146091461,
      "learning_rate": 0.0002,
      "loss": 1.4169,
      "mean_token_accuracy": 0.6710615785419941,
      "step": 200
    },
    {
      "epoch": 0.6974716652136007,
      "eval_loss": 1.4218348264694214,
      "eval_mean_token_accuracy": 0.6715860612252179,
      "eval_runtime": 215.794,
      "eval_samples_per_second": 9.449,
      "eval_steps_per_second": 1.182,
      "step": 200
    },
    {
      "epoch": 0.8718395815170009,
      "grad_norm": 0.16508391499519348,
      "learning_rate": 0.0002,
      "loss": 1.398,
      "mean_token_accuracy": 0.674775720089674,
      "step": 250
    },
    {
      "epoch": 0.8718395815170009,
      "eval_loss": 1.4061472415924072,
      "eval_mean_token_accuracy": 0.6714201174530329,
      "eval_runtime": 217.972,
      "eval_samples_per_second": 9.354,
      "eval_steps_per_second": 1.17,
      "step": 250
    },
    {
      "epoch": 1.045335658238884,
      "grad_norm": 0.06276454031467438,
      "learning_rate": 0.0002,
      "loss": 1.4208,
      "mean_token_accuracy": 0.6656776975147688,
      "step": 300
    },
    {
      "epoch": 1.045335658238884,
      "eval_loss": 1.3979426622390747,
      "eval_mean_token_accuracy": 0.6722868853924321,
      "eval_runtime": 221.6256,
      "eval_samples_per_second": 9.2,
      "eval_steps_per_second": 1.151,
      "step": 300
    },
    {
      "epoch": 1.2197035745422842,
      "grad_norm": 0.0702725201845169,
      "learning_rate": 0.0002,
      "loss": 1.3814,
      "mean_token_accuracy": 0.675048353523016,
      "step": 350
    },
    {
      "epoch": 1.2197035745422842,
      "eval_loss": 1.3879132270812988,
      "eval_mean_token_accuracy": 0.6748333828122008,
      "eval_runtime": 211.3323,
      "eval_samples_per_second": 9.648,
      "eval_steps_per_second": 1.207,
      "step": 350
    },
    {
      "epoch": 1.3940714908456844,
      "grad_norm": 0.07022608816623688,
      "learning_rate": 0.0002,
      "loss": 1.376,
      "mean_token_accuracy": 0.6765765248239041,
      "step": 400
    },
    {
      "epoch": 1.3940714908456844,
      "eval_loss": 1.3835749626159668,
      "eval_mean_token_accuracy": 0.6750268143766066,
      "eval_runtime": 211.0785,
      "eval_samples_per_second": 9.66,
      "eval_steps_per_second": 1.208,
      "step": 400
    },
    {
      "epoch": 1.5684394071490846,
      "grad_norm": 0.07153413444757462,
      "learning_rate": 0.0002,
      "loss": 1.3735,
      "mean_token_accuracy": 0.6764040923118592,
      "step": 450
    },
    {
      "epoch": 1.5684394071490846,
      "eval_loss": 1.3785712718963623,
      "eval_mean_token_accuracy": 0.6757427355822395,
      "eval_runtime": 209.9753,
      "eval_samples_per_second": 9.711,
      "eval_steps_per_second": 1.214,
      "step": 450
    },
    {
      "epoch": 1.7462946817785527,
      "grad_norm": 0.0622311495244503,
      "learning_rate": 0.0002,
      "loss": 1.3769,
      "mean_token_accuracy": 0.6763439090549945,
      "step": 500
    },
    {
      "epoch": 1.7462946817785527,
      "eval_loss": 1.3926106691360474,
      "eval_mean_token_accuracy": 0.6742508411407471,
      "eval_runtime": 211.3856,
      "eval_samples_per_second": 9.646,
      "eval_steps_per_second": 1.206,
      "step": 500
    },
    {
      "epoch": 1.920662598081953,
      "grad_norm": 0.07205487042665482,
      "learning_rate": 0.0002,
      "loss": 1.3622,
      "mean_token_accuracy": 0.6805365033447742,
      "step": 550
    },
    {
      "epoch": 1.920662598081953,
      "eval_loss": 1.3798006772994995,
      "eval_mean_token_accuracy": 0.6765407457071192,
      "eval_runtime": 211.4314,
      "eval_samples_per_second": 9.644,
      "eval_steps_per_second": 1.206,
      "step": 550
    },
    {
      "epoch": 2.0976460331299043,
      "grad_norm": 0.05617905408143997,
      "learning_rate": 0.0002,
      "loss": 1.3978,
      "mean_token_accuracy": 0.6721828516774577,
      "step": 600
    },
    {
      "epoch": 2.0976460331299043,
      "eval_loss": 1.376914620399475,
      "eval_mean_token_accuracy": 0.6766514093268151,
      "eval_runtime": 211.4284,
      "eval_samples_per_second": 9.644,
      "eval_steps_per_second": 1.206,
      "step": 600
    },
    {
      "epoch": 2.2720139494333043,
      "grad_norm": 0.06081470474600792,
      "learning_rate": 0.0002,
      "loss": 1.3542,
      "mean_token_accuracy": 0.6793570870161056,
      "step": 650
    },
    {
      "epoch": 2.2720139494333043,
      "eval_loss": 1.3769170045852661,
      "eval_mean_token_accuracy": 0.6753617045926112,
      "eval_runtime": 211.9023,
      "eval_samples_per_second": 9.622,
      "eval_steps_per_second": 1.203,
      "step": 650
    },
    {
      "epoch": 2.4463818657367042,
      "grad_norm": 0.05523712560534477,
      "learning_rate": 0.0002,
      "loss": 1.35,
      "mean_token_accuracy": 0.6805331887304783,
      "step": 700
    },
    {
      "epoch": 2.4463818657367042,
      "eval_loss": 1.3597146272659302,
      "eval_mean_token_accuracy": 0.6796985892688527,
      "eval_runtime": 211.7167,
      "eval_samples_per_second": 9.631,
      "eval_steps_per_second": 1.204,
      "step": 700
    },
    {
      "epoch": 2.6207497820401047,
      "grad_norm": 0.05956251546740532,
      "learning_rate": 0.0002,
      "loss": 1.3365,
      "mean_token_accuracy": 0.6847609375417233,
      "step": 750
    },
    {
      "epoch": 2.6207497820401047,
      "eval_loss": 1.353187084197998,
      "eval_mean_token_accuracy": 0.6810896275090236,
      "eval_runtime": 223.086,
      "eval_samples_per_second": 9.14,
      "eval_steps_per_second": 1.143,
      "step": 750
    }
  ],
  "logging_steps": 50,
  "max_steps": 858,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0807925353190031e+18,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}