squad_albert_xl_finetuned/trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 6.0,
"eval_steps": 500,
"global_step": 16620,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.18050541516245489,
"grad_norm": 36.13896942138672,
"learning_rate": 2.909747292418773e-05,
"loss": 1.6675,
"step": 500
},
{
"epoch": 0.36101083032490977,
"grad_norm": 17.48224449157715,
"learning_rate": 2.819494584837545e-05,
"loss": 0.932,
"step": 1000
},
{
"epoch": 0.5415162454873647,
"grad_norm": 15.81024169921875,
"learning_rate": 2.729241877256318e-05,
"loss": 0.89,
"step": 1500
},
{
"epoch": 0.7220216606498195,
"grad_norm": 10.735639572143555,
"learning_rate": 2.6389891696750903e-05,
"loss": 0.8503,
"step": 2000
},
{
"epoch": 0.9025270758122743,
"grad_norm": 17.32847785949707,
"learning_rate": 2.548736462093863e-05,
"loss": 0.8174,
"step": 2500
},
{
"epoch": 1.0830324909747293,
"grad_norm": 12.157166481018066,
"learning_rate": 2.4584837545126353e-05,
"loss": 0.7371,
"step": 3000
},
{
"epoch": 1.263537906137184,
"grad_norm": 1.791842805687338e-05,
"learning_rate": 2.368231046931408e-05,
"loss": 3.2033,
"step": 3500
},
{
"epoch": 1.444043321299639,
"grad_norm": 1.9402736143092625e-05,
"learning_rate": 2.2779783393501805e-05,
"loss": 5.9507,
"step": 4000
},
{
"epoch": 1.6245487364620939,
"grad_norm": 1.948318094946444e-05,
"learning_rate": 2.1877256317689534e-05,
"loss": 5.9507,
"step": 4500
},
{
"epoch": 1.8050541516245486,
"grad_norm": 1.8354030544287525e-05,
"learning_rate": 2.0974729241877255e-05,
"loss": 5.9507,
"step": 5000
},
{
"epoch": 1.9855595667870036,
"grad_norm": 1.7204820323968306e-05,
"learning_rate": 2.0072202166064983e-05,
"loss": 5.9507,
"step": 5500
},
{
"epoch": 2.1660649819494586,
"grad_norm": 1.8372866179561242e-05,
"learning_rate": 1.9169675090252708e-05,
"loss": 5.9507,
"step": 6000
},
{
"epoch": 2.3465703971119134,
"grad_norm": 1.6711237549316138e-05,
"learning_rate": 1.8267148014440436e-05,
"loss": 5.9507,
"step": 6500
},
{
"epoch": 2.527075812274368,
"grad_norm": 1.866539241746068e-05,
"learning_rate": 1.7364620938628157e-05,
"loss": 5.9507,
"step": 7000
},
{
"epoch": 2.707581227436823,
"grad_norm": 1.9231085389037617e-05,
"learning_rate": 1.6462093862815885e-05,
"loss": 5.9507,
"step": 7500
},
{
"epoch": 2.888086642599278,
"grad_norm": 2.1632338757626712e-05,
"learning_rate": 1.555956678700361e-05,
"loss": 5.9507,
"step": 8000
},
{
"epoch": 3.068592057761733,
"grad_norm": 1.8851398635888472e-05,
"learning_rate": 1.4657039711191336e-05,
"loss": 5.9507,
"step": 8500
},
{
"epoch": 3.2490974729241877,
"grad_norm": 1.6099018466775306e-05,
"learning_rate": 1.3754512635379063e-05,
"loss": 5.9507,
"step": 9000
},
{
"epoch": 3.4296028880866425,
"grad_norm": 1.880363743111957e-05,
"learning_rate": 1.2851985559566788e-05,
"loss": 5.9507,
"step": 9500
},
{
"epoch": 3.6101083032490973,
"grad_norm": 1.930674807226751e-05,
"learning_rate": 1.1949458483754514e-05,
"loss": 5.9507,
"step": 10000
},
{
"epoch": 3.7906137184115525,
"grad_norm": 1.641635572013911e-05,
"learning_rate": 1.1046931407942239e-05,
"loss": 5.9507,
"step": 10500
},
{
"epoch": 3.9711191335740073,
"grad_norm": 1.3756233784079086e-05,
"learning_rate": 1.0144404332129965e-05,
"loss": 5.9507,
"step": 11000
},
{
"epoch": 4.1516245487364625,
"grad_norm": 1.3154494808986783e-05,
"learning_rate": 9.24187725631769e-06,
"loss": 5.9507,
"step": 11500
},
{
"epoch": 4.332129963898917,
"grad_norm": 1.031011197483167e-05,
"learning_rate": 8.339350180505416e-06,
"loss": 5.9507,
"step": 12000
},
{
"epoch": 4.512635379061372,
"grad_norm": 1.8964210539706983e-05,
"learning_rate": 7.436823104693141e-06,
"loss": 5.9507,
"step": 12500
},
{
"epoch": 4.693140794223827,
"grad_norm": 2.142435550922528e-05,
"learning_rate": 6.534296028880867e-06,
"loss": 5.9507,
"step": 13000
},
{
"epoch": 4.873646209386282,
"grad_norm": 2.3836026230128482e-05,
"learning_rate": 5.631768953068592e-06,
"loss": 5.9507,
"step": 13500
},
{
"epoch": 5.054151624548736,
"grad_norm": 2.4697543267393485e-05,
"learning_rate": 4.729241877256318e-06,
"loss": 5.9507,
"step": 14000
},
{
"epoch": 5.234657039711191,
"grad_norm": 2.540420609875582e-05,
"learning_rate": 3.826714801444043e-06,
"loss": 5.9507,
"step": 14500
},
{
"epoch": 5.415162454873646,
"grad_norm": 2.4388718884438276e-05,
"learning_rate": 2.924187725631769e-06,
"loss": 5.9507,
"step": 15000
},
{
"epoch": 5.595667870036101,
"grad_norm": 2.4222141291829757e-05,
"learning_rate": 2.0216606498194946e-06,
"loss": 5.9507,
"step": 15500
},
{
"epoch": 5.776173285198556,
"grad_norm": 2.60755459748907e-05,
"learning_rate": 1.1191335740072204e-06,
"loss": 5.9507,
"step": 16000
},
{
"epoch": 5.956678700361011,
"grad_norm": 2.603538814582862e-05,
"learning_rate": 2.1660649819494586e-07,
"loss": 5.9507,
"step": 16500
},
{
"epoch": 6.0,
"step": 16620,
"total_flos": 6.203464557305242e+16,
"train_loss": 4.971211804551769,
"train_runtime": 43621.7229,
"train_samples_per_second": 12.192,
"train_steps_per_second": 0.381
}
],
"logging_steps": 500,
"max_steps": 16620,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.203464557305242e+16,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}
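
The file above is the standard transformers Trainer state: log_history holds one entry per logging event (logging_steps = 500 in this run) plus a final summary entry with run-level aggregates. A minimal Python sketch for reading it, assuming the file has been saved locally as trainer_state.json (the path is illustrative, not part of the repo):

import json

# Load the trainer state written by transformers.Trainer at the end of training.
# The filename/path is an assumption for this sketch; point it at the downloaded file.
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history has one dict per logging event and a final summary dict;
# the summary dict carries "train_loss"/"train_runtime" instead of "loss",
# so filtering on "loss" keeps only the per-step logging entries.
for entry in state["log_history"]:
    if "loss" in entry:
        print(
            f"step {entry['step']:>6}  "
            f"loss {entry['loss']:7.4f}  "
            f"grad_norm {entry['grad_norm']:.3e}  "
            f"lr {entry['learning_rate']:.3e}"
        )

# The last element of log_history holds the run-level aggregates.
summary = state["log_history"][-1]
print(
    f"\n{state['global_step']} steps in {summary['train_runtime']:.0f}s, "
    f"mean train loss {summary['train_loss']:.4f}"
)

Printed this way, the trajectory makes the collapse after step 3000 easy to see: the loss jumps from 0.7371 to 3.2033 at step 3500 and then flatlines at 5.9507 for the rest of the run, while grad_norm drops from double digits to roughly 2e-5.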