Upload trainer_state.json with huggingface_hub
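The commit message above indicates the file was pushed with the huggingface_hub client. As a minimal sketch of that step (the repo_id below is a hypothetical placeholder, not taken from this repository, and authentication is assumed to come from a prior `huggingface-cli login`):

```python
# Sketch only: repo_id is a made-up placeholder for illustration.
from huggingface_hub import HfApi

api = HfApi()  # picks up the locally stored access token
api.upload_file(
    path_or_fileobj="trainer_state.json",
    path_in_repo="trainer_state.json",
    repo_id="Boffl/example-model",  # hypothetical repo id
    commit_message="Upload trainer_state.json with huggingface_hub",
)
```

The raw contents of the uploaded trainer_state.json follow.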
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9993906154783668,
"eval_steps": 500,
"global_step": 410,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02437538086532602,
"grad_norm": 0.381931334733963,
"learning_rate": 1.2195121951219513e-05,
"loss": 3.6679,
"step": 10
},
{
"epoch": 0.04875076173065204,
"grad_norm": 0.19698743522167206,
"learning_rate": 2.4390243902439026e-05,
"loss": 3.5098,
"step": 20
},
{
"epoch": 0.07312614259597806,
"grad_norm": 0.26034015417099,
"learning_rate": 3.6585365853658535e-05,
"loss": 3.3353,
"step": 30
},
{
"epoch": 0.09750152346130408,
"grad_norm": 0.29549556970596313,
"learning_rate": 4.878048780487805e-05,
"loss": 3.2342,
"step": 40
},
{
"epoch": 0.1218769043266301,
"grad_norm": 0.2754429578781128,
"learning_rate": 4.992664502959351e-05,
"loss": 3.1812,
"step": 50
},
{
"epoch": 0.14625228519195613,
"grad_norm": 0.3207014799118042,
"learning_rate": 4.967362490933723e-05,
"loss": 3.1661,
"step": 60
},
{
"epoch": 0.17062766605728213,
"grad_norm": 0.30315226316452026,
"learning_rate": 4.924186648858207e-05,
"loss": 3.0649,
"step": 70
},
{
"epoch": 0.19500304692260817,
"grad_norm": 0.26564204692840576,
"learning_rate": 4.863449747015384e-05,
"loss": 3.0024,
"step": 80
},
{
"epoch": 0.21937842778793418,
"grad_norm": 0.3084321618080139,
"learning_rate": 4.7855917698280054e-05,
"loss": 2.9751,
"step": 90
},
{
"epoch": 0.2437538086532602,
"grad_norm": 0.2992245554924011,
"learning_rate": 4.691176728566159e-05,
"loss": 3.0007,
"step": 100
},
{
"epoch": 0.2681291895185862,
"grad_norm": 0.3074418306350708,
"learning_rate": 4.580888575591068e-05,
"loss": 2.9229,
"step": 110
},
{
"epoch": 0.29250457038391225,
"grad_norm": 0.2845458984375,
"learning_rate": 4.455526249733178e-05,
"loss": 2.8955,
"step": 120
},
{
"epoch": 0.3168799512492383,
"grad_norm": 0.3487965166568756,
"learning_rate": 4.3159978886963226e-05,
"loss": 2.8837,
"step": 130
},
{
"epoch": 0.34125533211456427,
"grad_norm": 0.307179719209671,
"learning_rate": 4.163314250413913e-05,
"loss": 2.8862,
"step": 140
},
{
"epoch": 0.3656307129798903,
"grad_norm": 0.3133622407913208,
"learning_rate": 3.9985813910135304e-05,
"loss": 2.9099,
"step": 150
},
{
"epoch": 0.39000609384521634,
"grad_norm": 0.3793308138847351,
"learning_rate": 3.8229926524315016e-05,
"loss": 2.8219,
"step": 160
},
{
"epoch": 0.4143814747105424,
"grad_norm": 0.3438339829444885,
"learning_rate": 3.6378200177200224e-05,
"loss": 2.7811,
"step": 170
},
{
"epoch": 0.43875685557586835,
"grad_norm": 0.33851078152656555,
"learning_rate": 3.444404896669865e-05,
"loss": 2.8869,
"step": 180
},
{
"epoch": 0.4631322364411944,
"grad_norm": 0.3436271846294403,
"learning_rate": 3.2441484084985865e-05,
"loss": 2.8078,
"step": 190
},
{
"epoch": 0.4875076173065204,
"grad_norm": 0.29718300700187683,
"learning_rate": 3.0385012319974537e-05,
"loss": 2.8488,
"step": 200
},
{
"epoch": 0.5118829981718465,
"grad_norm": 0.3266434073448181,
"learning_rate": 2.8289530966636625e-05,
"loss": 2.7989,
"step": 210
},
{
"epoch": 0.5362583790371724,
"grad_norm": 0.3141980767250061,
"learning_rate": 2.617021990945197e-05,
"loss": 2.7921,
"step": 220
},
{
"epoch": 0.5606337599024985,
"grad_norm": 0.3401101231575012,
"learning_rate": 2.4042431657749117e-05,
"loss": 2.8359,
"step": 230
},
{
"epoch": 0.5850091407678245,
"grad_norm": 0.31733494997024536,
"learning_rate": 2.1921580130533827e-05,
"loss": 2.7748,
"step": 240
},
{
"epoch": 0.6093845216331505,
"grad_norm": 0.3072984218597412,
"learning_rate": 1.9823028996459486e-05,
"loss": 2.7994,
"step": 250
},
{
"epoch": 0.6337599024984766,
"grad_norm": 0.35069364309310913,
"learning_rate": 1.7761980377816287e-05,
"loss": 2.7459,
"step": 260
},
{
"epoch": 0.6581352833638026,
"grad_norm": 0.3242040276527405,
"learning_rate": 1.5753364724779092e-05,
"loss": 2.838,
"step": 270
},
{
"epoch": 0.6825106642291285,
"grad_norm": 0.3111414611339569,
"learning_rate": 1.381173265767623e-05,
"loss": 2.7298,
"step": 280
},
{
"epoch": 0.7068860450944546,
"grad_norm": 0.3313029408454895,
"learning_rate": 1.1951149560785167e-05,
"loss": 2.7464,
"step": 290
},
{
"epoch": 0.7312614259597806,
"grad_norm": 0.31917551159858704,
"learning_rate": 1.0185093691228534e-05,
"loss": 2.7118,
"step": 300
},
{
"epoch": 0.7556368068251066,
"grad_norm": 0.3485548496246338,
"learning_rate": 8.526358541080173e-06,
"loss": 2.6857,
"step": 310
},
{
"epoch": 0.7800121876904327,
"grad_norm": 0.31290727853775024,
"learning_rate": 6.986960159980327e-06,
"loss": 2.71,
"step": 320
},
{
"epoch": 0.8043875685557587,
"grad_norm": 0.2982420325279236,
"learning_rate": 5.578050109624511e-06,
"loss": 2.8041,
"step": 330
},
{
"epoch": 0.8287629494210847,
"grad_norm": 0.29485708475112915,
"learning_rate": 4.309834680692832e-06,
"loss": 2.6818,
"step": 340
},
{
"epoch": 0.8531383302864107,
"grad_norm": 0.29043450951576233,
"learning_rate": 3.1915009574206262e-06,
"loss": 2.6666,
"step": 350
},
{
"epoch": 0.8775137111517367,
"grad_norm": 0.32468873262405396,
"learning_rate": 2.231150265406512e-06,
"loss": 2.7277,
"step": 360
},
{
"epoch": 0.9018890920170628,
"grad_norm": 0.3383389413356781,
"learning_rate": 1.435739484768603e-06,
"loss": 2.7256,
"step": 370
},
{
"epoch": 0.9262644728823888,
"grad_norm": 0.31536731123924255,
"learning_rate": 8.110306537826601e-07,
"loss": 2.7143,
"step": 380
},
{
"epoch": 0.9506398537477148,
"grad_norm": 0.30096814036369324,
"learning_rate": 3.6154922807863643e-07,
"loss": 2.7688,
"step": 390
},
{
"epoch": 0.9750152346130408,
"grad_norm": 0.2999325096607208,
"learning_rate": 9.055129777021665e-08,
"loss": 2.7355,
"step": 400
},
{
"epoch": 0.9993906154783668,
"grad_norm": 0.31551599502563477,
"learning_rate": 0.0,
"loss": 2.751,
"step": 410
},
{
"epoch": 0.9993906154783668,
"step": 410,
"total_flos": 6.066570733918618e+17,
"train_loss": 2.8908857531663847,
"train_runtime": 5592.6214,
"train_samples_per_second": 2.346,
"train_steps_per_second": 0.073
}
],
"logging_steps": 10,
"max_steps": 410,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.066570733918618e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
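For reference, the `log_history` array holds one entry per logging interval (`logging_steps` = 10), each with `step`, `loss`, `learning_rate`, and `grad_norm`, plus a final summary entry with the run totals. A minimal, stdlib-only sketch for inspecting the file locally (assuming it has been downloaded as `trainer_state.json`):

```python
# Sketch: load the trainer state and pull out the logged loss curve.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Entries with a "loss" key are the periodic training logs;
# the last entry of log_history is the end-of-training summary.
steps = [e["step"] for e in state["log_history"] if "loss" in e]
losses = [e["loss"] for e in state["log_history"] if "loss" in e]
summary = state["log_history"][-1]

print(f"{len(steps)} logged points, final logged loss {losses[-1]}")
print(f"mean train loss {summary['train_loss']:.4f}, "
      f"runtime {summary['train_runtime']:.1f}s")
```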