{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.2980251346499103,
"eval_steps": 500,
"global_step": 40,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05745062836624776,
"grad_norm": 3.1636736392974854,
"learning_rate": 5e-06,
"loss": 2.3037,
"step": 1
},
{
"epoch": 0.11490125673249552,
"grad_norm": 3.3778610229492188,
"learning_rate": 1e-05,
"loss": 2.3337,
"step": 2
},
{
"epoch": 0.17235188509874327,
"grad_norm": 3.0278897285461426,
"learning_rate": 1.5e-05,
"loss": 2.2715,
"step": 3
},
{
"epoch": 0.22980251346499103,
"grad_norm": 2.5405051708221436,
"learning_rate": 2e-05,
"loss": 2.2422,
"step": 4
},
{
"epoch": 0.2872531418312388,
"grad_norm": 1.789209246635437,
"learning_rate": 2.5e-05,
"loss": 2.2079,
"step": 5
},
{
"epoch": 0.34470377019748655,
"grad_norm": 1.653626561164856,
"learning_rate": 3e-05,
"loss": 2.2231,
"step": 6
},
{
"epoch": 0.4021543985637343,
"grad_norm": 2.006115436553955,
"learning_rate": 3.5e-05,
"loss": 2.2855,
"step": 7
},
{
"epoch": 0.45960502692998206,
"grad_norm": 2.322628974914551,
"learning_rate": 4e-05,
"loss": 2.2126,
"step": 8
},
{
"epoch": 0.5170556552962298,
"grad_norm": 2.069558620452881,
"learning_rate": 4.5e-05,
"loss": 2.2389,
"step": 9
},
{
"epoch": 0.5745062836624776,
"grad_norm": 1.7339693307876587,
"learning_rate": 5e-05,
"loss": 2.1964,
"step": 10
},
{
"epoch": 0.6319569120287253,
"grad_norm": 1.6608643531799316,
"learning_rate": 4.992664502959351e-05,
"loss": 2.2079,
"step": 11
},
{
"epoch": 0.6894075403949731,
"grad_norm": 1.642751932144165,
"learning_rate": 4.970701059450872e-05,
"loss": 2.2359,
"step": 12
},
{
"epoch": 0.7468581687612208,
"grad_norm": 1.6817108392715454,
"learning_rate": 4.934238559694448e-05,
"loss": 2.2229,
"step": 13
},
{
"epoch": 0.8043087971274686,
"grad_norm": 1.6007517576217651,
"learning_rate": 4.8834909801373264e-05,
"loss": 2.203,
"step": 14
},
{
"epoch": 0.8617594254937163,
"grad_norm": 1.457571029663086,
"learning_rate": 4.8187561277552374e-05,
"loss": 2.0994,
"step": 15
},
{
"epoch": 0.9192100538599641,
"grad_norm": 1.486559271812439,
"learning_rate": 4.740413892402639e-05,
"loss": 2.1777,
"step": 16
},
{
"epoch": 0.9766606822262118,
"grad_norm": 1.5398966073989868,
"learning_rate": 4.648924017468003e-05,
"loss": 2.2126,
"step": 17
},
{
"epoch": 1.0341113105924595,
"grad_norm": 1.4850305318832397,
"learning_rate": 4.5448234019167945e-05,
"loss": 2.1019,
"step": 18
},
{
"epoch": 1.0915619389587075,
"grad_norm": 1.4637010097503662,
"learning_rate": 4.428722949554857e-05,
"loss": 2.1119,
"step": 19
},
{
"epoch": 1.1490125673249552,
"grad_norm": 1.3282859325408936,
"learning_rate": 4.301303984001967e-05,
"loss": 2.0976,
"step": 20
},
{
"epoch": 1.2064631956912029,
"grad_norm": 1.2366828918457031,
"learning_rate": 4.163314250413913e-05,
"loss": 2.0682,
"step": 21
},
{
"epoch": 1.2639138240574506,
"grad_norm": 1.3221453428268433,
"learning_rate": 4.015563527416595e-05,
"loss": 2.0266,
"step": 22
},
{
"epoch": 1.3213644524236985,
"grad_norm": 1.269490122795105,
"learning_rate": 3.858918875003053e-05,
"loss": 2.011,
"step": 23
},
{
"epoch": 1.3788150807899462,
"grad_norm": 1.2723491191864014,
"learning_rate": 3.694299546280657e-05,
"loss": 2.0503,
"step": 24
},
{
"epoch": 1.436265709156194,
"grad_norm": 1.3157715797424316,
"learning_rate": 3.5226715929283506e-05,
"loss": 2.0297,
"step": 25
},
{
"epoch": 1.4937163375224416,
"grad_norm": 1.367810606956482,
"learning_rate": 3.3450421960212566e-05,
"loss": 2.03,
"step": 26
},
{
"epoch": 1.5511669658886893,
"grad_norm": 1.4925018548965454,
"learning_rate": 3.162453755491655e-05,
"loss": 2.0478,
"step": 27
},
{
"epoch": 1.608617594254937,
"grad_norm": 1.350865125656128,
"learning_rate": 2.975977772911671e-05,
"loss": 2.1047,
"step": 28
},
{
"epoch": 1.666068222621185,
"grad_norm": 1.3046050071716309,
"learning_rate": 2.7867085634960016e-05,
"loss": 2.0175,
"step": 29
},
{
"epoch": 1.7235188509874326,
"grad_norm": 1.3713139295578003,
"learning_rate": 2.595756834225089e-05,
"loss": 2.0437,
"step": 30
},
{
"epoch": 1.7809694793536806,
"grad_norm": 1.2804160118103027,
"learning_rate": 2.4042431657749117e-05,
"loss": 2.0573,
"step": 31
},
{
"epoch": 1.8384201077199283,
"grad_norm": 1.257683277130127,
"learning_rate": 2.2132914365039993e-05,
"loss": 2.0105,
"step": 32
},
{
"epoch": 1.895870736086176,
"grad_norm": 1.2399568557739258,
"learning_rate": 2.0240222270883288e-05,
"loss": 2.0335,
"step": 33
},
{
"epoch": 1.9533213644524237,
"grad_norm": 1.1331653594970703,
"learning_rate": 1.8375462445083464e-05,
"loss": 2.0217,
"step": 34
},
{
"epoch": 2.0107719928186714,
"grad_norm": 1.219581961631775,
"learning_rate": 1.6549578039787436e-05,
"loss": 2.0096,
"step": 35
},
{
"epoch": 2.068222621184919,
"grad_norm": 1.239498257637024,
"learning_rate": 1.4773284070716503e-05,
"loss": 2.0598,
"step": 36
},
{
"epoch": 2.1256732495511668,
"grad_norm": 1.2558954954147339,
"learning_rate": 1.3057004537193423e-05,
"loss": 2.0412,
"step": 37
},
{
"epoch": 2.183123877917415,
"grad_norm": 1.0945749282836914,
"learning_rate": 1.1410811249969475e-05,
"loss": 1.959,
"step": 38
},
{
"epoch": 2.2405745062836626,
"grad_norm": 1.0557724237442017,
"learning_rate": 9.844364725834057e-06,
"loss": 1.9716,
"step": 39
},
{
"epoch": 2.2980251346499103,
"grad_norm": 1.1883456707000732,
"learning_rate": 8.36685749586087e-06,
"loss": 1.9485,
"step": 40
}
],
"logging_steps": 1,
"max_steps": 51,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 4,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.7513920113127424e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}