z3-api-reasoning / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 30.0,
"eval_steps": 500,
"global_step": 60,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.5,
"grad_norm": 0.40977156162261963,
"learning_rate": 4e-05,
"loss": 0.6411,
"step": 1
},
{
"epoch": 1.0,
"grad_norm": 0.42220255732536316,
"learning_rate": 8e-05,
"loss": 0.6556,
"step": 2
},
{
"epoch": 1.5,
"grad_norm": 0.29474809765815735,
"learning_rate": 0.00012,
"loss": 0.6299,
"step": 3
},
{
"epoch": 2.0,
"grad_norm": 0.22392094135284424,
"learning_rate": 0.00016,
"loss": 0.6031,
"step": 4
},
{
"epoch": 2.5,
"grad_norm": 0.2246803194284439,
"learning_rate": 0.0002,
"loss": 0.5672,
"step": 5
},
{
"epoch": 3.0,
"grad_norm": 0.1952209174633026,
"learning_rate": 0.00019636363636363636,
"loss": 0.531,
"step": 6
},
{
"epoch": 3.5,
"grad_norm": 0.2026151567697525,
"learning_rate": 0.00019272727272727274,
"loss": 0.4935,
"step": 7
},
{
"epoch": 4.0,
"grad_norm": 0.20893992483615875,
"learning_rate": 0.0001890909090909091,
"loss": 0.4368,
"step": 8
},
{
"epoch": 4.5,
"grad_norm": 0.21967507898807526,
"learning_rate": 0.00018545454545454545,
"loss": 0.3984,
"step": 9
},
{
"epoch": 5.0,
"grad_norm": 0.2647532820701599,
"learning_rate": 0.00018181818181818183,
"loss": 0.3768,
"step": 10
},
{
"epoch": 5.5,
"grad_norm": 0.24694104492664337,
"learning_rate": 0.0001781818181818182,
"loss": 0.3285,
"step": 11
},
{
"epoch": 6.0,
"grad_norm": 0.3180811405181885,
"learning_rate": 0.00017454545454545454,
"loss": 0.2968,
"step": 12
},
{
"epoch": 6.5,
"grad_norm": 0.2574905753135681,
"learning_rate": 0.0001709090909090909,
"loss": 0.2625,
"step": 13
},
{
"epoch": 7.0,
"grad_norm": 0.5857559442520142,
"learning_rate": 0.00016727272727272728,
"loss": 0.2228,
"step": 14
},
{
"epoch": 7.5,
"grad_norm": 0.24397815763950348,
"learning_rate": 0.00016363636363636366,
"loss": 0.2072,
"step": 15
},
{
"epoch": 8.0,
"grad_norm": 0.2656276524066925,
"learning_rate": 0.00016,
"loss": 0.1601,
"step": 16
},
{
"epoch": 8.5,
"grad_norm": 0.24156434834003448,
"learning_rate": 0.00015636363636363637,
"loss": 0.1538,
"step": 17
},
{
"epoch": 9.0,
"grad_norm": 0.21211856603622437,
"learning_rate": 0.00015272727272727275,
"loss": 0.139,
"step": 18
},
{
"epoch": 9.5,
"grad_norm": 0.22596681118011475,
"learning_rate": 0.0001490909090909091,
"loss": 0.1297,
"step": 19
},
{
"epoch": 10.0,
"grad_norm": 0.3587157428264618,
"learning_rate": 0.00014545454545454546,
"loss": 0.1147,
"step": 20
},
{
"epoch": 10.5,
"grad_norm": 0.11396785080432892,
"learning_rate": 0.00014181818181818184,
"loss": 0.0921,
"step": 21
},
{
"epoch": 11.0,
"grad_norm": 0.16308319568634033,
"learning_rate": 0.0001381818181818182,
"loss": 0.1176,
"step": 22
},
{
"epoch": 11.5,
"grad_norm": 0.10265325754880905,
"learning_rate": 0.00013454545454545455,
"loss": 0.0933,
"step": 23
},
{
"epoch": 12.0,
"grad_norm": 0.12172083556652069,
"learning_rate": 0.00013090909090909093,
"loss": 0.0948,
"step": 24
},
{
"epoch": 12.5,
"grad_norm": 0.1143866702914238,
"learning_rate": 0.00012727272727272728,
"loss": 0.088,
"step": 25
},
{
"epoch": 13.0,
"grad_norm": 0.10438387840986252,
"learning_rate": 0.00012363636363636364,
"loss": 0.0815,
"step": 26
},
{
"epoch": 13.5,
"grad_norm": 0.09685764461755753,
"learning_rate": 0.00012,
"loss": 0.0796,
"step": 27
},
{
"epoch": 14.0,
"grad_norm": 0.09470539540052414,
"learning_rate": 0.00011636363636363636,
"loss": 0.0731,
"step": 28
},
{
"epoch": 14.5,
"grad_norm": 0.08961515873670578,
"learning_rate": 0.00011272727272727272,
"loss": 0.0635,
"step": 29
},
{
"epoch": 15.0,
"grad_norm": 0.09088480472564697,
"learning_rate": 0.00010909090909090909,
"loss": 0.0735,
"step": 30
},
{
"epoch": 15.5,
"grad_norm": 0.08304226398468018,
"learning_rate": 0.00010545454545454545,
"loss": 0.0608,
"step": 31
},
{
"epoch": 16.0,
"grad_norm": 0.08834656327962875,
"learning_rate": 0.00010181818181818181,
"loss": 0.0635,
"step": 32
},
{
"epoch": 16.5,
"grad_norm": 0.0853792354464531,
"learning_rate": 9.818181818181818e-05,
"loss": 0.0556,
"step": 33
},
{
"epoch": 17.0,
"grad_norm": 0.08675861358642578,
"learning_rate": 9.454545454545455e-05,
"loss": 0.0579,
"step": 34
},
{
"epoch": 17.5,
"grad_norm": 0.09384719282388687,
"learning_rate": 9.090909090909092e-05,
"loss": 0.0551,
"step": 35
},
{
"epoch": 18.0,
"grad_norm": 0.08688965439796448,
"learning_rate": 8.727272727272727e-05,
"loss": 0.0481,
"step": 36
},
{
"epoch": 18.5,
"grad_norm": 0.08417191356420517,
"learning_rate": 8.363636363636364e-05,
"loss": 0.0461,
"step": 37
},
{
"epoch": 19.0,
"grad_norm": 0.0866803452372551,
"learning_rate": 8e-05,
"loss": 0.0478,
"step": 38
},
{
"epoch": 19.5,
"grad_norm": 0.08191067725419998,
"learning_rate": 7.636363636363637e-05,
"loss": 0.0451,
"step": 39
},
{
"epoch": 20.0,
"grad_norm": 0.08162440359592438,
"learning_rate": 7.272727272727273e-05,
"loss": 0.0401,
"step": 40
},
{
"epoch": 20.5,
"grad_norm": 0.0843905583024025,
"learning_rate": 6.90909090909091e-05,
"loss": 0.0343,
"step": 41
},
{
"epoch": 21.0,
"grad_norm": 0.09161655604839325,
"learning_rate": 6.545454545454546e-05,
"loss": 0.0429,
"step": 42
},
{
"epoch": 21.5,
"grad_norm": 0.08548980951309204,
"learning_rate": 6.181818181818182e-05,
"loss": 0.0364,
"step": 43
},
{
"epoch": 22.0,
"grad_norm": 0.09480360895395279,
"learning_rate": 5.818181818181818e-05,
"loss": 0.0336,
"step": 44
},
{
"epoch": 22.5,
"grad_norm": 0.08082766085863113,
"learning_rate": 5.4545454545454546e-05,
"loss": 0.0313,
"step": 45
},
{
"epoch": 23.0,
"grad_norm": 0.08921442180871964,
"learning_rate": 5.090909090909091e-05,
"loss": 0.0321,
"step": 46
},
{
"epoch": 23.5,
"grad_norm": 0.08190133422613144,
"learning_rate": 4.7272727272727275e-05,
"loss": 0.0289,
"step": 47
},
{
"epoch": 24.0,
"grad_norm": 0.08818236738443375,
"learning_rate": 4.3636363636363636e-05,
"loss": 0.0287,
"step": 48
},
{
"epoch": 24.5,
"grad_norm": 0.0787026658654213,
"learning_rate": 4e-05,
"loss": 0.027,
"step": 49
},
{
"epoch": 25.0,
"grad_norm": 0.08572469651699066,
"learning_rate": 3.6363636363636364e-05,
"loss": 0.0252,
"step": 50
},
{
"epoch": 25.5,
"grad_norm": 0.0922461524605751,
"learning_rate": 3.272727272727273e-05,
"loss": 0.0265,
"step": 51
},
{
"epoch": 26.0,
"grad_norm": 0.09386759996414185,
"learning_rate": 2.909090909090909e-05,
"loss": 0.0218,
"step": 52
},
{
"epoch": 26.5,
"grad_norm": 0.09283447265625,
"learning_rate": 2.5454545454545454e-05,
"loss": 0.0231,
"step": 53
},
{
"epoch": 27.0,
"grad_norm": 0.08825419098138809,
"learning_rate": 2.1818181818181818e-05,
"loss": 0.0212,
"step": 54
},
{
"epoch": 27.5,
"grad_norm": 0.08191809058189392,
"learning_rate": 1.8181818181818182e-05,
"loss": 0.0217,
"step": 55
},
{
"epoch": 28.0,
"grad_norm": 0.08192411810159683,
"learning_rate": 1.4545454545454545e-05,
"loss": 0.0196,
"step": 56
},
{
"epoch": 28.5,
"grad_norm": 0.09538112580776215,
"learning_rate": 1.0909090909090909e-05,
"loss": 0.0202,
"step": 57
},
{
"epoch": 29.0,
"grad_norm": 0.09735918790102005,
"learning_rate": 7.272727272727272e-06,
"loss": 0.019,
"step": 58
},
{
"epoch": 29.5,
"grad_norm": 0.08030348271131516,
"learning_rate": 3.636363636363636e-06,
"loss": 0.0171,
"step": 59
},
{
"epoch": 30.0,
"grad_norm": 0.0887613520026207,
"learning_rate": 0.0,
"loss": 0.0207,
"step": 60
}
],
"logging_steps": 1,
"max_steps": 60,
"num_input_tokens_seen": 0,
"num_train_epochs": 30,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7072322089623552.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
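
The "log_history" list above holds one record per optimizer step (epoch, step, loss, grad_norm, learning_rate). Below is a minimal sketch of how that history could be inspected, assuming the file has been downloaded locally as trainer_state.json (the path is illustrative, not part of the checkpoint itself):

# Minimal sketch: load trainer_state.json and print the loss curve.
# Assumes the file sits in the current working directory.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Each entry in log_history corresponds to one logged training step.
for entry in state["log_history"]:
    print(f"epoch {entry['epoch']:>5}  step {entry['step']:>2}  "
          f"loss {entry['loss']:.4f}  lr {entry['learning_rate']:.2e}")

# The last entry reflects the final training step (step 60, epoch 30).
print("final loss:", state["log_history"][-1]["loss"])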