{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9411764705882355,
"eval_steps": 500,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03,
"learning_rate": 0.00019805941782534764,
"loss": 2.0676,
"step": 1
},
{
"epoch": 0.06,
"learning_rate": 0.00019605881764529358,
"loss": 2.165,
"step": 2
},
{
"epoch": 0.09,
"learning_rate": 0.00019405821746523957,
"loss": 1.6457,
"step": 3
},
{
"epoch": 0.12,
"learning_rate": 0.00019205761728518557,
"loss": 1.2642,
"step": 4
},
{
"epoch": 0.15,
"learning_rate": 0.00019005701710513156,
"loss": 0.9945,
"step": 5
},
{
"epoch": 0.18,
"learning_rate": 0.00018805641692507753,
"loss": 1.1607,
"step": 6
},
{
"epoch": 0.21,
"learning_rate": 0.00018605581674502352,
"loss": 0.8384,
"step": 7
},
{
"epoch": 0.24,
"learning_rate": 0.00018405521656496952,
"loss": 1.4198,
"step": 8
},
{
"epoch": 0.26,
"learning_rate": 0.00018205461638491548,
"loss": 1.065,
"step": 9
},
{
"epoch": 0.29,
"learning_rate": 0.00018005401620486148,
"loss": 1.5488,
"step": 10
},
{
"epoch": 0.32,
"learning_rate": 0.00017805341602480744,
"loss": 0.926,
"step": 11
},
{
"epoch": 0.35,
"learning_rate": 0.00017605281584475344,
"loss": 1.1462,
"step": 12
},
{
"epoch": 0.38,
"learning_rate": 0.00017405221566469943,
"loss": 1.288,
"step": 13
},
{
"epoch": 0.41,
"learning_rate": 0.00017205161548464542,
"loss": 0.9373,
"step": 14
},
{
"epoch": 0.44,
"learning_rate": 0.00017005101530459136,
"loss": 0.7286,
"step": 15
},
{
"epoch": 0.47,
"learning_rate": 0.00016805041512453736,
"loss": 1.1179,
"step": 16
},
{
"epoch": 0.5,
"learning_rate": 0.00016604981494448335,
"loss": 0.6268,
"step": 17
},
{
"epoch": 0.53,
"learning_rate": 0.00016404921476442935,
"loss": 1.2474,
"step": 18
},
{
"epoch": 0.56,
"learning_rate": 0.0001620486145843753,
"loss": 0.8392,
"step": 19
},
{
"epoch": 0.59,
"learning_rate": 0.0001600480144043213,
"loss": 1.1382,
"step": 20
},
{
"epoch": 0.62,
"learning_rate": 0.0001580474142242673,
"loss": 1.171,
"step": 21
},
{
"epoch": 0.65,
"learning_rate": 0.00015604681404421327,
"loss": 0.8168,
"step": 22
},
{
"epoch": 0.68,
"learning_rate": 0.00015404621386415926,
"loss": 1.336,
"step": 23
},
{
"epoch": 0.71,
"learning_rate": 0.00015204561368410523,
"loss": 1.148,
"step": 24
},
{
"epoch": 0.74,
"learning_rate": 0.00015004501350405122,
"loss": 0.9818,
"step": 25
},
{
"epoch": 0.76,
"learning_rate": 0.00014804441332399721,
"loss": 1.1472,
"step": 26
},
{
"epoch": 0.79,
"learning_rate": 0.0001460438131439432,
"loss": 1.0053,
"step": 27
},
{
"epoch": 0.82,
"learning_rate": 0.00014404321296388918,
"loss": 1.053,
"step": 28
},
{
"epoch": 0.85,
"learning_rate": 0.00014204261278383514,
"loss": 1.2345,
"step": 29
},
{
"epoch": 0.88,
"learning_rate": 0.00014004201260378114,
"loss": 0.9872,
"step": 30
},
{
"epoch": 0.91,
"learning_rate": 0.00013804141242372713,
"loss": 1.1111,
"step": 31
},
{
"epoch": 0.94,
"learning_rate": 0.00013604081224367312,
"loss": 0.9571,
"step": 32
},
{
"epoch": 0.97,
"learning_rate": 0.0001340402120636191,
"loss": 1.0676,
"step": 33
},
{
"epoch": 1.0,
"learning_rate": 0.00013203961188356508,
"loss": 0.9105,
"step": 34
},
{
"epoch": 1.03,
"learning_rate": 0.00013003901170351108,
"loss": 0.638,
"step": 35
},
{
"epoch": 1.06,
"learning_rate": 0.00012803841152345704,
"loss": 1.0111,
"step": 36
},
{
"epoch": 1.09,
"learning_rate": 0.000126037811343403,
"loss": 0.6401,
"step": 37
},
{
"epoch": 1.12,
"learning_rate": 0.000124037211163349,
"loss": 0.7887,
"step": 38
},
{
"epoch": 1.15,
"learning_rate": 0.000122036610983295,
"loss": 0.7075,
"step": 39
},
{
"epoch": 1.18,
"learning_rate": 0.00012003601080324098,
"loss": 0.7454,
"step": 40
},
{
"epoch": 1.21,
"learning_rate": 0.00011803541062318697,
"loss": 0.9572,
"step": 41
},
{
"epoch": 1.24,
"learning_rate": 0.00011603481044313295,
"loss": 0.6438,
"step": 42
},
{
"epoch": 1.26,
"learning_rate": 0.00011403421026307892,
"loss": 0.6731,
"step": 43
},
{
"epoch": 1.29,
"learning_rate": 0.00011203361008302491,
"loss": 0.6977,
"step": 44
},
{
"epoch": 1.32,
"learning_rate": 0.0001100330099029709,
"loss": 0.6028,
"step": 45
},
{
"epoch": 1.35,
"learning_rate": 0.00010803240972291689,
"loss": 0.3236,
"step": 46
},
{
"epoch": 1.38,
"learning_rate": 0.00010603180954286287,
"loss": 0.7818,
"step": 47
},
{
"epoch": 1.41,
"learning_rate": 0.00010403120936280886,
"loss": 0.6282,
"step": 48
},
{
"epoch": 1.44,
"learning_rate": 0.00010203060918275482,
"loss": 0.7241,
"step": 49
},
{
"epoch": 1.47,
"learning_rate": 0.00010003000900270081,
"loss": 1.014,
"step": 50
},
{
"epoch": 1.5,
"learning_rate": 9.802940882264679e-05,
"loss": 0.5843,
"step": 51
},
{
"epoch": 1.53,
"learning_rate": 9.602880864259278e-05,
"loss": 0.5066,
"step": 52
},
{
"epoch": 1.56,
"learning_rate": 9.402820846253876e-05,
"loss": 0.922,
"step": 53
},
{
"epoch": 1.59,
"learning_rate": 9.202760828248476e-05,
"loss": 0.5464,
"step": 54
},
{
"epoch": 1.62,
"learning_rate": 9.002700810243074e-05,
"loss": 0.591,
"step": 55
},
{
"epoch": 1.65,
"learning_rate": 8.802640792237672e-05,
"loss": 0.8202,
"step": 56
},
{
"epoch": 1.68,
"learning_rate": 8.602580774232271e-05,
"loss": 0.8385,
"step": 57
},
{
"epoch": 1.71,
"learning_rate": 8.402520756226868e-05,
"loss": 0.8093,
"step": 58
},
{
"epoch": 1.74,
"learning_rate": 8.202460738221467e-05,
"loss": 0.7677,
"step": 59
},
{
"epoch": 1.76,
"learning_rate": 8.002400720216065e-05,
"loss": 0.7852,
"step": 60
},
{
"epoch": 1.79,
"learning_rate": 7.802340702210663e-05,
"loss": 0.9299,
"step": 61
},
{
"epoch": 1.82,
"learning_rate": 7.602280684205261e-05,
"loss": 0.7537,
"step": 62
},
{
"epoch": 1.85,
"learning_rate": 7.402220666199861e-05,
"loss": 0.8504,
"step": 63
},
{
"epoch": 1.88,
"learning_rate": 7.202160648194459e-05,
"loss": 0.6326,
"step": 64
},
{
"epoch": 1.91,
"learning_rate": 7.002100630189057e-05,
"loss": 0.4325,
"step": 65
},
{
"epoch": 1.94,
"learning_rate": 6.802040612183656e-05,
"loss": 0.7092,
"step": 66
},
{
"epoch": 1.97,
"learning_rate": 6.601980594178254e-05,
"loss": 0.9081,
"step": 67
},
{
"epoch": 2.0,
"learning_rate": 6.401920576172852e-05,
"loss": 0.5045,
"step": 68
},
{
"epoch": 2.03,
"learning_rate": 6.20186055816745e-05,
"loss": 0.418,
"step": 69
},
{
"epoch": 2.06,
"learning_rate": 6.001800540162049e-05,
"loss": 0.4485,
"step": 70
},
{
"epoch": 2.09,
"learning_rate": 5.801740522156648e-05,
"loss": 0.4887,
"step": 71
},
{
"epoch": 2.12,
"learning_rate": 5.601680504151246e-05,
"loss": 0.3122,
"step": 72
},
{
"epoch": 2.15,
"learning_rate": 5.4016204861458444e-05,
"loss": 0.4811,
"step": 73
},
{
"epoch": 2.18,
"learning_rate": 5.201560468140443e-05,
"loss": 0.2449,
"step": 74
},
{
"epoch": 2.21,
"learning_rate": 5.0015004501350405e-05,
"loss": 0.4157,
"step": 75
},
{
"epoch": 2.24,
"learning_rate": 4.801440432129639e-05,
"loss": 0.3642,
"step": 76
},
{
"epoch": 2.26,
"learning_rate": 4.601380414124238e-05,
"loss": 0.7396,
"step": 77
},
{
"epoch": 2.29,
"learning_rate": 4.401320396118836e-05,
"loss": 0.5625,
"step": 78
},
{
"epoch": 2.32,
"learning_rate": 4.201260378113434e-05,
"loss": 0.6648,
"step": 79
},
{
"epoch": 2.35,
"learning_rate": 4.0012003601080326e-05,
"loss": 0.3184,
"step": 80
},
{
"epoch": 2.38,
"learning_rate": 3.801140342102631e-05,
"loss": 0.3801,
"step": 81
},
{
"epoch": 2.41,
"learning_rate": 3.6010803240972294e-05,
"loss": 0.4922,
"step": 82
},
{
"epoch": 2.44,
"learning_rate": 3.401020306091828e-05,
"loss": 0.3716,
"step": 83
},
{
"epoch": 2.47,
"learning_rate": 3.200960288086426e-05,
"loss": 0.6029,
"step": 84
},
{
"epoch": 2.5,
"learning_rate": 3.0009002700810245e-05,
"loss": 0.5193,
"step": 85
},
{
"epoch": 2.53,
"learning_rate": 2.800840252075623e-05,
"loss": 0.5131,
"step": 86
},
{
"epoch": 2.56,
"learning_rate": 2.6007802340702216e-05,
"loss": 0.4021,
"step": 87
},
{
"epoch": 2.59,
"learning_rate": 2.4007202160648196e-05,
"loss": 0.1953,
"step": 88
},
{
"epoch": 2.62,
"learning_rate": 2.200660198059418e-05,
"loss": 0.4305,
"step": 89
},
{
"epoch": 2.65,
"learning_rate": 2.0006001800540163e-05,
"loss": 0.3563,
"step": 90
},
{
"epoch": 2.68,
"learning_rate": 1.8005401620486147e-05,
"loss": 0.4803,
"step": 91
},
{
"epoch": 2.71,
"learning_rate": 1.600480144043213e-05,
"loss": 0.3894,
"step": 92
},
{
"epoch": 2.74,
"learning_rate": 1.4004201260378114e-05,
"loss": 0.5841,
"step": 93
},
{
"epoch": 2.76,
"learning_rate": 1.2003601080324098e-05,
"loss": 0.2775,
"step": 94
},
{
"epoch": 2.79,
"learning_rate": 1.0003000900270082e-05,
"loss": 0.2276,
"step": 95
},
{
"epoch": 2.82,
"learning_rate": 8.002400720216065e-06,
"loss": 0.2473,
"step": 96
},
{
"epoch": 2.85,
"learning_rate": 6.001800540162049e-06,
"loss": 0.4294,
"step": 97
},
{
"epoch": 2.88,
"learning_rate": 4.001200360108033e-06,
"loss": 0.4284,
"step": 98
},
{
"epoch": 2.91,
"learning_rate": 2.0006001800540163e-06,
"loss": 0.2537,
"step": 99
},
{
"epoch": 2.94,
"learning_rate": 0.0,
"loss": 0.3062,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 10,
"total_flos": 6145255647191040.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
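
A minimal sketch (not part of the original file), assuming the JSON above is saved locally as trainer_state.json and that matplotlib is installed, showing one way to read the log_history entries and plot the training-loss curve over the 100 logged steps:

# Sketch only: load the trainer state above and plot its loss curve.
# The file name "trainer_state.json" and the matplotlib dependency are
# assumptions for illustration, not part of the original upload.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only log entries that carry a training loss (every entry here does,
# since logging_steps is 1 and no eval records were written).
records = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in records]
losses = [e["loss"] for e in records]

plt.plot(steps, losses)
plt.xlabel("global step")
plt.ylabel("training loss")
plt.title("Training loss over {} steps (~{:.2f} epochs)".format(
    state["max_steps"], state["epoch"]))
plt.show()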