mistral-nl-7b-sft-qlora / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.07424456158586383,
"eval_steps": 500,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 1.75,
"learning_rate": 2.9673590504451043e-07,
"loss": 1.433,
"step": 1
},
{
"epoch": 0.0,
"grad_norm": 1.578125,
"learning_rate": 1.483679525222552e-06,
"loss": 1.5006,
"step": 5
},
{
"epoch": 0.0,
"grad_norm": 1.4921875,
"learning_rate": 2.967359050445104e-06,
"loss": 1.4919,
"step": 10
},
{
"epoch": 0.0,
"grad_norm": 1.4921875,
"learning_rate": 4.451038575667656e-06,
"loss": 1.4531,
"step": 15
},
{
"epoch": 0.0,
"grad_norm": 1.0234375,
"learning_rate": 5.934718100890208e-06,
"loss": 1.4242,
"step": 20
},
{
"epoch": 0.0,
"grad_norm": 0.9453125,
"learning_rate": 7.418397626112759e-06,
"loss": 1.3932,
"step": 25
},
{
"epoch": 0.0,
"grad_norm": 0.8359375,
"learning_rate": 8.902077151335312e-06,
"loss": 1.3661,
"step": 30
},
{
"epoch": 0.01,
"grad_norm": 0.828125,
"learning_rate": 1.0385756676557864e-05,
"loss": 1.3677,
"step": 35
},
{
"epoch": 0.01,
"grad_norm": 0.59375,
"learning_rate": 1.1869436201780416e-05,
"loss": 1.3497,
"step": 40
},
{
"epoch": 0.01,
"grad_norm": 0.56640625,
"learning_rate": 1.3353115727002968e-05,
"loss": 1.3049,
"step": 45
},
{
"epoch": 0.01,
"grad_norm": 0.439453125,
"learning_rate": 1.4836795252225518e-05,
"loss": 1.2836,
"step": 50
},
{
"epoch": 0.01,
"grad_norm": 0.46484375,
"learning_rate": 1.6320474777448072e-05,
"loss": 1.3133,
"step": 55
},
{
"epoch": 0.01,
"grad_norm": 0.40625,
"learning_rate": 1.7804154302670624e-05,
"loss": 1.2596,
"step": 60
},
{
"epoch": 0.01,
"grad_norm": 0.408203125,
"learning_rate": 1.9287833827893176e-05,
"loss": 1.2612,
"step": 65
},
{
"epoch": 0.01,
"grad_norm": 0.376953125,
"learning_rate": 2.0771513353115728e-05,
"loss": 1.2584,
"step": 70
},
{
"epoch": 0.01,
"grad_norm": 0.40625,
"learning_rate": 2.225519287833828e-05,
"loss": 1.2386,
"step": 75
},
{
"epoch": 0.01,
"grad_norm": 0.353515625,
"learning_rate": 2.3738872403560832e-05,
"loss": 1.1833,
"step": 80
},
{
"epoch": 0.01,
"grad_norm": 0.349609375,
"learning_rate": 2.5222551928783384e-05,
"loss": 1.2411,
"step": 85
},
{
"epoch": 0.01,
"grad_norm": 0.36328125,
"learning_rate": 2.6706231454005936e-05,
"loss": 1.1956,
"step": 90
},
{
"epoch": 0.01,
"grad_norm": 0.369140625,
"learning_rate": 2.818991097922849e-05,
"loss": 1.2327,
"step": 95
},
{
"epoch": 0.01,
"grad_norm": 0.353515625,
"learning_rate": 2.9673590504451037e-05,
"loss": 1.1896,
"step": 100
},
{
"epoch": 0.02,
"grad_norm": 0.3984375,
"learning_rate": 3.115727002967359e-05,
"loss": 1.1979,
"step": 105
},
{
"epoch": 0.02,
"grad_norm": 0.388671875,
"learning_rate": 3.2640949554896144e-05,
"loss": 1.2004,
"step": 110
},
{
"epoch": 0.02,
"grad_norm": 0.392578125,
"learning_rate": 3.41246290801187e-05,
"loss": 1.1624,
"step": 115
},
{
"epoch": 0.02,
"grad_norm": 0.400390625,
"learning_rate": 3.560830860534125e-05,
"loss": 1.1914,
"step": 120
},
{
"epoch": 0.02,
"grad_norm": 0.41796875,
"learning_rate": 3.70919881305638e-05,
"loss": 1.1952,
"step": 125
},
{
"epoch": 0.02,
"grad_norm": 0.4140625,
"learning_rate": 3.857566765578635e-05,
"loss": 1.1538,
"step": 130
},
{
"epoch": 0.02,
"grad_norm": 0.4375,
"learning_rate": 4.005934718100891e-05,
"loss": 1.1702,
"step": 135
},
{
"epoch": 0.02,
"grad_norm": 0.44140625,
"learning_rate": 4.1543026706231456e-05,
"loss": 1.1683,
"step": 140
},
{
"epoch": 0.02,
"grad_norm": 0.443359375,
"learning_rate": 4.3026706231454005e-05,
"loss": 1.1822,
"step": 145
},
{
"epoch": 0.02,
"grad_norm": 0.482421875,
"learning_rate": 4.451038575667656e-05,
"loss": 1.1649,
"step": 150
},
{
"epoch": 0.02,
"grad_norm": 0.46484375,
"learning_rate": 4.5994065281899116e-05,
"loss": 1.1868,
"step": 155
},
{
"epoch": 0.02,
"grad_norm": 0.5,
"learning_rate": 4.7477744807121664e-05,
"loss": 1.161,
"step": 160
},
{
"epoch": 0.02,
"grad_norm": 0.451171875,
"learning_rate": 4.896142433234421e-05,
"loss": 1.1454,
"step": 165
},
{
"epoch": 0.03,
"grad_norm": 0.45703125,
"learning_rate": 5.044510385756677e-05,
"loss": 1.1388,
"step": 170
},
{
"epoch": 0.03,
"grad_norm": 0.490234375,
"learning_rate": 5.1928783382789324e-05,
"loss": 1.1253,
"step": 175
},
{
"epoch": 0.03,
"grad_norm": 0.51171875,
"learning_rate": 5.341246290801187e-05,
"loss": 1.1527,
"step": 180
},
{
"epoch": 0.03,
"grad_norm": 0.486328125,
"learning_rate": 5.489614243323442e-05,
"loss": 1.1234,
"step": 185
},
{
"epoch": 0.03,
"grad_norm": 0.46875,
"learning_rate": 5.637982195845698e-05,
"loss": 1.113,
"step": 190
},
{
"epoch": 0.03,
"grad_norm": 0.458984375,
"learning_rate": 5.7863501483679525e-05,
"loss": 1.1368,
"step": 195
},
{
"epoch": 0.03,
"grad_norm": 0.484375,
"learning_rate": 5.9347181008902074e-05,
"loss": 1.137,
"step": 200
},
{
"epoch": 0.03,
"grad_norm": 0.4765625,
"learning_rate": 6.0830860534124636e-05,
"loss": 1.1286,
"step": 205
},
{
"epoch": 0.03,
"grad_norm": 0.484375,
"learning_rate": 6.231454005934718e-05,
"loss": 1.1277,
"step": 210
},
{
"epoch": 0.03,
"grad_norm": 0.474609375,
"learning_rate": 6.379821958456974e-05,
"loss": 1.1268,
"step": 215
},
{
"epoch": 0.03,
"grad_norm": 0.47265625,
"learning_rate": 6.528189910979229e-05,
"loss": 1.0993,
"step": 220
},
{
"epoch": 0.03,
"grad_norm": 0.50390625,
"learning_rate": 6.676557863501484e-05,
"loss": 1.1017,
"step": 225
},
{
"epoch": 0.03,
"grad_norm": 0.4609375,
"learning_rate": 6.82492581602374e-05,
"loss": 1.1345,
"step": 230
},
{
"epoch": 0.03,
"grad_norm": 0.48828125,
"learning_rate": 6.973293768545995e-05,
"loss": 1.1086,
"step": 235
},
{
"epoch": 0.04,
"grad_norm": 0.45703125,
"learning_rate": 7.12166172106825e-05,
"loss": 1.0791,
"step": 240
},
{
"epoch": 0.04,
"grad_norm": 0.470703125,
"learning_rate": 7.270029673590505e-05,
"loss": 1.1158,
"step": 245
},
{
"epoch": 0.04,
"grad_norm": 0.46875,
"learning_rate": 7.41839762611276e-05,
"loss": 1.132,
"step": 250
},
{
"epoch": 0.04,
"grad_norm": 0.4609375,
"learning_rate": 7.566765578635016e-05,
"loss": 1.1438,
"step": 255
},
{
"epoch": 0.04,
"grad_norm": 0.470703125,
"learning_rate": 7.71513353115727e-05,
"loss": 1.1405,
"step": 260
},
{
"epoch": 0.04,
"grad_norm": 0.447265625,
"learning_rate": 7.863501483679525e-05,
"loss": 1.1124,
"step": 265
},
{
"epoch": 0.04,
"grad_norm": 0.486328125,
"learning_rate": 8.011869436201782e-05,
"loss": 1.0813,
"step": 270
},
{
"epoch": 0.04,
"grad_norm": 0.48046875,
"learning_rate": 8.160237388724036e-05,
"loss": 1.1194,
"step": 275
},
{
"epoch": 0.04,
"grad_norm": 0.462890625,
"learning_rate": 8.308605341246291e-05,
"loss": 1.0927,
"step": 280
},
{
"epoch": 0.04,
"grad_norm": 0.48046875,
"learning_rate": 8.456973293768546e-05,
"loss": 1.1277,
"step": 285
},
{
"epoch": 0.04,
"grad_norm": 0.4453125,
"learning_rate": 8.605341246290801e-05,
"loss": 1.1271,
"step": 290
},
{
"epoch": 0.04,
"grad_norm": 0.435546875,
"learning_rate": 8.753709198813057e-05,
"loss": 1.1162,
"step": 295
},
{
"epoch": 0.04,
"grad_norm": 0.44921875,
"learning_rate": 8.902077151335312e-05,
"loss": 1.0857,
"step": 300
},
{
"epoch": 0.05,
"grad_norm": 0.4375,
"learning_rate": 9.050445103857568e-05,
"loss": 1.0869,
"step": 305
},
{
"epoch": 0.05,
"grad_norm": 0.4453125,
"learning_rate": 9.198813056379823e-05,
"loss": 1.0655,
"step": 310
},
{
"epoch": 0.05,
"grad_norm": 0.4296875,
"learning_rate": 9.347181008902077e-05,
"loss": 1.0585,
"step": 315
},
{
"epoch": 0.05,
"grad_norm": 0.41796875,
"learning_rate": 9.495548961424333e-05,
"loss": 1.1144,
"step": 320
},
{
"epoch": 0.05,
"grad_norm": 0.43359375,
"learning_rate": 9.643916913946588e-05,
"loss": 1.0719,
"step": 325
},
{
"epoch": 0.05,
"grad_norm": 0.416015625,
"learning_rate": 9.792284866468843e-05,
"loss": 1.0919,
"step": 330
},
{
"epoch": 0.05,
"grad_norm": 0.423828125,
"learning_rate": 9.940652818991099e-05,
"loss": 1.1223,
"step": 335
},
{
"epoch": 0.05,
"grad_norm": 0.431640625,
"learning_rate": 0.00010089020771513354,
"loss": 1.0565,
"step": 340
},
{
"epoch": 0.05,
"grad_norm": 0.431640625,
"learning_rate": 0.00010237388724035609,
"loss": 1.0962,
"step": 345
},
{
"epoch": 0.05,
"grad_norm": 0.44921875,
"learning_rate": 0.00010385756676557865,
"loss": 1.0959,
"step": 350
},
{
"epoch": 0.05,
"grad_norm": 0.43359375,
"learning_rate": 0.0001053412462908012,
"loss": 1.0628,
"step": 355
},
{
"epoch": 0.05,
"grad_norm": 0.431640625,
"learning_rate": 0.00010682492581602374,
"loss": 1.0975,
"step": 360
},
{
"epoch": 0.05,
"grad_norm": 0.42578125,
"learning_rate": 0.0001083086053412463,
"loss": 1.0727,
"step": 365
},
{
"epoch": 0.05,
"grad_norm": 0.416015625,
"learning_rate": 0.00010979228486646884,
"loss": 1.0649,
"step": 370
},
{
"epoch": 0.06,
"grad_norm": 0.4296875,
"learning_rate": 0.00011127596439169139,
"loss": 1.0904,
"step": 375
},
{
"epoch": 0.06,
"grad_norm": 0.3984375,
"learning_rate": 0.00011275964391691397,
"loss": 1.079,
"step": 380
},
{
"epoch": 0.06,
"grad_norm": 0.40234375,
"learning_rate": 0.0001142433234421365,
"loss": 1.0522,
"step": 385
},
{
"epoch": 0.06,
"grad_norm": 0.431640625,
"learning_rate": 0.00011572700296735905,
"loss": 1.0579,
"step": 390
},
{
"epoch": 0.06,
"grad_norm": 0.396484375,
"learning_rate": 0.0001172106824925816,
"loss": 1.0871,
"step": 395
},
{
"epoch": 0.06,
"grad_norm": 0.41015625,
"learning_rate": 0.00011869436201780415,
"loss": 1.0936,
"step": 400
},
{
"epoch": 0.06,
"grad_norm": 0.412109375,
"learning_rate": 0.00012017804154302672,
"loss": 1.0734,
"step": 405
},
{
"epoch": 0.06,
"grad_norm": 0.392578125,
"learning_rate": 0.00012166172106824927,
"loss": 1.0657,
"step": 410
},
{
"epoch": 0.06,
"grad_norm": 0.4140625,
"learning_rate": 0.00012314540059347182,
"loss": 1.0884,
"step": 415
},
{
"epoch": 0.06,
"grad_norm": 0.408203125,
"learning_rate": 0.00012462908011869436,
"loss": 1.0683,
"step": 420
},
{
"epoch": 0.06,
"grad_norm": 0.3984375,
"learning_rate": 0.00012611275964391692,
"loss": 1.1073,
"step": 425
},
{
"epoch": 0.06,
"grad_norm": 0.412109375,
"learning_rate": 0.00012759643916913948,
"loss": 1.0849,
"step": 430
},
{
"epoch": 0.06,
"grad_norm": 0.40625,
"learning_rate": 0.00012908011869436204,
"loss": 1.0798,
"step": 435
},
{
"epoch": 0.07,
"grad_norm": 0.404296875,
"learning_rate": 0.00013056379821958458,
"loss": 1.1029,
"step": 440
},
{
"epoch": 0.07,
"grad_norm": 0.3828125,
"learning_rate": 0.0001320474777448071,
"loss": 1.0664,
"step": 445
},
{
"epoch": 0.07,
"grad_norm": 0.38671875,
"learning_rate": 0.00013353115727002967,
"loss": 1.0998,
"step": 450
},
{
"epoch": 0.07,
"grad_norm": 0.40234375,
"learning_rate": 0.00013501483679525224,
"loss": 1.0834,
"step": 455
},
{
"epoch": 0.07,
"grad_norm": 0.400390625,
"learning_rate": 0.0001364985163204748,
"loss": 1.062,
"step": 460
},
{
"epoch": 0.07,
"grad_norm": 0.3984375,
"learning_rate": 0.00013798219584569733,
"loss": 1.0825,
"step": 465
},
{
"epoch": 0.07,
"grad_norm": 0.388671875,
"learning_rate": 0.0001394658753709199,
"loss": 1.0689,
"step": 470
},
{
"epoch": 0.07,
"grad_norm": 0.392578125,
"learning_rate": 0.00014094955489614243,
"loss": 1.0557,
"step": 475
},
{
"epoch": 0.07,
"grad_norm": 0.380859375,
"learning_rate": 0.000142433234421365,
"loss": 1.0582,
"step": 480
},
{
"epoch": 0.07,
"grad_norm": 0.380859375,
"learning_rate": 0.00014391691394658756,
"loss": 1.0921,
"step": 485
},
{
"epoch": 0.07,
"grad_norm": 0.384765625,
"learning_rate": 0.0001454005934718101,
"loss": 1.0544,
"step": 490
},
{
"epoch": 0.07,
"grad_norm": 0.39453125,
"learning_rate": 0.00014688427299703265,
"loss": 1.0333,
"step": 495
},
{
"epoch": 0.07,
"grad_norm": 0.384765625,
"learning_rate": 0.0001483679525222552,
"loss": 1.0454,
"step": 500
}
],
"logging_steps": 5,
"max_steps": 6734,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 7.031295252564541e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
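
Note: the log_history above covers the first 500 of 6734 optimizer steps, logged every 5 steps; over this span the learning rate increases roughly linearly (consistent with a warmup phase) and the training loss falls from about 1.43 to about 1.05. Below is a minimal sketch for inspecting this log offline, assuming the file is saved locally as trainer_state.json; the script name and plot layout are illustrative and not part of the checkpoint.

# plot_trainer_state.py -- minimal sketch, assuming this file is saved
# locally as "trainer_state.json"; plot styling is illustrative only.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Every entry in this checkpoint's log_history carries step, loss and learning_rate.
steps = [e["step"] for e in state["log_history"]]
losses = [e["loss"] for e in state["log_history"]]
lrs = [e["learning_rate"] for e in state["log_history"]]

fig, (ax_loss, ax_lr) = plt.subplots(1, 2, figsize=(10, 4))
ax_loss.plot(steps, losses)
ax_loss.set_xlabel("step")
ax_loss.set_ylabel("training loss")
ax_lr.plot(steps, lrs)
ax_lr.set_xlabel("step")
ax_lr.set_ylabel("learning rate")
fig.tight_layout()
plt.show()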