Yi-6B-ruozhiba / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 20.0,
"eval_steps": 500,
"global_step": 1100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"learning_rate": 4.545454545454545e-07,
"loss": 2.3833,
"step": 1
},
{
"epoch": 0.07,
"learning_rate": 1.818181818181818e-06,
"loss": 2.4784,
"step": 4
},
{
"epoch": 0.15,
"learning_rate": 3.636363636363636e-06,
"loss": 2.3152,
"step": 8
},
{
"epoch": 0.22,
"learning_rate": 5.4545454545454545e-06,
"loss": 2.3229,
"step": 12
},
{
"epoch": 0.29,
"learning_rate": 7.272727272727272e-06,
"loss": 2.2926,
"step": 16
},
{
"epoch": 0.36,
"learning_rate": 9.090909090909091e-06,
"loss": 2.3494,
"step": 20
},
{
"epoch": 0.44,
"learning_rate": 1.0909090909090909e-05,
"loss": 2.3118,
"step": 24
},
{
"epoch": 0.51,
"learning_rate": 1.2727272727272727e-05,
"loss": 2.5019,
"step": 28
},
{
"epoch": 0.58,
"learning_rate": 1.4545454545454545e-05,
"loss": 2.4156,
"step": 32
},
{
"epoch": 0.65,
"learning_rate": 1.6363636363636366e-05,
"loss": 2.2803,
"step": 36
},
{
"epoch": 0.73,
"learning_rate": 1.8181818181818182e-05,
"loss": 2.1552,
"step": 40
},
{
"epoch": 0.8,
"learning_rate": 2e-05,
"loss": 2.2168,
"step": 44
},
{
"epoch": 0.87,
"learning_rate": 2.1818181818181818e-05,
"loss": 2.2977,
"step": 48
},
{
"epoch": 0.95,
"learning_rate": 2.3636363636363637e-05,
"loss": 2.0998,
"step": 52
},
{
"epoch": 1.0,
"gpt4_scores": 0.19999999999999998,
"step": 55
},
{
"epoch": 1.0,
"eval_loss": 2.0215020179748535,
"eval_runtime": 4.9485,
"eval_samples_per_second": 4.648,
"eval_steps_per_second": 1.212,
"step": 55
},
{
"epoch": 1.02,
"learning_rate": 2.5454545454545454e-05,
"loss": 1.9844,
"step": 56
},
{
"epoch": 1.09,
"learning_rate": 2.7272727272727273e-05,
"loss": 1.9635,
"step": 60
},
{
"epoch": 1.16,
"learning_rate": 2.909090909090909e-05,
"loss": 1.949,
"step": 64
},
{
"epoch": 1.24,
"learning_rate": 3.090909090909091e-05,
"loss": 1.9375,
"step": 68
},
{
"epoch": 1.31,
"learning_rate": 3.272727272727273e-05,
"loss": 2.0071,
"step": 72
},
{
"epoch": 1.38,
"learning_rate": 3.454545454545455e-05,
"loss": 1.8369,
"step": 76
},
{
"epoch": 1.45,
"learning_rate": 3.6363636363636364e-05,
"loss": 1.8529,
"step": 80
},
{
"epoch": 1.53,
"learning_rate": 3.818181818181819e-05,
"loss": 1.979,
"step": 84
},
{
"epoch": 1.6,
"learning_rate": 4e-05,
"loss": 1.9027,
"step": 88
},
{
"epoch": 1.67,
"learning_rate": 4.181818181818182e-05,
"loss": 1.7632,
"step": 92
},
{
"epoch": 1.75,
"learning_rate": 4.3636363636363636e-05,
"loss": 1.8529,
"step": 96
},
{
"epoch": 1.82,
"learning_rate": 4.545454545454546e-05,
"loss": 1.7896,
"step": 100
},
{
"epoch": 1.89,
"learning_rate": 4.7272727272727275e-05,
"loss": 1.7356,
"step": 104
},
{
"epoch": 1.96,
"learning_rate": 4.909090909090909e-05,
"loss": 1.7439,
"step": 108
},
{
"epoch": 2.0,
"gpt4_scores": 0.7333333333333334,
"step": 110
},
{
"epoch": 2.0,
"eval_loss": 1.8371074199676514,
"eval_runtime": 4.9302,
"eval_samples_per_second": 4.665,
"eval_steps_per_second": 1.217,
"step": 110
},
{
"epoch": 2.04,
"learning_rate": 4.999949650182266e-05,
"loss": 1.709,
"step": 112
},
{
"epoch": 2.11,
"learning_rate": 4.999546863808815e-05,
"loss": 1.7374,
"step": 116
},
{
"epoch": 2.18,
"learning_rate": 4.9987413559579636e-05,
"loss": 1.636,
"step": 120
},
{
"epoch": 2.25,
"learning_rate": 4.99753325641136e-05,
"loss": 1.7623,
"step": 124
},
{
"epoch": 2.33,
"learning_rate": 4.995922759815339e-05,
"loss": 1.7131,
"step": 128
},
{
"epoch": 2.4,
"learning_rate": 4.993910125649561e-05,
"loss": 1.65,
"step": 132
},
{
"epoch": 2.47,
"learning_rate": 4.991495678185202e-05,
"loss": 1.7748,
"step": 136
},
{
"epoch": 2.55,
"learning_rate": 4.988679806432712e-05,
"loss": 1.7411,
"step": 140
},
{
"epoch": 2.62,
"learning_rate": 4.985462964079137e-05,
"loss": 1.6517,
"step": 144
},
{
"epoch": 2.69,
"learning_rate": 4.981845669415022e-05,
"loss": 1.5844,
"step": 148
},
{
"epoch": 2.76,
"learning_rate": 4.977828505250903e-05,
"loss": 1.6811,
"step": 152
},
{
"epoch": 2.84,
"learning_rate": 4.973412118823412e-05,
"loss": 1.6825,
"step": 156
},
{
"epoch": 2.91,
"learning_rate": 4.968597221690986e-05,
"loss": 1.6176,
"step": 160
},
{
"epoch": 2.98,
"learning_rate": 4.963384589619233e-05,
"loss": 1.7017,
"step": 164
},
{
"epoch": 3.0,
"gpt4_scores": 0.6666666666666666,
"step": 165
},
{
"epoch": 3.0,
"eval_loss": 1.818211555480957,
"eval_runtime": 4.9115,
"eval_samples_per_second": 4.683,
"eval_steps_per_second": 1.222,
"step": 165
},
{
"epoch": 3.05,
"learning_rate": 4.957775062455933e-05,
"loss": 1.59,
"step": 168
},
{
"epoch": 3.13,
"learning_rate": 4.951769543995731e-05,
"loss": 1.326,
"step": 172
},
{
"epoch": 3.2,
"learning_rate": 4.9453690018345144e-05,
"loss": 1.4622,
"step": 176
},
{
"epoch": 3.27,
"learning_rate": 4.938574467213518e-05,
"loss": 1.4188,
"step": 180
},
{
"epoch": 3.35,
"learning_rate": 4.931387034853173e-05,
"loss": 1.4488,
"step": 184
},
{
"epoch": 3.42,
"learning_rate": 4.923807862776728e-05,
"loss": 1.5268,
"step": 188
},
{
"epoch": 3.49,
"learning_rate": 4.915838172123671e-05,
"loss": 1.3807,
"step": 192
},
{
"epoch": 3.56,
"learning_rate": 4.9074792469529815e-05,
"loss": 1.4319,
"step": 196
},
{
"epoch": 3.64,
"learning_rate": 4.898732434036244e-05,
"loss": 1.4653,
"step": 200
},
{
"epoch": 3.71,
"learning_rate": 4.889599142640663e-05,
"loss": 1.417,
"step": 204
},
{
"epoch": 3.78,
"learning_rate": 4.880080844302004e-05,
"loss": 1.4848,
"step": 208
},
{
"epoch": 3.85,
"learning_rate": 4.870179072587499e-05,
"loss": 1.4331,
"step": 212
},
{
"epoch": 3.93,
"learning_rate": 4.859895422848767e-05,
"loss": 1.3679,
"step": 216
},
{
"epoch": 4.0,
"learning_rate": 4.849231551964771e-05,
"loss": 1.3662,
"step": 220
},
{
"epoch": 4.0,
"gpt4_scores": 0.75,
"step": 220
},
{
"epoch": 4.0,
"eval_loss": 1.9195555448532104,
"eval_runtime": 4.9614,
"eval_samples_per_second": 4.636,
"eval_steps_per_second": 1.209,
"step": 220
},
{
"epoch": 4.07,
"learning_rate": 4.838189178074867e-05,
"loss": 1.1767,
"step": 224
},
{
"epoch": 4.15,
"learning_rate": 4.826770080301978e-05,
"loss": 1.1217,
"step": 228
},
{
"epoch": 4.22,
"learning_rate": 4.8149760984659506e-05,
"loss": 1.1938,
"step": 232
},
{
"epoch": 4.29,
"learning_rate": 4.802809132787125e-05,
"loss": 1.0658,
"step": 236
},
{
"epoch": 4.36,
"learning_rate": 4.790271143580174e-05,
"loss": 0.9068,
"step": 240
},
{
"epoch": 4.44,
"learning_rate": 4.777364150938263e-05,
"loss": 1.0878,
"step": 244
},
{
"epoch": 4.51,
"learning_rate": 4.764090234407577e-05,
"loss": 1.0284,
"step": 248
},
{
"epoch": 4.58,
"learning_rate": 4.75045153265227e-05,
"loss": 1.0695,
"step": 252
},
{
"epoch": 4.65,
"learning_rate": 4.7364502431098844e-05,
"loss": 1.1011,
"step": 256
},
{
"epoch": 4.73,
"learning_rate": 4.722088621637309e-05,
"loss": 1.0877,
"step": 260
},
{
"epoch": 4.8,
"learning_rate": 4.707368982147318e-05,
"loss": 0.9747,
"step": 264
},
{
"epoch": 4.87,
"learning_rate": 4.692293696235758e-05,
"loss": 0.9563,
"step": 268
},
{
"epoch": 4.95,
"learning_rate": 4.6768651927994434e-05,
"loss": 1.0344,
"step": 272
},
{
"epoch": 5.0,
"gpt4_scores": 0.6,
"step": 275
},
{
"epoch": 5.0,
"eval_loss": 2.226499557495117,
"eval_runtime": 4.9165,
"eval_samples_per_second": 4.678,
"eval_steps_per_second": 1.22,
"step": 275
},
{
"epoch": 5.02,
"learning_rate": 4.6610859576448176e-05,
"loss": 0.9582,
"step": 276
},
{
"epoch": 5.09,
"learning_rate": 4.644958533087443e-05,
"loss": 0.7354,
"step": 280
},
{
"epoch": 5.16,
"learning_rate": 4.628485517542392e-05,
"loss": 0.7346,
"step": 284
},
{
"epoch": 5.24,
"learning_rate": 4.611669565105596e-05,
"loss": 0.6712,
"step": 288
},
{
"epoch": 5.31,
"learning_rate": 4.594513385126218e-05,
"loss": 0.7341,
"step": 292
},
{
"epoch": 5.38,
"learning_rate": 4.5770197417701365e-05,
"loss": 0.6719,
"step": 296
},
{
"epoch": 5.45,
"learning_rate": 4.559191453574582e-05,
"loss": 0.6348,
"step": 300
},
{
"epoch": 5.53,
"learning_rate": 4.5410313929940244e-05,
"loss": 0.7058,
"step": 304
},
{
"epoch": 5.6,
"learning_rate": 4.522542485937369e-05,
"loss": 0.6653,
"step": 308
},
{
"epoch": 5.67,
"learning_rate": 4.503727711296538e-05,
"loss": 0.6575,
"step": 312
},
{
"epoch": 5.75,
"learning_rate": 4.4845901004665234e-05,
"loss": 0.6561,
"step": 316
},
{
"epoch": 5.82,
"learning_rate": 4.465132736856969e-05,
"loss": 0.6728,
"step": 320
},
{
"epoch": 5.89,
"learning_rate": 4.445358755395382e-05,
"loss": 0.703,
"step": 324
},
{
"epoch": 5.96,
"learning_rate": 4.425271342022039e-05,
"loss": 0.6959,
"step": 328
},
{
"epoch": 6.0,
"gpt4_scores": 0.7666666666666666,
"step": 330
},
{
"epoch": 6.0,
"eval_loss": 2.5187010765075684,
"eval_runtime": 4.9193,
"eval_samples_per_second": 4.675,
"eval_steps_per_second": 1.22,
"step": 330
},
{
"epoch": 6.04,
"learning_rate": 4.404873733176678e-05,
"loss": 0.6332,
"step": 332
},
{
"epoch": 6.11,
"learning_rate": 4.384169215277041e-05,
"loss": 0.3578,
"step": 336
},
{
"epoch": 6.18,
"learning_rate": 4.3631611241893874e-05,
"loss": 0.3878,
"step": 340
},
{
"epoch": 6.25,
"learning_rate": 4.341852844691012e-05,
"loss": 0.3941,
"step": 344
},
{
"epoch": 6.33,
"learning_rate": 4.3202478099249105e-05,
"loss": 0.3927,
"step": 348
},
{
"epoch": 6.4,
"learning_rate": 4.2983495008466276e-05,
"loss": 0.4278,
"step": 352
},
{
"epoch": 6.47,
"learning_rate": 4.276161445663423e-05,
"loss": 0.4638,
"step": 356
},
{
"epoch": 6.55,
"learning_rate": 4.2536872192658036e-05,
"loss": 0.4473,
"step": 360
},
{
"epoch": 6.62,
"learning_rate": 4.230930442651557e-05,
"loss": 0.4563,
"step": 364
},
{
"epoch": 6.69,
"learning_rate": 4.2078947823423364e-05,
"loss": 0.3735,
"step": 368
},
{
"epoch": 6.76,
"learning_rate": 4.1845839497929204e-05,
"loss": 0.4068,
"step": 372
},
{
"epoch": 6.84,
"learning_rate": 4.161001700793231e-05,
"loss": 0.455,
"step": 376
},
{
"epoch": 6.91,
"learning_rate": 4.137151834863213e-05,
"loss": 0.4346,
"step": 380
},
{
"epoch": 6.98,
"learning_rate": 4.113038194640658e-05,
"loss": 0.4353,
"step": 384
},
{
"epoch": 7.0,
"gpt4_scores": 0.75,
"step": 385
},
{
"epoch": 7.0,
"eval_loss": 2.8303465843200684,
"eval_runtime": 4.9148,
"eval_samples_per_second": 4.68,
"eval_steps_per_second": 1.221,
"step": 385
},
{
"epoch": 7.05,
"learning_rate": 4.088664665262091e-05,
"loss": 0.3461,
"step": 388
},
{
"epoch": 7.13,
"learning_rate": 4.064035173736804e-05,
"loss": 0.2204,
"step": 392
},
{
"epoch": 7.2,
"learning_rate": 4.039153688314145e-05,
"loss": 0.2423,
"step": 396
},
{
"epoch": 7.27,
"learning_rate": 4.014024217844167e-05,
"loss": 0.2308,
"step": 400
},
{
"epoch": 7.35,
"learning_rate": 3.98865081113172e-05,
"loss": 0.2553,
"step": 404
},
{
"epoch": 7.42,
"learning_rate": 3.9630375562841295e-05,
"loss": 0.2258,
"step": 408
},
{
"epoch": 7.49,
"learning_rate": 3.937188580052518e-05,
"loss": 0.264,
"step": 412
},
{
"epoch": 7.56,
"learning_rate": 3.911108047166924e-05,
"loss": 0.2548,
"step": 416
},
{
"epoch": 7.64,
"learning_rate": 3.884800159665276e-05,
"loss": 0.272,
"step": 420
},
{
"epoch": 7.71,
"learning_rate": 3.858269156216383e-05,
"loss": 0.2596,
"step": 424
},
{
"epoch": 7.78,
"learning_rate": 3.8315193114369996e-05,
"loss": 0.24,
"step": 428
},
{
"epoch": 7.85,
"learning_rate": 3.804554935203115e-05,
"loss": 0.2434,
"step": 432
},
{
"epoch": 7.93,
"learning_rate": 3.7773803719555514e-05,
"loss": 0.2694,
"step": 436
},
{
"epoch": 8.0,
"learning_rate": 3.7500000000000003e-05,
"loss": 0.2736,
"step": 440
},
{
"epoch": 8.0,
"gpt4_scores": 0.5833333333333334,
"step": 440
},
{
"epoch": 8.0,
"eval_loss": 3.1702194213867188,
"eval_runtime": 4.9672,
"eval_samples_per_second": 4.63,
"eval_steps_per_second": 1.208,
"step": 440
},
{
"epoch": 8.07,
"learning_rate": 3.7224182308015975e-05,
"loss": 0.1419,
"step": 444
},
{
"epoch": 8.15,
"learning_rate": 3.694639508274158e-05,
"loss": 0.148,
"step": 448
},
{
"epoch": 8.22,
"learning_rate": 3.6666683080641846e-05,
"loss": 0.1635,
"step": 452
},
{
"epoch": 8.29,
"learning_rate": 3.638509136829758e-05,
"loss": 0.1667,
"step": 456
},
{
"epoch": 8.36,
"learning_rate": 3.610166531514436e-05,
"loss": 0.164,
"step": 460
},
{
"epoch": 8.44,
"learning_rate": 3.581645058616271e-05,
"loss": 0.1428,
"step": 464
},
{
"epoch": 8.51,
"learning_rate": 3.552949313452067e-05,
"loss": 0.1392,
"step": 468
},
{
"epoch": 8.58,
"learning_rate": 3.5240839194169885e-05,
"loss": 0.1557,
"step": 472
},
{
"epoch": 8.65,
"learning_rate": 3.495053527239656e-05,
"loss": 0.1714,
"step": 476
},
{
"epoch": 8.73,
"learning_rate": 3.465862814232822e-05,
"loss": 0.1622,
"step": 480
},
{
"epoch": 8.8,
"learning_rate": 3.436516483539781e-05,
"loss": 0.18,
"step": 484
},
{
"epoch": 8.87,
"learning_rate": 3.4070192633766025e-05,
"loss": 0.1654,
"step": 488
},
{
"epoch": 8.95,
"learning_rate": 3.3773759062703396e-05,
"loss": 0.1598,
"step": 492
},
{
"epoch": 9.0,
"gpt4_scores": 0.5499999999999999,
"step": 495
},
{
"epoch": 9.0,
"eval_loss": 3.373944044113159,
"eval_runtime": 4.9104,
"eval_samples_per_second": 4.684,
"eval_steps_per_second": 1.222,
"step": 495
},
{
"epoch": 9.02,
"learning_rate": 3.3475911882933015e-05,
"loss": 0.1388,
"step": 496
},
{
"epoch": 9.09,
"learning_rate": 3.3176699082935545e-05,
"loss": 0.1064,
"step": 500
},
{
"epoch": 9.16,
"learning_rate": 3.2876168871217325e-05,
"loss": 0.1153,
"step": 504
},
{
"epoch": 9.24,
"learning_rate": 3.257436966854319e-05,
"loss": 0.1225,
"step": 508
},
{
"epoch": 9.31,
"learning_rate": 3.2271350100134975e-05,
"loss": 0.1146,
"step": 512
},
{
"epoch": 9.38,
"learning_rate": 3.19671589878372e-05,
"loss": 0.1202,
"step": 516
},
{
"epoch": 9.45,
"learning_rate": 3.166184534225087e-05,
"loss": 0.1132,
"step": 520
},
{
"epoch": 9.53,
"learning_rate": 3.135545835483718e-05,
"loss": 0.106,
"step": 524
},
{
"epoch": 9.6,
"learning_rate": 3.104804738999169e-05,
"loss": 0.1127,
"step": 528
},
{
"epoch": 9.67,
"learning_rate": 3.073966197709103e-05,
"loss": 0.1059,
"step": 532
},
{
"epoch": 9.75,
"learning_rate": 3.0430351802512698e-05,
"loss": 0.1089,
"step": 536
},
{
"epoch": 9.82,
"learning_rate": 3.012016670162977e-05,
"loss": 0.1139,
"step": 540
},
{
"epoch": 9.89,
"learning_rate": 2.9809156650781528e-05,
"loss": 0.1247,
"step": 544
},
{
"epoch": 9.96,
"learning_rate": 2.9497371759221347e-05,
"loss": 0.117,
"step": 548
},
{
"epoch": 10.0,
"gpt4_scores": 0.7000000000000001,
"step": 550
},
{
"epoch": 10.0,
"eval_loss": 3.4963889122009277,
"eval_runtime": 4.9532,
"eval_samples_per_second": 4.643,
"eval_steps_per_second": 1.211,
"step": 550
},
{
"epoch": 10.04,
"learning_rate": 2.918486226104327e-05,
"loss": 0.0894,
"step": 552
},
{
"epoch": 10.11,
"learning_rate": 2.8871678507088312e-05,
"loss": 0.0835,
"step": 556
},
{
"epoch": 10.18,
"learning_rate": 2.8557870956832132e-05,
"loss": 0.0913,
"step": 560
},
{
"epoch": 10.25,
"learning_rate": 2.8243490170255043e-05,
"loss": 0.0737,
"step": 564
},
{
"epoch": 10.33,
"learning_rate": 2.792858679969596e-05,
"loss": 0.0897,
"step": 568
},
{
"epoch": 10.4,
"learning_rate": 2.761321158169134e-05,
"loss": 0.0825,
"step": 572
},
{
"epoch": 10.47,
"learning_rate": 2.7297415328800692e-05,
"loss": 0.0845,
"step": 576
},
{
"epoch": 10.55,
"learning_rate": 2.698124892141971e-05,
"loss": 0.0819,
"step": 580
},
{
"epoch": 10.62,
"learning_rate": 2.6664763299582602e-05,
"loss": 0.0868,
"step": 584
},
{
"epoch": 10.69,
"learning_rate": 2.6348009454754653e-05,
"loss": 0.083,
"step": 588
},
{
"epoch": 10.76,
"learning_rate": 2.6031038421616683e-05,
"loss": 0.0937,
"step": 592
},
{
"epoch": 10.84,
"learning_rate": 2.5713901269842404e-05,
"loss": 0.0872,
"step": 596
},
{
"epoch": 10.91,
"learning_rate": 2.5396649095870202e-05,
"loss": 0.0869,
"step": 600
},
{
"epoch": 10.98,
"learning_rate": 2.507933301467056e-05,
"loss": 0.0856,
"step": 604
},
{
"epoch": 11.0,
"gpt4_scores": 0.48333333333333334,
"step": 605
},
{
"epoch": 11.0,
"eval_loss": 3.6468489170074463,
"eval_runtime": 4.9449,
"eval_samples_per_second": 4.651,
"eval_steps_per_second": 1.213,
"step": 605
},
{
"epoch": 11.05,
"learning_rate": 2.4762004151510584e-05,
"loss": 0.0808,
"step": 608
},
{
"epoch": 11.13,
"learning_rate": 2.4444713633716765e-05,
"loss": 0.0641,
"step": 612
},
{
"epoch": 11.2,
"learning_rate": 2.4127512582437485e-05,
"loss": 0.0699,
"step": 616
},
{
"epoch": 11.27,
"learning_rate": 2.3810452104406444e-05,
"loss": 0.0692,
"step": 620
},
{
"epoch": 11.35,
"learning_rate": 2.349358328370854e-05,
"loss": 0.0689,
"step": 624
},
{
"epoch": 11.42,
"learning_rate": 2.3176957173549235e-05,
"loss": 0.0763,
"step": 628
},
{
"epoch": 11.49,
"learning_rate": 2.2860624788029013e-05,
"loss": 0.0609,
"step": 632
},
{
"epoch": 11.56,
"learning_rate": 2.2544637093924074e-05,
"loss": 0.0679,
"step": 636
},
{
"epoch": 11.64,
"learning_rate": 2.222904500247473e-05,
"loss": 0.0675,
"step": 640
},
{
"epoch": 11.71,
"learning_rate": 2.1913899361182632e-05,
"loss": 0.0741,
"step": 644
},
{
"epoch": 11.78,
"learning_rate": 2.1599250945618402e-05,
"loss": 0.0644,
"step": 648
},
{
"epoch": 11.85,
"learning_rate": 2.1285150451240713e-05,
"loss": 0.0672,
"step": 652
},
{
"epoch": 11.93,
"learning_rate": 2.09716484852284e-05,
"loss": 0.0722,
"step": 656
},
{
"epoch": 12.0,
"learning_rate": 2.0658795558326743e-05,
"loss": 0.0681,
"step": 660
},
{
"epoch": 12.0,
"gpt4_scores": 0.7166666666666667,
"step": 660
},
{
"epoch": 12.0,
"eval_loss": 3.8001902103424072,
"eval_runtime": 4.9149,
"eval_samples_per_second": 4.68,
"eval_steps_per_second": 1.221,
"step": 660
},
{
"epoch": 12.07,
"learning_rate": 2.034664207670925e-05,
"loss": 0.0662,
"step": 664
},
{
"epoch": 12.15,
"learning_rate": 2.003523833385637e-05,
"loss": 0.0572,
"step": 668
},
{
"epoch": 12.22,
"learning_rate": 1.972463450245226e-05,
"loss": 0.061,
"step": 672
},
{
"epoch": 12.29,
"learning_rate": 1.9414880626301147e-05,
"loss": 0.0611,
"step": 676
},
{
"epoch": 12.36,
"learning_rate": 1.9106026612264316e-05,
"loss": 0.0631,
"step": 680
},
{
"epoch": 12.44,
"learning_rate": 1.879812222221929e-05,
"loss": 0.0586,
"step": 684
},
{
"epoch": 12.51,
"learning_rate": 1.84912170650422e-05,
"loss": 0.0597,
"step": 688
},
{
"epoch": 12.58,
"learning_rate": 1.8185360588615058e-05,
"loss": 0.0566,
"step": 692
},
{
"epoch": 12.65,
"learning_rate": 1.7880602071858692e-05,
"loss": 0.0569,
"step": 696
},
{
"epoch": 12.73,
"learning_rate": 1.7576990616793137e-05,
"loss": 0.0548,
"step": 700
},
{
"epoch": 12.8,
"learning_rate": 1.7274575140626318e-05,
"loss": 0.058,
"step": 704
},
{
"epoch": 12.87,
"learning_rate": 1.697340436787273e-05,
"loss": 0.0512,
"step": 708
},
{
"epoch": 12.95,
"learning_rate": 1.667352682250298e-05,
"loss": 0.058,
"step": 712
},
{
"epoch": 13.0,
"gpt4_scores": 0.5,
"step": 715
},
{
"epoch": 13.0,
"eval_loss": 3.918207883834839,
"eval_runtime": 4.9544,
"eval_samples_per_second": 4.642,
"eval_steps_per_second": 1.211,
"step": 715
},
{
"epoch": 13.02,
"learning_rate": 1.637499082012574e-05,
"loss": 0.0681,
"step": 716
},
{
"epoch": 13.09,
"learning_rate": 1.6077844460203206e-05,
"loss": 0.0547,
"step": 720
},
{
"epoch": 13.16,
"learning_rate": 1.5782135618301486e-05,
"loss": 0.0449,
"step": 724
},
{
"epoch": 13.24,
"learning_rate": 1.5487911938376924e-05,
"loss": 0.0555,
"step": 728
},
{
"epoch": 13.31,
"learning_rate": 1.5195220825099862e-05,
"loss": 0.0508,
"step": 732
},
{
"epoch": 13.38,
"learning_rate": 1.4904109436216884e-05,
"loss": 0.0527,
"step": 736
},
{
"epoch": 13.45,
"learning_rate": 1.4614624674952842e-05,
"loss": 0.0521,
"step": 740
},
{
"epoch": 13.53,
"learning_rate": 1.4326813182453958e-05,
"loss": 0.0554,
"step": 744
},
{
"epoch": 13.6,
"learning_rate": 1.4040721330273062e-05,
"loss": 0.05,
"step": 748
},
{
"epoch": 13.67,
"learning_rate": 1.3756395212898359e-05,
"loss": 0.0509,
"step": 752
},
{
"epoch": 13.75,
"learning_rate": 1.3473880640326725e-05,
"loss": 0.0517,
"step": 756
},
{
"epoch": 13.82,
"learning_rate": 1.3193223130682936e-05,
"loss": 0.0557,
"step": 760
},
{
"epoch": 13.89,
"learning_rate": 1.2914467902885901e-05,
"loss": 0.0524,
"step": 764
},
{
"epoch": 13.96,
"learning_rate": 1.2637659869363083e-05,
"loss": 0.0532,
"step": 768
},
{
"epoch": 14.0,
"gpt4_scores": 0.5166666666666667,
"step": 770
},
{
"epoch": 14.0,
"eval_loss": 4.011674880981445,
"eval_runtime": 4.9088,
"eval_samples_per_second": 4.685,
"eval_steps_per_second": 1.222,
"step": 770
},
{
"epoch": 14.04,
"learning_rate": 1.2362843628814267e-05,
"loss": 0.0456,
"step": 772
},
{
"epoch": 14.11,
"learning_rate": 1.2090063459025955e-05,
"loss": 0.0476,
"step": 776
},
{
"epoch": 14.18,
"learning_rate": 1.181936330973744e-05,
"loss": 0.0501,
"step": 780
},
{
"epoch": 14.25,
"learning_rate": 1.155078679555969e-05,
"loss": 0.0497,
"step": 784
},
{
"epoch": 14.33,
"learning_rate": 1.1284377188948258e-05,
"loss": 0.0496,
"step": 788
},
{
"epoch": 14.4,
"learning_rate": 1.1020177413231334e-05,
"loss": 0.0467,
"step": 792
},
{
"epoch": 14.47,
"learning_rate": 1.0758230035694031e-05,
"loss": 0.0539,
"step": 796
},
{
"epoch": 14.55,
"learning_rate": 1.049857726072005e-05,
"loss": 0.0446,
"step": 800
},
{
"epoch": 14.62,
"learning_rate": 1.024126092299176e-05,
"loss": 0.0477,
"step": 804
},
{
"epoch": 14.69,
"learning_rate": 9.986322480749927e-06,
"loss": 0.0529,
"step": 808
},
{
"epoch": 14.76,
"learning_rate": 9.733803009114045e-06,
"loss": 0.0466,
"step": 812
},
{
"epoch": 14.84,
"learning_rate": 9.483743193464408e-06,
"loss": 0.0518,
"step": 816
},
{
"epoch": 14.91,
"learning_rate": 9.236183322886945e-06,
"loss": 0.0479,
"step": 820
},
{
"epoch": 14.98,
"learning_rate": 8.991163283681944e-06,
"loss": 0.0443,
"step": 824
},
{
"epoch": 15.0,
"gpt4_scores": 0.6666666666666666,
"step": 825
},
{
"epoch": 15.0,
"eval_loss": 4.091893672943115,
"eval_runtime": 4.9566,
"eval_samples_per_second": 4.64,
"eval_steps_per_second": 1.211,
"step": 825
},
{
"epoch": 15.05,
"learning_rate": 8.748722552937689e-06,
"loss": 0.0471,
"step": 828
},
{
"epoch": 15.13,
"learning_rate": 8.508900192169964e-06,
"loss": 0.0467,
"step": 832
},
{
"epoch": 15.2,
"learning_rate": 8.271734841028553e-06,
"loss": 0.0498,
"step": 836
},
{
"epoch": 15.27,
"learning_rate": 8.0372647110717e-06,
"loss": 0.0476,
"step": 840
},
{
"epoch": 15.35,
"learning_rate": 7.805527579609576e-06,
"loss": 0.0481,
"step": 844
},
{
"epoch": 15.42,
"learning_rate": 7.576560783617668e-06,
"loss": 0.0466,
"step": 848
},
{
"epoch": 15.49,
"learning_rate": 7.350401213721089e-06,
"loss": 0.0455,
"step": 852
},
{
"epoch": 15.56,
"learning_rate": 7.127085308250914e-06,
"loss": 0.048,
"step": 856
},
{
"epoch": 15.64,
"learning_rate": 6.906649047373246e-06,
"loss": 0.0422,
"step": 860
},
{
"epoch": 15.71,
"learning_rate": 6.689127947292231e-06,
"loss": 0.0442,
"step": 864
},
{
"epoch": 15.78,
"learning_rate": 6.4745570545277075e-06,
"loss": 0.0414,
"step": 868
},
{
"epoch": 15.85,
"learning_rate": 6.2629709402686535e-06,
"loss": 0.0455,
"step": 872
},
{
"epoch": 15.93,
"learning_rate": 6.054403694803079e-06,
"loss": 0.0519,
"step": 876
},
{
"epoch": 16.0,
"learning_rate": 5.848888922025553e-06,
"loss": 0.0464,
"step": 880
},
{
"epoch": 16.07,
"learning_rate": 5.646459734022938e-06,
"loss": 0.0443,
"step": 884
},
{
"epoch": 16.15,
"learning_rate": 5.4471487457395225e-06,
"loss": 0.0405,
"step": 888
},
{
"epoch": 16.22,
"learning_rate": 5.250988069722096e-06,
"loss": 0.0441,
"step": 892
},
{
"epoch": 16.29,
"learning_rate": 5.058009310946119e-06,
"loss": 0.0412,
"step": 896
},
{
"epoch": 16.36,
"learning_rate": 4.868243561723535e-06,
"loss": 0.0468,
"step": 900
},
{
"epoch": 16.44,
"learning_rate": 4.681721396693303e-06,
"loss": 0.0476,
"step": 904
},
{
"epoch": 16.51,
"learning_rate": 4.498472867895223e-06,
"loss": 0.0481,
"step": 908
},
{
"epoch": 16.58,
"learning_rate": 4.318527499928074e-06,
"loss": 0.0485,
"step": 912
},
{
"epoch": 16.65,
"learning_rate": 4.141914285192619e-06,
"loss": 0.0441,
"step": 916
},
{
"epoch": 16.73,
"learning_rate": 3.968661679220468e-06,
"loss": 0.0469,
"step": 920
},
{
"epoch": 16.8,
"learning_rate": 3.798797596089351e-06,
"loss": 0.0436,
"step": 924
},
{
"epoch": 16.87,
"learning_rate": 3.632349403925664e-06,
"loss": 0.0528,
"step": 928
},
{
"epoch": 16.95,
"learning_rate": 3.4693439204949858e-06,
"loss": 0.0461,
"step": 932
},
{
"epoch": 17.0,
"gpt4_scores": 0.4833333333333334,
"step": 935
},
{
"epoch": 17.0,
"eval_loss": 4.148024082183838,
"eval_runtime": 4.957,
"eval_samples_per_second": 4.64,
"eval_steps_per_second": 1.21,
"step": 935
},
{
"epoch": 17.02,
"learning_rate": 3.3098074088812686e-06,
"loss": 0.0472,
"step": 936
},
{
"epoch": 17.09,
"learning_rate": 3.1537655732553768e-06,
"loss": 0.0422,
"step": 940
},
{
"epoch": 17.16,
"learning_rate": 3.0012435547336737e-06,
"loss": 0.0415,
"step": 944
},
{
"epoch": 17.24,
"learning_rate": 2.8522659273273604e-06,
"loss": 0.0446,
"step": 948
},
{
"epoch": 17.31,
"learning_rate": 2.7068566939831645e-06,
"loss": 0.0477,
"step": 952
},
{
"epoch": 17.38,
"learning_rate": 2.565039282716045e-06,
"loss": 0.0448,
"step": 956
},
{
"epoch": 17.45,
"learning_rate": 2.4268365428344736e-06,
"loss": 0.0408,
"step": 960
},
{
"epoch": 17.53,
"learning_rate": 2.29227074125907e-06,
"loss": 0.0472,
"step": 964
},
{
"epoch": 17.6,
"learning_rate": 2.1613635589349756e-06,
"loss": 0.0485,
"step": 968
},
{
"epoch": 17.67,
"learning_rate": 2.0341360873386674e-06,
"loss": 0.0419,
"step": 972
},
{
"epoch": 17.75,
"learning_rate": 1.9106088250797267e-06,
"loss": 0.0515,
"step": 976
},
{
"epoch": 17.82,
"learning_rate": 1.790801674598186e-06,
"loss": 0.0502,
"step": 980
},
{
"epoch": 17.89,
"learning_rate": 1.674733938957873e-06,
"loss": 0.0422,
"step": 984
},
{
"epoch": 17.96,
"learning_rate": 1.5624243187363441e-06,
"loss": 0.0465,
"step": 988
},
{
"epoch": 18.0,
"gpt4_scores": 0.4000000000000001,
"step": 990
},
{
"epoch": 18.0,
"eval_loss": 4.16222620010376,
"eval_runtime": 4.9609,
"eval_samples_per_second": 4.636,
"eval_steps_per_second": 1.209,
"step": 990
},
{
"epoch": 18.04,
"learning_rate": 1.4538909090118846e-06,
"loss": 0.0463,
"step": 992
},
{
"epoch": 18.11,
"learning_rate": 1.3491511964480702e-06,
"loss": 0.0433,
"step": 996
},
{
"epoch": 18.18,
"learning_rate": 1.248222056476367e-06,
"loss": 0.0388,
"step": 1000
},
{
"epoch": 18.25,
"learning_rate": 1.1511197505771843e-06,
"loss": 0.0491,
"step": 1004
},
{
"epoch": 18.33,
"learning_rate": 1.0578599236598707e-06,
"loss": 0.0486,
"step": 1008
},
{
"epoch": 18.4,
"learning_rate": 9.684576015420278e-07,
"loss": 0.0537,
"step": 1012
},
{
"epoch": 18.47,
"learning_rate": 8.829271885286094e-07,
"loss": 0.0464,
"step": 1016
},
{
"epoch": 18.55,
"learning_rate": 8.012824650910938e-07,
"loss": 0.0418,
"step": 1020
},
{
"epoch": 18.62,
"learning_rate": 7.235365856472442e-07,
"loss": 0.0506,
"step": 1024
},
{
"epoch": 18.69,
"learning_rate": 6.497020764416633e-07,
"loss": 0.0398,
"step": 1028
},
{
"epoch": 18.76,
"learning_rate": 5.797908335276214e-07,
"loss": 0.0418,
"step": 1032
},
{
"epoch": 18.84,
"learning_rate": 5.1381412085037e-07,
"loss": 0.041,
"step": 1036
},
{
"epoch": 18.91,
"learning_rate": 4.517825684323324e-07,
"loss": 0.0446,
"step": 1040
},
{
"epoch": 18.98,
"learning_rate": 3.9370617066040726e-07,
"loss": 0.049,
"step": 1044
},
{
"epoch": 19.0,
"gpt4_scores": 0.5333333333333333,
"step": 1045
},
{
"epoch": 19.0,
"eval_loss": 4.166710376739502,
"eval_runtime": 4.9226,
"eval_samples_per_second": 4.672,
"eval_steps_per_second": 1.219,
"step": 1045
},
{
"epoch": 19.05,
"learning_rate": 3.395942846757066e-07,
"loss": 0.0449,
"step": 1048
},
{
"epoch": 19.13,
"learning_rate": 2.894556288659395e-07,
"loss": 0.0444,
"step": 1052
},
{
"epoch": 19.2,
"learning_rate": 2.4329828146074095e-07,
"loss": 0.0465,
"step": 1056
},
{
"epoch": 19.27,
"learning_rate": 2.011296792301165e-07,
"loss": 0.0464,
"step": 1060
},
{
"epoch": 19.35,
"learning_rate": 1.6295661628624447e-07,
"loss": 0.0446,
"step": 1064
},
{
"epoch": 19.42,
"learning_rate": 1.2878524298882698e-07,
"loss": 0.0425,
"step": 1068
},
{
"epoch": 19.49,
"learning_rate": 9.862106495415469e-08,
"loss": 0.054,
"step": 1072
},
{
"epoch": 19.56,
"learning_rate": 7.246894216806355e-08,
"loss": 0.0421,
"step": 1076
},
{
"epoch": 19.64,
"learning_rate": 5.033308820289184e-08,
"loss": 0.045,
"step": 1080
},
{
"epoch": 19.71,
"learning_rate": 3.221706953860093e-08,
"loss": 0.0455,
"step": 1084
},
{
"epoch": 19.78,
"learning_rate": 1.812380498815991e-08,
"loss": 0.0411,
"step": 1088
},
{
"epoch": 19.85,
"learning_rate": 8.055565227271799e-09,
"loss": 0.0504,
"step": 1092
},
{
"epoch": 19.93,
"learning_rate": 2.0139724285161977e-09,
"loss": 0.0432,
"step": 1096
},
{
"epoch": 20.0,
"learning_rate": 0.0,
"loss": 0.0416,
"step": 1100
},
{
"epoch": 20.0,
"gpt4_scores": 0.43333333333333335,
"step": 1100
},
{
"epoch": 20.0,
"eval_loss": 4.167160511016846,
"eval_runtime": 4.9522,
"eval_samples_per_second": 4.644,
"eval_steps_per_second": 1.212,
"step": 1100
},
{
"epoch": 20.0,
"step": 1100,
"total_flos": 3.795619210798694e+16,
"train_loss": 0.009085019393400713,
"train_runtime": 1593.5865,
"train_samples_per_second": 2.723,
"train_steps_per_second": 0.69
}
],
"logging_steps": 4,
"max_steps": 1100,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 55,
"total_flos": 3.795619210798694e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
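
The state above can be inspected programmatically. The following is a minimal sketch, assuming the JSON is saved locally under the filename shown in the header (trainer_state.json); the script is illustrative and not part of this repository:

import json

# Load the trainer state written by the Hugging Face Trainer.
with open("trainer_state.json") as f:
    state = json.load(f)

# Collect the per-epoch evaluation loss and the custom gpt4_scores metric
# from the log_history entries that carry them.
eval_losses = {e["epoch"]: e["eval_loss"] for e in state["log_history"] if "eval_loss" in e}
gpt4_scores = {e["epoch"]: e["gpt4_scores"] for e in state["log_history"] if "gpt4_scores" in e}

for epoch in sorted(eval_losses):
    print(f"epoch {epoch:>4}: eval_loss={eval_losses[epoch]:.4f}, "
          f"gpt4_score={gpt4_scores.get(epoch, float('nan')):.3f}")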