Yi-6B-ruozhiba-1e-5 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 20.0,
"eval_steps": 500,
"global_step": 1100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"learning_rate": 9.090909090909091e-08,
"loss": 2.3833,
"step": 1
},
{
"epoch": 0.07,
"learning_rate": 3.6363636363636366e-07,
"loss": 2.4789,
"step": 4
},
{
"epoch": 0.15,
"learning_rate": 7.272727272727273e-07,
"loss": 2.3195,
"step": 8
},
{
"epoch": 0.22,
"learning_rate": 1.090909090909091e-06,
"loss": 2.3366,
"step": 12
},
{
"epoch": 0.29,
"learning_rate": 1.4545454545454546e-06,
"loss": 2.3221,
"step": 16
},
{
"epoch": 0.36,
"learning_rate": 1.8181818181818183e-06,
"loss": 2.4036,
"step": 20
},
{
"epoch": 0.44,
"learning_rate": 2.181818181818182e-06,
"loss": 2.4224,
"step": 24
},
{
"epoch": 0.51,
"learning_rate": 2.5454545454545456e-06,
"loss": 2.6085,
"step": 28
},
{
"epoch": 0.58,
"learning_rate": 2.9090909090909093e-06,
"loss": 2.5477,
"step": 32
},
{
"epoch": 0.65,
"learning_rate": 3.272727272727273e-06,
"loss": 2.4446,
"step": 36
},
{
"epoch": 0.73,
"learning_rate": 3.6363636363636366e-06,
"loss": 2.3109,
"step": 40
},
{
"epoch": 0.8,
"learning_rate": 4.000000000000001e-06,
"loss": 2.4149,
"step": 44
},
{
"epoch": 0.87,
"learning_rate": 4.363636363636364e-06,
"loss": 2.5514,
"step": 48
},
{
"epoch": 0.95,
"learning_rate": 4.727272727272728e-06,
"loss": 2.3816,
"step": 52
},
{
"epoch": 1.02,
"learning_rate": 5.090909090909091e-06,
"loss": 2.6293,
"step": 56
},
{
"epoch": 1.09,
"learning_rate": 5.4545454545454545e-06,
"loss": 2.2422,
"step": 60
},
{
"epoch": 1.16,
"learning_rate": 5.8181818181818185e-06,
"loss": 2.4031,
"step": 64
},
{
"epoch": 1.24,
"learning_rate": 6.181818181818182e-06,
"loss": 2.2303,
"step": 68
},
{
"epoch": 1.31,
"learning_rate": 6.545454545454546e-06,
"loss": 2.2847,
"step": 72
},
{
"epoch": 1.38,
"learning_rate": 6.90909090909091e-06,
"loss": 2.1578,
"step": 76
},
{
"epoch": 1.45,
"learning_rate": 7.272727272727273e-06,
"loss": 2.1774,
"step": 80
},
{
"epoch": 1.53,
"learning_rate": 7.636363636363638e-06,
"loss": 2.197,
"step": 84
},
{
"epoch": 1.6,
"learning_rate": 8.000000000000001e-06,
"loss": 2.2093,
"step": 88
},
{
"epoch": 1.67,
"learning_rate": 8.363636363636365e-06,
"loss": 2.1004,
"step": 92
},
{
"epoch": 1.75,
"learning_rate": 8.727272727272728e-06,
"loss": 2.0526,
"step": 96
},
{
"epoch": 1.82,
"learning_rate": 9.090909090909091e-06,
"loss": 2.0771,
"step": 100
},
{
"epoch": 1.89,
"learning_rate": 9.454545454545456e-06,
"loss": 2.0219,
"step": 104
},
{
"epoch": 1.96,
"learning_rate": 9.81818181818182e-06,
"loss": 1.9439,
"step": 108
},
{
"epoch": 2.0,
"gpt4_scores": 0.3833333333333333,
"step": 110
},
{
"epoch": 2.0,
"eval_loss": 2.0205769538879395,
"eval_runtime": 4.9749,
"eval_samples_per_second": 4.623,
"eval_steps_per_second": 1.206,
"step": 110
},
{
"epoch": 2.04,
"learning_rate": 9.999899300364534e-06,
"loss": 2.1647,
"step": 112
},
{
"epoch": 2.11,
"learning_rate": 9.99909372761763e-06,
"loss": 1.9397,
"step": 116
},
{
"epoch": 2.18,
"learning_rate": 9.997482711915926e-06,
"loss": 2.1249,
"step": 120
},
{
"epoch": 2.25,
"learning_rate": 9.99506651282272e-06,
"loss": 1.9123,
"step": 124
},
{
"epoch": 2.33,
"learning_rate": 9.991845519630679e-06,
"loss": 1.8704,
"step": 128
},
{
"epoch": 2.4,
"learning_rate": 9.987820251299121e-06,
"loss": 1.9337,
"step": 132
},
{
"epoch": 2.47,
"learning_rate": 9.982991356370404e-06,
"loss": 1.8947,
"step": 136
},
{
"epoch": 2.55,
"learning_rate": 9.977359612865424e-06,
"loss": 1.8811,
"step": 140
},
{
"epoch": 2.62,
"learning_rate": 9.970925928158275e-06,
"loss": 1.8535,
"step": 144
},
{
"epoch": 2.69,
"learning_rate": 9.963691338830045e-06,
"loss": 1.8775,
"step": 148
},
{
"epoch": 2.76,
"learning_rate": 9.955657010501807e-06,
"loss": 1.8215,
"step": 152
},
{
"epoch": 2.84,
"learning_rate": 9.946824237646823e-06,
"loss": 1.9721,
"step": 156
},
{
"epoch": 2.91,
"learning_rate": 9.937194443381972e-06,
"loss": 2.0102,
"step": 160
},
{
"epoch": 2.98,
"learning_rate": 9.926769179238467e-06,
"loss": 1.8731,
"step": 164
},
{
"epoch": 3.0,
"gpt4_scores": 0.7999999999999999,
"step": 165
},
{
"epoch": 3.0,
"eval_loss": 1.9054837226867676,
"eval_runtime": 4.9773,
"eval_samples_per_second": 4.621,
"eval_steps_per_second": 1.205,
"step": 165
},
{
"epoch": 3.05,
"learning_rate": 9.915550124911866e-06,
"loss": 1.7406,
"step": 168
},
{
"epoch": 3.13,
"learning_rate": 9.903539087991462e-06,
"loss": 1.9294,
"step": 172
},
{
"epoch": 3.2,
"learning_rate": 9.890738003669029e-06,
"loss": 1.8174,
"step": 176
},
{
"epoch": 3.27,
"learning_rate": 9.877148934427037e-06,
"loss": 1.8922,
"step": 180
},
{
"epoch": 3.35,
"learning_rate": 9.862774069706346e-06,
"loss": 1.823,
"step": 184
},
{
"epoch": 3.42,
"learning_rate": 9.847615725553457e-06,
"loss": 1.7867,
"step": 188
},
{
"epoch": 3.49,
"learning_rate": 9.831676344247343e-06,
"loss": 1.7675,
"step": 192
},
{
"epoch": 3.56,
"learning_rate": 9.814958493905962e-06,
"loss": 1.8052,
"step": 196
},
{
"epoch": 3.64,
"learning_rate": 9.797464868072489e-06,
"loss": 1.799,
"step": 200
},
{
"epoch": 3.71,
"learning_rate": 9.779198285281326e-06,
"loss": 1.8455,
"step": 204
},
{
"epoch": 3.78,
"learning_rate": 9.760161688604008e-06,
"loss": 1.8541,
"step": 208
},
{
"epoch": 3.85,
"learning_rate": 9.740358145174999e-06,
"loss": 1.7094,
"step": 212
},
{
"epoch": 3.93,
"learning_rate": 9.719790845697534e-06,
"loss": 1.8727,
"step": 216
},
{
"epoch": 4.0,
"learning_rate": 9.698463103929542e-06,
"loss": 1.7574,
"step": 220
},
{
"epoch": 4.0,
"gpt4_scores": 0.7999999999999999,
"step": 220
},
{
"epoch": 4.0,
"eval_loss": 1.8509678840637207,
"eval_runtime": 4.9667,
"eval_samples_per_second": 4.631,
"eval_steps_per_second": 1.208,
"step": 220
},
{
"epoch": 4.07,
"learning_rate": 9.676378356149733e-06,
"loss": 1.7719,
"step": 224
},
{
"epoch": 4.15,
"learning_rate": 9.653540160603956e-06,
"loss": 1.7947,
"step": 228
},
{
"epoch": 4.22,
"learning_rate": 9.629952196931902e-06,
"loss": 1.6527,
"step": 232
},
{
"epoch": 4.29,
"learning_rate": 9.60561826557425e-06,
"loss": 1.8207,
"step": 236
},
{
"epoch": 4.36,
"learning_rate": 9.580542287160348e-06,
"loss": 1.8435,
"step": 240
},
{
"epoch": 4.44,
"learning_rate": 9.554728301876525e-06,
"loss": 1.6849,
"step": 244
},
{
"epoch": 4.51,
"learning_rate": 9.528180468815155e-06,
"loss": 1.7372,
"step": 248
},
{
"epoch": 4.58,
"learning_rate": 9.50090306530454e-06,
"loss": 1.7699,
"step": 252
},
{
"epoch": 4.65,
"learning_rate": 9.47290048621977e-06,
"loss": 1.7707,
"step": 256
},
{
"epoch": 4.73,
"learning_rate": 9.444177243274619e-06,
"loss": 1.7384,
"step": 260
},
{
"epoch": 4.8,
"learning_rate": 9.414737964294636e-06,
"loss": 1.7769,
"step": 264
},
{
"epoch": 4.87,
"learning_rate": 9.384587392471516e-06,
"loss": 1.7427,
"step": 268
},
{
"epoch": 4.95,
"learning_rate": 9.353730385598887e-06,
"loss": 1.7266,
"step": 272
},
{
"epoch": 5.0,
"gpt4_scores": 0.7833333333333332,
"step": 275
},
{
"epoch": 5.0,
"eval_loss": 1.8366360664367676,
"eval_runtime": 4.9298,
"eval_samples_per_second": 4.665,
"eval_steps_per_second": 1.217,
"step": 275
},
{
"epoch": 5.02,
"learning_rate": 9.322171915289635e-06,
"loss": 1.7487,
"step": 276
},
{
"epoch": 5.09,
"learning_rate": 9.289917066174887e-06,
"loss": 1.765,
"step": 280
},
{
"epoch": 5.16,
"learning_rate": 9.256971035084786e-06,
"loss": 1.7863,
"step": 284
},
{
"epoch": 5.24,
"learning_rate": 9.223339130211194e-06,
"loss": 1.7161,
"step": 288
},
{
"epoch": 5.31,
"learning_rate": 9.189026770252437e-06,
"loss": 1.699,
"step": 292
},
{
"epoch": 5.38,
"learning_rate": 9.154039483540273e-06,
"loss": 1.8007,
"step": 296
},
{
"epoch": 5.45,
"learning_rate": 9.118382907149164e-06,
"loss": 1.7352,
"step": 300
},
{
"epoch": 5.53,
"learning_rate": 9.08206278598805e-06,
"loss": 1.6892,
"step": 304
},
{
"epoch": 5.6,
"learning_rate": 9.045084971874738e-06,
"loss": 1.7667,
"step": 308
},
{
"epoch": 5.67,
"learning_rate": 9.007455422593077e-06,
"loss": 1.6885,
"step": 312
},
{
"epoch": 5.75,
"learning_rate": 8.969180200933048e-06,
"loss": 1.6391,
"step": 316
},
{
"epoch": 5.82,
"learning_rate": 8.930265473713939e-06,
"loss": 1.7314,
"step": 320
},
{
"epoch": 5.89,
"learning_rate": 8.890717510790763e-06,
"loss": 1.6487,
"step": 324
},
{
"epoch": 5.96,
"learning_rate": 8.850542684044078e-06,
"loss": 1.6627,
"step": 328
},
{
"epoch": 6.04,
"learning_rate": 8.809747466353356e-06,
"loss": 1.7486,
"step": 332
},
{
"epoch": 6.11,
"learning_rate": 8.768338430554083e-06,
"loss": 1.7223,
"step": 336
},
{
"epoch": 6.18,
"learning_rate": 8.726322248378775e-06,
"loss": 1.675,
"step": 340
},
{
"epoch": 6.25,
"learning_rate": 8.683705689382025e-06,
"loss": 1.6991,
"step": 344
},
{
"epoch": 6.33,
"learning_rate": 8.640495619849821e-06,
"loss": 1.6633,
"step": 348
},
{
"epoch": 6.4,
"learning_rate": 8.596699001693257e-06,
"loss": 1.7376,
"step": 352
},
{
"epoch": 6.47,
"learning_rate": 8.552322891326846e-06,
"loss": 1.7234,
"step": 356
},
{
"epoch": 6.55,
"learning_rate": 8.507374438531606e-06,
"loss": 1.6112,
"step": 360
},
{
"epoch": 6.62,
"learning_rate": 8.461860885303116e-06,
"loss": 1.6037,
"step": 364
},
{
"epoch": 6.69,
"learning_rate": 8.415789564684673e-06,
"loss": 1.6574,
"step": 368
},
{
"epoch": 6.76,
"learning_rate": 8.36916789958584e-06,
"loss": 1.6418,
"step": 372
},
{
"epoch": 6.84,
"learning_rate": 8.322003401586463e-06,
"loss": 1.7189,
"step": 376
},
{
"epoch": 6.91,
"learning_rate": 8.274303669726427e-06,
"loss": 1.5976,
"step": 380
},
{
"epoch": 6.98,
"learning_rate": 8.226076389281316e-06,
"loss": 1.6036,
"step": 384
},
{
"epoch": 7.0,
"gpt4_scores": 0.75,
"step": 385
},
{
"epoch": 7.0,
"eval_loss": 1.830824851989746,
"eval_runtime": 4.9258,
"eval_samples_per_second": 4.669,
"eval_steps_per_second": 1.218,
"step": 385
},
{
"epoch": 7.05,
"learning_rate": 8.177329330524182e-06,
"loss": 1.852,
"step": 388
},
{
"epoch": 7.13,
"learning_rate": 8.128070347473609e-06,
"loss": 1.6013,
"step": 392
},
{
"epoch": 7.2,
"learning_rate": 8.078307376628292e-06,
"loss": 1.587,
"step": 396
},
{
"epoch": 7.27,
"learning_rate": 8.028048435688333e-06,
"loss": 1.6268,
"step": 400
},
{
"epoch": 7.35,
"learning_rate": 7.97730162226344e-06,
"loss": 1.5518,
"step": 404
},
{
"epoch": 7.42,
"learning_rate": 7.92607511256826e-06,
"loss": 1.59,
"step": 408
},
{
"epoch": 7.49,
"learning_rate": 7.874377160105037e-06,
"loss": 1.6115,
"step": 412
},
{
"epoch": 7.56,
"learning_rate": 7.822216094333847e-06,
"loss": 1.5037,
"step": 416
},
{
"epoch": 7.64,
"learning_rate": 7.769600319330553e-06,
"loss": 1.6263,
"step": 420
},
{
"epoch": 7.71,
"learning_rate": 7.716538312432767e-06,
"loss": 1.6295,
"step": 424
},
{
"epoch": 7.78,
"learning_rate": 7.663038622873999e-06,
"loss": 1.6025,
"step": 428
},
{
"epoch": 7.85,
"learning_rate": 7.60910987040623e-06,
"loss": 1.6275,
"step": 432
},
{
"epoch": 7.93,
"learning_rate": 7.554760743911104e-06,
"loss": 1.6367,
"step": 436
},
{
"epoch": 8.0,
"learning_rate": 7.500000000000001e-06,
"loss": 1.7214,
"step": 440
},
{
"epoch": 8.0,
"gpt4_scores": 0.75,
"step": 440
},
{
"epoch": 8.0,
"eval_loss": 1.8379725217819214,
"eval_runtime": 4.9284,
"eval_samples_per_second": 4.667,
"eval_steps_per_second": 1.217,
"step": 440
},
{
"epoch": 8.07,
"learning_rate": 7.444836461603195e-06,
"loss": 1.5514,
"step": 444
},
{
"epoch": 8.15,
"learning_rate": 7.3892790165483164e-06,
"loss": 1.5264,
"step": 448
},
{
"epoch": 8.22,
"learning_rate": 7.333336616128369e-06,
"loss": 1.5404,
"step": 452
},
{
"epoch": 8.29,
"learning_rate": 7.2770182736595164e-06,
"loss": 1.5687,
"step": 456
},
{
"epoch": 8.36,
"learning_rate": 7.2203330630288714e-06,
"loss": 1.5514,
"step": 460
},
{
"epoch": 8.44,
"learning_rate": 7.163290117232542e-06,
"loss": 1.5833,
"step": 464
},
{
"epoch": 8.51,
"learning_rate": 7.105898626904134e-06,
"loss": 1.5976,
"step": 468
},
{
"epoch": 8.58,
"learning_rate": 7.048167838833977e-06,
"loss": 1.5742,
"step": 472
},
{
"epoch": 8.65,
"learning_rate": 6.990107054479313e-06,
"loss": 1.6667,
"step": 476
},
{
"epoch": 8.73,
"learning_rate": 6.931725628465643e-06,
"loss": 1.5979,
"step": 480
},
{
"epoch": 8.8,
"learning_rate": 6.873032967079562e-06,
"loss": 1.5281,
"step": 484
},
{
"epoch": 8.87,
"learning_rate": 6.814038526753205e-06,
"loss": 1.5594,
"step": 488
},
{
"epoch": 8.95,
"learning_rate": 6.75475181254068e-06,
"loss": 1.5245,
"step": 492
},
{
"epoch": 9.0,
"gpt4_scores": 0.7666666666666666,
"step": 495
},
{
"epoch": 9.0,
"eval_loss": 1.8495147228240967,
"eval_runtime": 4.9697,
"eval_samples_per_second": 4.628,
"eval_steps_per_second": 1.207,
"step": 495
},
{
"epoch": 9.02,
"learning_rate": 6.695182376586603e-06,
"loss": 1.4976,
"step": 496
},
{
"epoch": 9.09,
"learning_rate": 6.635339816587109e-06,
"loss": 1.5139,
"step": 500
},
{
"epoch": 9.16,
"learning_rate": 6.5752337742434644e-06,
"loss": 1.5678,
"step": 504
},
{
"epoch": 9.24,
"learning_rate": 6.514873933708637e-06,
"loss": 1.5552,
"step": 508
},
{
"epoch": 9.31,
"learning_rate": 6.454270020026996e-06,
"loss": 1.5398,
"step": 512
},
{
"epoch": 9.38,
"learning_rate": 6.39343179756744e-06,
"loss": 1.4932,
"step": 516
},
{
"epoch": 9.45,
"learning_rate": 6.332369068450175e-06,
"loss": 1.5297,
"step": 520
},
{
"epoch": 9.53,
"learning_rate": 6.271091670967437e-06,
"loss": 1.5346,
"step": 524
},
{
"epoch": 9.6,
"learning_rate": 6.209609477998339e-06,
"loss": 1.4501,
"step": 528
},
{
"epoch": 9.67,
"learning_rate": 6.1479323954182055e-06,
"loss": 1.48,
"step": 532
},
{
"epoch": 9.75,
"learning_rate": 6.08607036050254e-06,
"loss": 1.5156,
"step": 536
},
{
"epoch": 9.82,
"learning_rate": 6.024033340325954e-06,
"loss": 1.55,
"step": 540
},
{
"epoch": 9.89,
"learning_rate": 5.961831330156306e-06,
"loss": 1.4848,
"step": 544
},
{
"epoch": 9.96,
"learning_rate": 5.89947435184427e-06,
"loss": 1.5239,
"step": 548
},
{
"epoch": 10.0,
"gpt4_scores": 0.7333333333333334,
"step": 550
},
{
"epoch": 10.0,
"eval_loss": 1.8637609481811523,
"eval_runtime": 4.95,
"eval_samples_per_second": 4.646,
"eval_steps_per_second": 1.212,
"step": 550
},
{
"epoch": 10.04,
"learning_rate": 5.8369724522086545e-06,
"loss": 1.4492,
"step": 552
},
{
"epoch": 10.11,
"learning_rate": 5.774335701417662e-06,
"loss": 1.5163,
"step": 556
},
{
"epoch": 10.18,
"learning_rate": 5.711574191366427e-06,
"loss": 1.3988,
"step": 560
},
{
"epoch": 10.25,
"learning_rate": 5.648698034051009e-06,
"loss": 1.4406,
"step": 564
},
{
"epoch": 10.33,
"learning_rate": 5.585717359939192e-06,
"loss": 1.4894,
"step": 568
},
{
"epoch": 10.4,
"learning_rate": 5.522642316338268e-06,
"loss": 1.5209,
"step": 572
},
{
"epoch": 10.47,
"learning_rate": 5.459483065760138e-06,
"loss": 1.4599,
"step": 576
},
{
"epoch": 10.55,
"learning_rate": 5.396249784283943e-06,
"loss": 1.44,
"step": 580
},
{
"epoch": 10.62,
"learning_rate": 5.33295265991652e-06,
"loss": 1.4532,
"step": 584
},
{
"epoch": 10.69,
"learning_rate": 5.26960189095093e-06,
"loss": 1.4861,
"step": 588
},
{
"epoch": 10.76,
"learning_rate": 5.206207684323337e-06,
"loss": 1.5065,
"step": 592
},
{
"epoch": 10.84,
"learning_rate": 5.142780253968481e-06,
"loss": 1.5798,
"step": 596
},
{
"epoch": 10.91,
"learning_rate": 5.07932981917404e-06,
"loss": 1.4649,
"step": 600
},
{
"epoch": 10.98,
"learning_rate": 5.015866602934112e-06,
"loss": 1.4286,
"step": 604
},
{
"epoch": 11.0,
"gpt4_scores": 0.7333333333333334,
"step": 605
},
{
"epoch": 11.0,
"eval_loss": 1.8770924806594849,
"eval_runtime": 4.9833,
"eval_samples_per_second": 4.615,
"eval_steps_per_second": 1.204,
"step": 605
},
{
"epoch": 11.05,
"learning_rate": 4.952400830302117e-06,
"loss": 1.437,
"step": 608
},
{
"epoch": 11.13,
"learning_rate": 4.888942726743353e-06,
"loss": 1.4056,
"step": 612
},
{
"epoch": 11.2,
"learning_rate": 4.825502516487497e-06,
"loss": 1.5407,
"step": 616
},
{
"epoch": 11.27,
"learning_rate": 4.762090420881289e-06,
"loss": 1.3875,
"step": 620
},
{
"epoch": 11.35,
"learning_rate": 4.6987166567417085e-06,
"loss": 1.3708,
"step": 624
},
{
"epoch": 11.42,
"learning_rate": 4.635391434709847e-06,
"loss": 1.4348,
"step": 628
},
{
"epoch": 11.49,
"learning_rate": 4.572124957605803e-06,
"loss": 1.4,
"step": 632
},
{
"epoch": 11.56,
"learning_rate": 4.5089274187848144e-06,
"loss": 1.5172,
"step": 636
},
{
"epoch": 11.64,
"learning_rate": 4.445809000494945e-06,
"loss": 1.505,
"step": 640
},
{
"epoch": 11.71,
"learning_rate": 4.382779872236527e-06,
"loss": 1.3405,
"step": 644
},
{
"epoch": 11.78,
"learning_rate": 4.319850189123681e-06,
"loss": 1.4869,
"step": 648
},
{
"epoch": 11.85,
"learning_rate": 4.257030090248142e-06,
"loss": 1.4366,
"step": 652
},
{
"epoch": 11.93,
"learning_rate": 4.194329697045681e-06,
"loss": 1.3966,
"step": 656
},
{
"epoch": 12.0,
"learning_rate": 4.131759111665349e-06,
"loss": 1.3534,
"step": 660
},
{
"epoch": 12.0,
"gpt4_scores": 0.7666666666666666,
"step": 660
},
{
"epoch": 12.0,
"eval_loss": 1.9029945135116577,
"eval_runtime": 4.9424,
"eval_samples_per_second": 4.654,
"eval_steps_per_second": 1.214,
"step": 660
},
{
"epoch": 12.07,
"learning_rate": 4.06932841534185e-06,
"loss": 1.294,
"step": 664
},
{
"epoch": 12.15,
"learning_rate": 4.007047666771274e-06,
"loss": 1.4497,
"step": 668
},
{
"epoch": 12.22,
"learning_rate": 3.944926900490452e-06,
"loss": 1.4023,
"step": 672
},
{
"epoch": 12.29,
"learning_rate": 3.882976125260229e-06,
"loss": 1.4619,
"step": 676
},
{
"epoch": 12.36,
"learning_rate": 3.821205322452863e-06,
"loss": 1.4371,
"step": 680
},
{
"epoch": 12.44,
"learning_rate": 3.7596244444438577e-06,
"loss": 1.4326,
"step": 684
},
{
"epoch": 12.51,
"learning_rate": 3.69824341300844e-06,
"loss": 1.3918,
"step": 688
},
{
"epoch": 12.58,
"learning_rate": 3.637072117723012e-06,
"loss": 1.3524,
"step": 692
},
{
"epoch": 12.65,
"learning_rate": 3.5761204143717387e-06,
"loss": 1.3517,
"step": 696
},
{
"epoch": 12.73,
"learning_rate": 3.5153981233586277e-06,
"loss": 1.3572,
"step": 700
},
{
"epoch": 12.8,
"learning_rate": 3.4549150281252635e-06,
"loss": 1.3718,
"step": 704
},
{
"epoch": 12.87,
"learning_rate": 3.394680873574546e-06,
"loss": 1.3663,
"step": 708
},
{
"epoch": 12.95,
"learning_rate": 3.3347053645005965e-06,
"loss": 1.386,
"step": 712
},
{
"epoch": 13.02,
"learning_rate": 3.274998164025148e-06,
"loss": 1.4448,
"step": 716
},
{
"epoch": 13.09,
"learning_rate": 3.2155688920406415e-06,
"loss": 1.438,
"step": 720
},
{
"epoch": 13.16,
"learning_rate": 3.156427123660297e-06,
"loss": 1.3065,
"step": 724
},
{
"epoch": 13.24,
"learning_rate": 3.097582387675385e-06,
"loss": 1.377,
"step": 728
},
{
"epoch": 13.31,
"learning_rate": 3.0390441650199727e-06,
"loss": 1.336,
"step": 732
},
{
"epoch": 13.38,
"learning_rate": 2.980821887243377e-06,
"loss": 1.4014,
"step": 736
},
{
"epoch": 13.45,
"learning_rate": 2.9229249349905686e-06,
"loss": 1.2758,
"step": 740
},
{
"epoch": 13.53,
"learning_rate": 2.8653626364907918e-06,
"loss": 1.3139,
"step": 744
},
{
"epoch": 13.6,
"learning_rate": 2.8081442660546126e-06,
"loss": 1.3392,
"step": 748
},
{
"epoch": 13.67,
"learning_rate": 2.751279042579672e-06,
"loss": 1.4052,
"step": 752
},
{
"epoch": 13.75,
"learning_rate": 2.694776128065345e-06,
"loss": 1.3885,
"step": 756
},
{
"epoch": 13.82,
"learning_rate": 2.6386446261365874e-06,
"loss": 1.2347,
"step": 760
},
{
"epoch": 13.89,
"learning_rate": 2.5828935805771804e-06,
"loss": 1.3792,
"step": 764
},
{
"epoch": 13.96,
"learning_rate": 2.527531973872617e-06,
"loss": 1.3895,
"step": 768
},
{
"epoch": 14.0,
"gpt4_scores": 0.7999999999999999,
"step": 770
},
{
"epoch": 14.0,
"eval_loss": 1.9447271823883057,
"eval_runtime": 4.942,
"eval_samples_per_second": 4.654,
"eval_steps_per_second": 1.214,
"step": 770
},
{
"epoch": 14.04,
"learning_rate": 2.4725687257628533e-06,
"loss": 1.5377,
"step": 772
},
{
"epoch": 14.11,
"learning_rate": 2.418012691805191e-06,
"loss": 1.3932,
"step": 776
},
{
"epoch": 14.18,
"learning_rate": 2.363872661947488e-06,
"loss": 1.3257,
"step": 780
},
{
"epoch": 14.25,
"learning_rate": 2.310157359111938e-06,
"loss": 1.3721,
"step": 784
},
{
"epoch": 14.33,
"learning_rate": 2.2568754377896516e-06,
"loss": 1.3564,
"step": 788
},
{
"epoch": 14.4,
"learning_rate": 2.204035482646267e-06,
"loss": 1.2608,
"step": 792
},
{
"epoch": 14.47,
"learning_rate": 2.1516460071388062e-06,
"loss": 1.3862,
"step": 796
},
{
"epoch": 14.55,
"learning_rate": 2.09971545214401e-06,
"loss": 1.2653,
"step": 800
},
{
"epoch": 14.62,
"learning_rate": 2.0482521845983522e-06,
"loss": 1.3125,
"step": 804
},
{
"epoch": 14.69,
"learning_rate": 1.9972644961499853e-06,
"loss": 1.31,
"step": 808
},
{
"epoch": 14.76,
"learning_rate": 1.946760601822809e-06,
"loss": 1.3196,
"step": 812
},
{
"epoch": 14.84,
"learning_rate": 1.8967486386928819e-06,
"loss": 1.3496,
"step": 816
},
{
"epoch": 14.91,
"learning_rate": 1.8472366645773892e-06,
"loss": 1.3419,
"step": 820
},
{
"epoch": 14.98,
"learning_rate": 1.798232656736389e-06,
"loss": 1.3721,
"step": 824
},
{
"epoch": 15.0,
"gpt4_scores": 0.7999999999999999,
"step": 825
},
{
"epoch": 15.0,
"eval_loss": 1.9617422819137573,
"eval_runtime": 4.9528,
"eval_samples_per_second": 4.644,
"eval_steps_per_second": 1.211,
"step": 825
},
{
"epoch": 15.05,
"learning_rate": 1.7497445105875377e-06,
"loss": 1.4496,
"step": 828
},
{
"epoch": 15.13,
"learning_rate": 1.7017800384339928e-06,
"loss": 1.3551,
"step": 832
},
{
"epoch": 15.2,
"learning_rate": 1.6543469682057105e-06,
"loss": 1.3492,
"step": 836
},
{
"epoch": 15.27,
"learning_rate": 1.6074529422143398e-06,
"loss": 1.2737,
"step": 840
},
{
"epoch": 15.35,
"learning_rate": 1.561105515921915e-06,
"loss": 1.2914,
"step": 844
},
{
"epoch": 15.42,
"learning_rate": 1.5153121567235334e-06,
"loss": 1.2787,
"step": 848
},
{
"epoch": 15.49,
"learning_rate": 1.470080242744218e-06,
"loss": 1.3057,
"step": 852
},
{
"epoch": 15.56,
"learning_rate": 1.4254170616501828e-06,
"loss": 1.3169,
"step": 856
},
{
"epoch": 15.64,
"learning_rate": 1.3813298094746491e-06,
"loss": 1.3178,
"step": 860
},
{
"epoch": 15.71,
"learning_rate": 1.3378255894584463e-06,
"loss": 1.2875,
"step": 864
},
{
"epoch": 15.78,
"learning_rate": 1.2949114109055417e-06,
"loss": 1.3542,
"step": 868
},
{
"epoch": 15.85,
"learning_rate": 1.2525941880537307e-06,
"loss": 1.3405,
"step": 872
},
{
"epoch": 15.93,
"learning_rate": 1.210880738960616e-06,
"loss": 1.2646,
"step": 876
},
{
"epoch": 16.0,
"learning_rate": 1.1697777844051105e-06,
"loss": 1.3598,
"step": 880
},
{
"epoch": 16.0,
"gpt4_scores": 0.6166666666666667,
"step": 880
},
{
"epoch": 16.0,
"eval_loss": 1.971917986869812,
"eval_runtime": 4.9531,
"eval_samples_per_second": 4.644,
"eval_steps_per_second": 1.211,
"step": 880
},
{
"epoch": 16.07,
"learning_rate": 1.1292919468045876e-06,
"loss": 1.2695,
"step": 884
},
{
"epoch": 16.15,
"learning_rate": 1.0894297491479044e-06,
"loss": 1.3693,
"step": 888
},
{
"epoch": 16.22,
"learning_rate": 1.0501976139444191e-06,
"loss": 1.2719,
"step": 892
},
{
"epoch": 16.29,
"learning_rate": 1.0116018621892237e-06,
"loss": 1.3008,
"step": 896
},
{
"epoch": 16.36,
"learning_rate": 9.73648712344707e-07,
"loss": 1.344,
"step": 900
},
{
"epoch": 16.44,
"learning_rate": 9.363442793386606e-07,
"loss": 1.2761,
"step": 904
},
{
"epoch": 16.51,
"learning_rate": 8.996945735790447e-07,
"loss": 1.3216,
"step": 908
},
{
"epoch": 16.58,
"learning_rate": 8.637054999856148e-07,
"loss": 1.3374,
"step": 912
},
{
"epoch": 16.65,
"learning_rate": 8.283828570385239e-07,
"loss": 1.269,
"step": 916
},
{
"epoch": 16.73,
"learning_rate": 7.937323358440935e-07,
"loss": 1.2961,
"step": 920
},
{
"epoch": 16.8,
"learning_rate": 7.597595192178702e-07,
"loss": 1.277,
"step": 924
},
{
"epoch": 16.87,
"learning_rate": 7.264698807851328e-07,
"loss": 1.3616,
"step": 928
},
{
"epoch": 16.95,
"learning_rate": 6.938687840989972e-07,
"loss": 1.3015,
"step": 932
},
{
"epoch": 17.0,
"gpt4_scores": 0.5666666666666667,
"step": 935
},
{
"epoch": 17.0,
"eval_loss": 1.9795942306518555,
"eval_runtime": 4.9559,
"eval_samples_per_second": 4.641,
"eval_steps_per_second": 1.211,
"step": 935
},
{
"epoch": 17.02,
"learning_rate": 6.619614817762537e-07,
"loss": 1.2956,
"step": 936
},
{
"epoch": 17.09,
"learning_rate": 6.307531146510754e-07,
"loss": 1.2213,
"step": 940
},
{
"epoch": 17.16,
"learning_rate": 6.002487109467347e-07,
"loss": 1.292,
"step": 944
},
{
"epoch": 17.24,
"learning_rate": 5.704531854654721e-07,
"loss": 1.3196,
"step": 948
},
{
"epoch": 17.31,
"learning_rate": 5.413713387966329e-07,
"loss": 1.3676,
"step": 952
},
{
"epoch": 17.38,
"learning_rate": 5.130078565432089e-07,
"loss": 1.3627,
"step": 956
},
{
"epoch": 17.45,
"learning_rate": 4.853673085668947e-07,
"loss": 1.2544,
"step": 960
},
{
"epoch": 17.53,
"learning_rate": 4.58454148251814e-07,
"loss": 1.3126,
"step": 964
},
{
"epoch": 17.6,
"learning_rate": 4.322727117869951e-07,
"loss": 1.3056,
"step": 968
},
{
"epoch": 17.67,
"learning_rate": 4.0682721746773346e-07,
"loss": 1.2876,
"step": 972
},
{
"epoch": 17.75,
"learning_rate": 3.821217650159453e-07,
"loss": 1.3168,
"step": 976
},
{
"epoch": 17.82,
"learning_rate": 3.581603349196372e-07,
"loss": 1.2738,
"step": 980
},
{
"epoch": 17.89,
"learning_rate": 3.3494678779157464e-07,
"loss": 1.2393,
"step": 984
},
{
"epoch": 17.96,
"learning_rate": 3.1248486374726884e-07,
"loss": 1.3456,
"step": 988
},
{
"epoch": 18.0,
"gpt4_scores": 0.45,
"step": 990
},
{
"epoch": 18.0,
"eval_loss": 1.983115315437317,
"eval_runtime": 4.9224,
"eval_samples_per_second": 4.673,
"eval_steps_per_second": 1.219,
"step": 990
},
{
"epoch": 18.04,
"learning_rate": 2.9077818180237693e-07,
"loss": 1.2949,
"step": 992
},
{
"epoch": 18.11,
"learning_rate": 2.6983023928961406e-07,
"loss": 1.3541,
"step": 996
},
{
"epoch": 18.18,
"learning_rate": 2.4964441129527337e-07,
"loss": 1.293,
"step": 1000
},
{
"epoch": 18.25,
"learning_rate": 2.3022395011543687e-07,
"loss": 1.2917,
"step": 1004
},
{
"epoch": 18.33,
"learning_rate": 2.1157198473197417e-07,
"loss": 1.318,
"step": 1008
},
{
"epoch": 18.4,
"learning_rate": 1.9369152030840553e-07,
"loss": 1.2806,
"step": 1012
},
{
"epoch": 18.47,
"learning_rate": 1.765854377057219e-07,
"loss": 1.2441,
"step": 1016
},
{
"epoch": 18.55,
"learning_rate": 1.6025649301821877e-07,
"loss": 1.3167,
"step": 1020
},
{
"epoch": 18.62,
"learning_rate": 1.4470731712944885e-07,
"loss": 1.3758,
"step": 1024
},
{
"epoch": 18.69,
"learning_rate": 1.2994041528833267e-07,
"loss": 1.2589,
"step": 1028
},
{
"epoch": 18.76,
"learning_rate": 1.1595816670552429e-07,
"loss": 1.3814,
"step": 1032
},
{
"epoch": 18.84,
"learning_rate": 1.0276282417007399e-07,
"loss": 1.254,
"step": 1036
},
{
"epoch": 18.91,
"learning_rate": 9.035651368646647e-08,
"loss": 1.2075,
"step": 1040
},
{
"epoch": 18.98,
"learning_rate": 7.874123413208145e-08,
"loss": 1.2136,
"step": 1044
},
{
"epoch": 19.0,
"gpt4_scores": 0.55,
"step": 1045
},
{
"epoch": 19.0,
"eval_loss": 1.9847663640975952,
"eval_runtime": 4.9193,
"eval_samples_per_second": 4.676,
"eval_steps_per_second": 1.22,
"step": 1045
},
{
"epoch": 19.05,
"learning_rate": 6.791885693514134e-08,
"loss": 1.2188,
"step": 1048
},
{
"epoch": 19.13,
"learning_rate": 5.7891125773187896e-08,
"loss": 1.3258,
"step": 1052
},
{
"epoch": 19.2,
"learning_rate": 4.865965629214819e-08,
"loss": 1.3501,
"step": 1056
},
{
"epoch": 19.27,
"learning_rate": 4.02259358460233e-08,
"loss": 1.2407,
"step": 1060
},
{
"epoch": 19.35,
"learning_rate": 3.25913232572489e-08,
"loss": 1.2737,
"step": 1064
},
{
"epoch": 19.42,
"learning_rate": 2.57570485977654e-08,
"loss": 1.2886,
"step": 1068
},
{
"epoch": 19.49,
"learning_rate": 1.9724212990830938e-08,
"loss": 1.331,
"step": 1072
},
{
"epoch": 19.56,
"learning_rate": 1.449378843361271e-08,
"loss": 1.2453,
"step": 1076
},
{
"epoch": 19.64,
"learning_rate": 1.006661764057837e-08,
"loss": 1.2749,
"step": 1080
},
{
"epoch": 19.71,
"learning_rate": 6.4434139077201865e-09,
"loss": 1.2996,
"step": 1084
},
{
"epoch": 19.78,
"learning_rate": 3.6247609976319818e-09,
"loss": 1.2314,
"step": 1088
},
{
"epoch": 19.85,
"learning_rate": 1.61111304545436e-09,
"loss": 1.3066,
"step": 1092
},
{
"epoch": 19.93,
"learning_rate": 4.027944857032395e-10,
"loss": 1.3807,
"step": 1096
},
{
"epoch": 20.0,
"learning_rate": 0.0,
"loss": 1.302,
"step": 1100
},
{
"epoch": 20.0,
"step": 1100,
"total_flos": 3.76665795378217e+16,
"train_loss": 0.0,
"train_runtime": 10.5489,
"train_samples_per_second": 411.417,
"train_steps_per_second": 104.276
}
],
"logging_steps": 4,
"max_steps": 1100,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 55,
"total_flos": 3.76665795378217e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
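
A minimal sketch (not part of the checkpoint itself) of one way to inspect this log: it loads the file, separates the per-step training entries from the per-epoch eval entries, and plots the loss curves and the logged learning-rate schedule (warmup to roughly 1e-5 over the first epochs, then cosine decay to 0 at step 1100). It assumes only the standard library plus matplotlib, and that the file is saved locally as "trainer_state.json".

import json

import matplotlib.pyplot as plt

# Load the trainer state dumped by the Hugging Face Trainer.
with open("trainer_state.json") as f:
    state = json.load(f)

# Entries with a "loss" key are per-step training logs; entries with
# "eval_loss" are the per-epoch evaluation logs. gpt4_scores entries and the
# final train summary have neither key and are skipped here.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

fig, (ax_loss, ax_lr) = plt.subplots(1, 2, figsize=(10, 4))

# Training loss vs. optimizer step, with per-epoch eval loss overlaid.
ax_loss.plot([e["step"] for e in train_logs],
             [e["loss"] for e in train_logs], label="train loss")
ax_loss.plot([e["step"] for e in eval_logs],
             [e["eval_loss"] for e in eval_logs], marker="o", label="eval loss")
ax_loss.set_xlabel("step")
ax_loss.set_ylabel("loss")
ax_loss.legend()

# Learning-rate schedule as logged: warmup, then decay toward 0 by step 1100.
ax_lr.plot([e["step"] for e in train_logs],
           [e["learning_rate"] for e in train_logs])
ax_lr.set_xlabel("step")
ax_lr.set_ylabel("learning rate")

fig.tight_layout()
plt.show()

Run against this file, the eval-loss curve makes the logged trend easy to see: eval_loss bottoms out around epoch 7 (~1.83) and then rises through epoch 19 (~1.98) while the training loss keeps falling, which is the usual signature of overfitting on a small dataset.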