Yi-6B-ruozhiba-1e-5-50 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 20.0,
"eval_steps": 500,
"global_step": 1100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"learning_rate": 9.090909090909091e-08,
"loss": 2.3833,
"step": 1
},
{
"epoch": 0.07,
"learning_rate": 3.6363636363636366e-07,
"loss": 2.4789,
"step": 4
},
{
"epoch": 0.15,
"learning_rate": 7.272727272727273e-07,
"loss": 2.3195,
"step": 8
},
{
"epoch": 0.22,
"learning_rate": 1.090909090909091e-06,
"loss": 2.3366,
"step": 12
},
{
"epoch": 0.29,
"learning_rate": 1.4545454545454546e-06,
"loss": 2.3221,
"step": 16
},
{
"epoch": 0.36,
"learning_rate": 1.8181818181818183e-06,
"loss": 2.4036,
"step": 20
},
{
"epoch": 0.44,
"learning_rate": 2.181818181818182e-06,
"loss": 2.4224,
"step": 24
},
{
"epoch": 0.51,
"learning_rate": 2.5454545454545456e-06,
"loss": 2.6085,
"step": 28
},
{
"epoch": 0.58,
"learning_rate": 2.9090909090909093e-06,
"loss": 2.5477,
"step": 32
},
{
"epoch": 0.65,
"learning_rate": 3.272727272727273e-06,
"loss": 2.4446,
"step": 36
},
{
"epoch": 0.73,
"learning_rate": 3.6363636363636366e-06,
"loss": 2.3109,
"step": 40
},
{
"epoch": 0.8,
"learning_rate": 4.000000000000001e-06,
"loss": 2.4149,
"step": 44
},
{
"epoch": 0.87,
"learning_rate": 4.363636363636364e-06,
"loss": 2.5514,
"step": 48
},
{
"epoch": 0.95,
"learning_rate": 4.727272727272728e-06,
"loss": 2.3816,
"step": 52
},
{
"epoch": 1.02,
"learning_rate": 5.090909090909091e-06,
"loss": 2.6293,
"step": 56
},
{
"epoch": 1.09,
"learning_rate": 5.4545454545454545e-06,
"loss": 2.2422,
"step": 60
},
{
"epoch": 1.16,
"learning_rate": 5.8181818181818185e-06,
"loss": 2.4031,
"step": 64
},
{
"epoch": 1.24,
"learning_rate": 6.181818181818182e-06,
"loss": 2.2303,
"step": 68
},
{
"epoch": 1.31,
"learning_rate": 6.545454545454546e-06,
"loss": 2.2847,
"step": 72
},
{
"epoch": 1.38,
"learning_rate": 6.90909090909091e-06,
"loss": 2.1578,
"step": 76
},
{
"epoch": 1.45,
"learning_rate": 7.272727272727273e-06,
"loss": 2.1774,
"step": 80
},
{
"epoch": 1.53,
"learning_rate": 7.636363636363638e-06,
"loss": 2.197,
"step": 84
},
{
"epoch": 1.6,
"learning_rate": 8.000000000000001e-06,
"loss": 2.2093,
"step": 88
},
{
"epoch": 1.67,
"learning_rate": 8.363636363636365e-06,
"loss": 2.1004,
"step": 92
},
{
"epoch": 1.75,
"learning_rate": 8.727272727272728e-06,
"loss": 2.0526,
"step": 96
},
{
"epoch": 1.82,
"learning_rate": 9.090909090909091e-06,
"loss": 2.0771,
"step": 100
},
{
"epoch": 1.89,
"learning_rate": 9.454545454545456e-06,
"loss": 2.0219,
"step": 104
},
{
"epoch": 1.96,
"learning_rate": 9.81818181818182e-06,
"loss": 1.9439,
"step": 108
},
{
"epoch": 2.04,
"learning_rate": 9.999899300364534e-06,
"loss": 1.9175,
"step": 112
},
{
"epoch": 2.11,
"learning_rate": 9.99909372761763e-06,
"loss": 1.8281,
"step": 116
},
{
"epoch": 2.18,
"learning_rate": 9.997482711915926e-06,
"loss": 1.9423,
"step": 120
},
{
"epoch": 2.25,
"learning_rate": 9.99506651282272e-06,
"loss": 1.9385,
"step": 124
},
{
"epoch": 2.33,
"learning_rate": 9.991845519630679e-06,
"loss": 2.0381,
"step": 128
},
{
"epoch": 2.4,
"learning_rate": 9.987820251299121e-06,
"loss": 2.0296,
"step": 132
},
{
"epoch": 2.47,
"learning_rate": 9.982991356370404e-06,
"loss": 1.9167,
"step": 136
},
{
"epoch": 2.55,
"learning_rate": 9.977359612865424e-06,
"loss": 1.9132,
"step": 140
},
{
"epoch": 2.62,
"learning_rate": 9.970925928158275e-06,
"loss": 1.8455,
"step": 144
},
{
"epoch": 2.69,
"learning_rate": 9.963691338830045e-06,
"loss": 1.8692,
"step": 148
},
{
"epoch": 2.76,
"learning_rate": 9.955657010501807e-06,
"loss": 1.8812,
"step": 152
},
{
"epoch": 2.84,
"learning_rate": 9.946824237646823e-06,
"loss": 1.9785,
"step": 156
},
{
"epoch": 2.91,
"learning_rate": 9.937194443381972e-06,
"loss": 1.9511,
"step": 160
},
{
"epoch": 2.98,
"learning_rate": 9.926769179238467e-06,
"loss": 1.8505,
"step": 164
},
{
"epoch": 3.05,
"learning_rate": 9.915550124911866e-06,
"loss": 1.9195,
"step": 168
},
{
"epoch": 3.13,
"learning_rate": 9.903539087991462e-06,
"loss": 1.8527,
"step": 172
},
{
"epoch": 3.2,
"learning_rate": 9.890738003669029e-06,
"loss": 1.7579,
"step": 176
},
{
"epoch": 3.27,
"learning_rate": 9.877148934427037e-06,
"loss": 1.9244,
"step": 180
},
{
"epoch": 3.35,
"learning_rate": 9.862774069706346e-06,
"loss": 1.8144,
"step": 184
},
{
"epoch": 3.42,
"learning_rate": 9.847615725553457e-06,
"loss": 1.8462,
"step": 188
},
{
"epoch": 3.49,
"learning_rate": 9.831676344247343e-06,
"loss": 1.7888,
"step": 192
},
{
"epoch": 3.56,
"learning_rate": 9.814958493905962e-06,
"loss": 1.9216,
"step": 196
},
{
"epoch": 3.64,
"learning_rate": 9.797464868072489e-06,
"loss": 1.7411,
"step": 200
},
{
"epoch": 3.71,
"learning_rate": 9.779198285281326e-06,
"loss": 1.7989,
"step": 204
},
{
"epoch": 3.78,
"learning_rate": 9.760161688604008e-06,
"loss": 1.8476,
"step": 208
},
{
"epoch": 3.85,
"learning_rate": 9.740358145174999e-06,
"loss": 1.7767,
"step": 212
},
{
"epoch": 3.93,
"learning_rate": 9.719790845697534e-06,
"loss": 1.7036,
"step": 216
},
{
"epoch": 4.0,
"learning_rate": 9.698463103929542e-06,
"loss": 1.8114,
"step": 220
},
{
"epoch": 4.0,
"gpt4_scores": 0.65,
"step": 220
},
{
"epoch": 4.0,
"std": 0.11335784048754634,
"step": 220
},
{
"epoch": 4.0,
"eval_loss": 1.8505451679229736,
"eval_runtime": 4.9659,
"eval_samples_per_second": 4.632,
"eval_steps_per_second": 1.208,
"step": 220
},
{
"epoch": 4.07,
"learning_rate": 9.676378356149733e-06,
"loss": 1.7635,
"step": 224
},
{
"epoch": 4.15,
"learning_rate": 9.653540160603956e-06,
"loss": 1.5819,
"step": 228
},
{
"epoch": 4.22,
"learning_rate": 9.629952196931902e-06,
"loss": 1.8023,
"step": 232
},
{
"epoch": 4.29,
"learning_rate": 9.60561826557425e-06,
"loss": 1.7447,
"step": 236
},
{
"epoch": 4.36,
"learning_rate": 9.580542287160348e-06,
"loss": 1.7547,
"step": 240
},
{
"epoch": 4.44,
"learning_rate": 9.554728301876525e-06,
"loss": 1.8657,
"step": 244
},
{
"epoch": 4.51,
"learning_rate": 9.528180468815155e-06,
"loss": 1.7641,
"step": 248
},
{
"epoch": 4.58,
"learning_rate": 9.50090306530454e-06,
"loss": 1.7088,
"step": 252
},
{
"epoch": 4.65,
"learning_rate": 9.47290048621977e-06,
"loss": 1.8404,
"step": 256
},
{
"epoch": 4.73,
"learning_rate": 9.444177243274619e-06,
"loss": 1.7781,
"step": 260
},
{
"epoch": 4.8,
"learning_rate": 9.414737964294636e-06,
"loss": 1.804,
"step": 264
},
{
"epoch": 4.87,
"learning_rate": 9.384587392471516e-06,
"loss": 1.8061,
"step": 268
},
{
"epoch": 4.95,
"learning_rate": 9.353730385598887e-06,
"loss": 1.6723,
"step": 272
},
{
"epoch": 5.0,
"gpt4_scores": 0.73,
"step": 275
},
{
"epoch": 5.0,
"std": 0.10959014554237986,
"step": 275
},
{
"epoch": 5.0,
"eval_loss": 1.8371697664260864,
"eval_runtime": 4.9576,
"eval_samples_per_second": 4.639,
"eval_steps_per_second": 1.21,
"step": 275
},
{
"epoch": 5.02,
"learning_rate": 9.322171915289635e-06,
"loss": 1.7486,
"step": 276
},
{
"epoch": 5.09,
"learning_rate": 9.289917066174887e-06,
"loss": 1.7675,
"step": 280
},
{
"epoch": 5.16,
"learning_rate": 9.256971035084786e-06,
"loss": 1.7188,
"step": 284
},
{
"epoch": 5.24,
"learning_rate": 9.223339130211194e-06,
"loss": 1.7628,
"step": 288
},
{
"epoch": 5.31,
"learning_rate": 9.189026770252437e-06,
"loss": 1.6931,
"step": 292
},
{
"epoch": 5.38,
"learning_rate": 9.154039483540273e-06,
"loss": 1.5731,
"step": 296
},
{
"epoch": 5.45,
"learning_rate": 9.118382907149164e-06,
"loss": 1.7763,
"step": 300
},
{
"epoch": 5.53,
"learning_rate": 9.08206278598805e-06,
"loss": 1.7601,
"step": 304
},
{
"epoch": 5.6,
"learning_rate": 9.045084971874738e-06,
"loss": 1.6893,
"step": 308
},
{
"epoch": 5.67,
"learning_rate": 9.007455422593077e-06,
"loss": 1.7848,
"step": 312
},
{
"epoch": 5.75,
"learning_rate": 8.969180200933048e-06,
"loss": 1.7517,
"step": 316
},
{
"epoch": 5.82,
"learning_rate": 8.930265473713939e-06,
"loss": 1.6602,
"step": 320
},
{
"epoch": 5.89,
"learning_rate": 8.890717510790763e-06,
"loss": 1.6003,
"step": 324
},
{
"epoch": 5.96,
"learning_rate": 8.850542684044078e-06,
"loss": 1.6532,
"step": 328
},
{
"epoch": 6.0,
"gpt4_scores": 0.65,
"step": 330
},
{
"epoch": 6.0,
"std": 0.1298075498574717,
"step": 330
},
{
"epoch": 6.0,
"eval_loss": 1.8295962810516357,
"eval_runtime": 4.9535,
"eval_samples_per_second": 4.643,
"eval_steps_per_second": 1.211,
"step": 330
},
{
"epoch": 6.04,
"learning_rate": 8.809747466353356e-06,
"loss": 1.597,
"step": 332
},
{
"epoch": 6.11,
"learning_rate": 8.768338430554083e-06,
"loss": 1.7247,
"step": 336
},
{
"epoch": 6.18,
"learning_rate": 8.726322248378775e-06,
"loss": 1.7433,
"step": 340
},
{
"epoch": 6.25,
"learning_rate": 8.683705689382025e-06,
"loss": 1.6082,
"step": 344
},
{
"epoch": 6.33,
"learning_rate": 8.640495619849821e-06,
"loss": 1.6855,
"step": 348
},
{
"epoch": 6.4,
"learning_rate": 8.596699001693257e-06,
"loss": 1.5826,
"step": 352
},
{
"epoch": 6.47,
"learning_rate": 8.552322891326846e-06,
"loss": 1.6749,
"step": 356
},
{
"epoch": 6.55,
"learning_rate": 8.507374438531606e-06,
"loss": 1.649,
"step": 360
},
{
"epoch": 6.62,
"learning_rate": 8.461860885303116e-06,
"loss": 1.6959,
"step": 364
},
{
"epoch": 6.69,
"learning_rate": 8.415789564684673e-06,
"loss": 1.6805,
"step": 368
},
{
"epoch": 6.76,
"learning_rate": 8.36916789958584e-06,
"loss": 1.67,
"step": 372
},
{
"epoch": 6.84,
"learning_rate": 8.322003401586463e-06,
"loss": 1.6226,
"step": 376
},
{
"epoch": 6.91,
"learning_rate": 8.274303669726427e-06,
"loss": 1.6525,
"step": 380
},
{
"epoch": 6.98,
"learning_rate": 8.226076389281316e-06,
"loss": 1.7187,
"step": 384
},
{
"epoch": 7.0,
"gpt4_scores": 0.74,
"step": 385
},
{
"epoch": 7.0,
"std": 0.11153474794878948,
"step": 385
},
{
"epoch": 7.0,
"eval_loss": 1.8273346424102783,
"eval_runtime": 4.923,
"eval_samples_per_second": 4.672,
"eval_steps_per_second": 1.219,
"step": 385
},
{
"epoch": 7.05,
"learning_rate": 8.177329330524182e-06,
"loss": 1.6103,
"step": 388
},
{
"epoch": 7.13,
"learning_rate": 8.128070347473609e-06,
"loss": 1.546,
"step": 392
},
{
"epoch": 7.2,
"learning_rate": 8.078307376628292e-06,
"loss": 1.551,
"step": 396
},
{
"epoch": 7.27,
"learning_rate": 8.028048435688333e-06,
"loss": 1.6379,
"step": 400
},
{
"epoch": 7.35,
"learning_rate": 7.97730162226344e-06,
"loss": 1.61,
"step": 404
},
{
"epoch": 7.42,
"learning_rate": 7.92607511256826e-06,
"loss": 1.6151,
"step": 408
},
{
"epoch": 7.49,
"learning_rate": 7.874377160105037e-06,
"loss": 1.6989,
"step": 412
},
{
"epoch": 7.56,
"learning_rate": 7.822216094333847e-06,
"loss": 1.6711,
"step": 416
},
{
"epoch": 7.64,
"learning_rate": 7.769600319330553e-06,
"loss": 1.6344,
"step": 420
},
{
"epoch": 7.71,
"learning_rate": 7.716538312432767e-06,
"loss": 1.6,
"step": 424
},
{
"epoch": 7.78,
"learning_rate": 7.663038622873999e-06,
"loss": 1.6563,
"step": 428
},
{
"epoch": 7.85,
"learning_rate": 7.60910987040623e-06,
"loss": 1.5933,
"step": 432
},
{
"epoch": 7.93,
"learning_rate": 7.554760743911104e-06,
"loss": 1.6445,
"step": 436
},
{
"epoch": 8.0,
"learning_rate": 7.500000000000001e-06,
"loss": 1.6945,
"step": 440
},
{
"epoch": 8.0,
"gpt4_scores": 0.71,
"step": 440
},
{
"epoch": 8.0,
"std": 0.10904127658827183,
"step": 440
},
{
"epoch": 8.0,
"eval_loss": 1.8345035314559937,
"eval_runtime": 4.9576,
"eval_samples_per_second": 4.639,
"eval_steps_per_second": 1.21,
"step": 440
},
{
"epoch": 8.07,
"learning_rate": 7.444836461603195e-06,
"loss": 1.6089,
"step": 444
},
{
"epoch": 8.15,
"learning_rate": 7.3892790165483164e-06,
"loss": 1.6294,
"step": 448
},
{
"epoch": 8.22,
"learning_rate": 7.333336616128369e-06,
"loss": 1.6516,
"step": 452
},
{
"epoch": 8.29,
"learning_rate": 7.2770182736595164e-06,
"loss": 1.4944,
"step": 456
},
{
"epoch": 8.36,
"learning_rate": 7.2203330630288714e-06,
"loss": 1.5007,
"step": 460
},
{
"epoch": 8.44,
"learning_rate": 7.163290117232542e-06,
"loss": 1.5207,
"step": 464
},
{
"epoch": 8.51,
"learning_rate": 7.105898626904134e-06,
"loss": 1.6485,
"step": 468
},
{
"epoch": 8.58,
"learning_rate": 7.048167838833977e-06,
"loss": 1.554,
"step": 472
},
{
"epoch": 8.65,
"learning_rate": 6.990107054479313e-06,
"loss": 1.5661,
"step": 476
},
{
"epoch": 8.73,
"learning_rate": 6.931725628465643e-06,
"loss": 1.496,
"step": 480
},
{
"epoch": 8.8,
"learning_rate": 6.873032967079562e-06,
"loss": 1.554,
"step": 484
},
{
"epoch": 8.87,
"learning_rate": 6.814038526753205e-06,
"loss": 1.574,
"step": 488
},
{
"epoch": 8.95,
"learning_rate": 6.75475181254068e-06,
"loss": 1.5494,
"step": 492
},
{
"epoch": 9.0,
"gpt4_scores": 0.5700000000000001,
"step": 495
},
{
"epoch": 9.0,
"std": 0.13787675656179324,
"step": 495
},
{
"epoch": 9.0,
"eval_loss": 1.8451542854309082,
"eval_runtime": 4.9243,
"eval_samples_per_second": 4.671,
"eval_steps_per_second": 1.218,
"step": 495
},
{
"epoch": 9.02,
"learning_rate": 6.695182376586603e-06,
"loss": 1.6572,
"step": 496
},
{
"epoch": 9.09,
"learning_rate": 6.635339816587109e-06,
"loss": 1.5122,
"step": 500
},
{
"epoch": 9.16,
"learning_rate": 6.5752337742434644e-06,
"loss": 1.4632,
"step": 504
},
{
"epoch": 9.24,
"learning_rate": 6.514873933708637e-06,
"loss": 1.4757,
"step": 508
},
{
"epoch": 9.31,
"learning_rate": 6.454270020026996e-06,
"loss": 1.5465,
"step": 512
},
{
"epoch": 9.38,
"learning_rate": 6.39343179756744e-06,
"loss": 1.5713,
"step": 516
},
{
"epoch": 9.45,
"learning_rate": 6.332369068450175e-06,
"loss": 1.5246,
"step": 520
},
{
"epoch": 9.53,
"learning_rate": 6.271091670967437e-06,
"loss": 1.478,
"step": 524
},
{
"epoch": 9.6,
"learning_rate": 6.209609477998339e-06,
"loss": 1.5593,
"step": 528
},
{
"epoch": 9.67,
"learning_rate": 6.1479323954182055e-06,
"loss": 1.5553,
"step": 532
},
{
"epoch": 9.75,
"learning_rate": 6.08607036050254e-06,
"loss": 1.5434,
"step": 536
},
{
"epoch": 9.82,
"learning_rate": 6.024033340325954e-06,
"loss": 1.4686,
"step": 540
},
{
"epoch": 9.89,
"learning_rate": 5.961831330156306e-06,
"loss": 1.4344,
"step": 544
},
{
"epoch": 9.96,
"learning_rate": 5.89947435184427e-06,
"loss": 1.5329,
"step": 548
},
{
"epoch": 10.0,
"gpt4_scores": 0.6,
"step": 550
},
{
"epoch": 10.0,
"std": 0.14352700094407322,
"step": 550
},
{
"epoch": 10.0,
"eval_loss": 1.8664953708648682,
"eval_runtime": 4.9654,
"eval_samples_per_second": 4.632,
"eval_steps_per_second": 1.208,
"step": 550
},
{
"epoch": 10.04,
"learning_rate": 5.8369724522086545e-06,
"loss": 1.5753,
"step": 552
},
{
"epoch": 10.11,
"learning_rate": 5.774335701417662e-06,
"loss": 1.4623,
"step": 556
},
{
"epoch": 10.18,
"learning_rate": 5.711574191366427e-06,
"loss": 1.4908,
"step": 560
},
{
"epoch": 10.25,
"learning_rate": 5.648698034051009e-06,
"loss": 1.4846,
"step": 564
},
{
"epoch": 10.33,
"learning_rate": 5.585717359939192e-06,
"loss": 1.4591,
"step": 568
},
{
"epoch": 10.4,
"learning_rate": 5.522642316338268e-06,
"loss": 1.4508,
"step": 572
},
{
"epoch": 10.47,
"learning_rate": 5.459483065760138e-06,
"loss": 1.4282,
"step": 576
},
{
"epoch": 10.55,
"learning_rate": 5.396249784283943e-06,
"loss": 1.4565,
"step": 580
},
{
"epoch": 10.62,
"learning_rate": 5.33295265991652e-06,
"loss": 1.5174,
"step": 584
},
{
"epoch": 10.69,
"learning_rate": 5.26960189095093e-06,
"loss": 1.4063,
"step": 588
},
{
"epoch": 10.76,
"learning_rate": 5.206207684323337e-06,
"loss": 1.4412,
"step": 592
},
{
"epoch": 10.84,
"learning_rate": 5.142780253968481e-06,
"loss": 1.5706,
"step": 596
},
{
"epoch": 10.91,
"learning_rate": 5.07932981917404e-06,
"loss": 1.4704,
"step": 600
},
{
"epoch": 10.98,
"learning_rate": 5.015866602934112e-06,
"loss": 1.4105,
"step": 604
},
{
"epoch": 11.0,
"gpt4_scores": 0.5700000000000001,
"step": 605
},
{
"epoch": 11.0,
"std": 0.12653062870309306,
"step": 605
},
{
"epoch": 11.0,
"eval_loss": 1.8876867294311523,
"eval_runtime": 4.9571,
"eval_samples_per_second": 4.64,
"eval_steps_per_second": 1.21,
"step": 605
},
{
"epoch": 11.05,
"learning_rate": 4.952400830302117e-06,
"loss": 1.3259,
"step": 608
},
{
"epoch": 11.13,
"learning_rate": 4.888942726743353e-06,
"loss": 1.371,
"step": 612
},
{
"epoch": 11.2,
"learning_rate": 4.825502516487497e-06,
"loss": 1.4825,
"step": 616
},
{
"epoch": 11.27,
"learning_rate": 4.762090420881289e-06,
"loss": 1.3511,
"step": 620
},
{
"epoch": 11.35,
"learning_rate": 4.6987166567417085e-06,
"loss": 1.4314,
"step": 624
},
{
"epoch": 11.42,
"learning_rate": 4.635391434709847e-06,
"loss": 1.4736,
"step": 628
},
{
"epoch": 11.49,
"learning_rate": 4.572124957605803e-06,
"loss": 1.4653,
"step": 632
},
{
"epoch": 11.56,
"learning_rate": 4.5089274187848144e-06,
"loss": 1.4053,
"step": 636
},
{
"epoch": 11.64,
"learning_rate": 4.445809000494945e-06,
"loss": 1.4959,
"step": 640
},
{
"epoch": 11.71,
"learning_rate": 4.382779872236527e-06,
"loss": 1.4142,
"step": 644
},
{
"epoch": 11.78,
"learning_rate": 4.319850189123681e-06,
"loss": 1.3764,
"step": 648
},
{
"epoch": 11.85,
"learning_rate": 4.257030090248142e-06,
"loss": 1.5262,
"step": 652
},
{
"epoch": 11.93,
"learning_rate": 4.194329697045681e-06,
"loss": 1.4114,
"step": 656
},
{
"epoch": 12.0,
"learning_rate": 4.131759111665349e-06,
"loss": 1.3862,
"step": 660
},
{
"epoch": 12.0,
"gpt4_scores": 0.64,
"step": 660
},
{
"epoch": 12.0,
"std": 0.13505554412907303,
"step": 660
},
{
"epoch": 12.0,
"eval_loss": 1.9065581560134888,
"eval_runtime": 4.9613,
"eval_samples_per_second": 4.636,
"eval_steps_per_second": 1.209,
"step": 660
},
{
"epoch": 12.07,
"learning_rate": 4.06932841534185e-06,
"loss": 1.4282,
"step": 664
},
{
"epoch": 12.15,
"learning_rate": 4.007047666771274e-06,
"loss": 1.3369,
"step": 668
},
{
"epoch": 12.22,
"learning_rate": 3.944926900490452e-06,
"loss": 1.4198,
"step": 672
},
{
"epoch": 12.29,
"learning_rate": 3.882976125260229e-06,
"loss": 1.3799,
"step": 676
},
{
"epoch": 12.36,
"learning_rate": 3.821205322452863e-06,
"loss": 1.4461,
"step": 680
},
{
"epoch": 12.44,
"learning_rate": 3.7596244444438577e-06,
"loss": 1.4154,
"step": 684
},
{
"epoch": 12.51,
"learning_rate": 3.69824341300844e-06,
"loss": 1.3719,
"step": 688
},
{
"epoch": 12.58,
"learning_rate": 3.637072117723012e-06,
"loss": 1.3079,
"step": 692
},
{
"epoch": 12.65,
"learning_rate": 3.5761204143717387e-06,
"loss": 1.3786,
"step": 696
},
{
"epoch": 12.73,
"learning_rate": 3.5153981233586277e-06,
"loss": 1.3406,
"step": 700
},
{
"epoch": 12.8,
"learning_rate": 3.4549150281252635e-06,
"loss": 1.3967,
"step": 704
},
{
"epoch": 12.87,
"learning_rate": 3.394680873574546e-06,
"loss": 1.4914,
"step": 708
},
{
"epoch": 12.95,
"learning_rate": 3.3347053645005965e-06,
"loss": 1.4126,
"step": 712
},
{
"epoch": 13.0,
"gpt4_scores": 0.53,
"step": 715
},
{
"epoch": 13.0,
"std": 0.14356183336806477,
"step": 715
},
{
"epoch": 13.0,
"eval_loss": 1.9303113222122192,
"eval_runtime": 4.9701,
"eval_samples_per_second": 4.628,
"eval_steps_per_second": 1.207,
"step": 715
},
{
"epoch": 13.02,
"learning_rate": 3.274998164025148e-06,
"loss": 1.4012,
"step": 716
},
{
"epoch": 13.09,
"learning_rate": 3.2155688920406415e-06,
"loss": 1.4258,
"step": 720
},
{
"epoch": 13.16,
"learning_rate": 3.156427123660297e-06,
"loss": 1.3023,
"step": 724
},
{
"epoch": 13.24,
"learning_rate": 3.097582387675385e-06,
"loss": 1.3797,
"step": 728
},
{
"epoch": 13.31,
"learning_rate": 3.0390441650199727e-06,
"loss": 1.335,
"step": 732
},
{
"epoch": 13.38,
"learning_rate": 2.980821887243377e-06,
"loss": 1.388,
"step": 736
},
{
"epoch": 13.45,
"learning_rate": 2.9229249349905686e-06,
"loss": 1.2886,
"step": 740
},
{
"epoch": 13.53,
"learning_rate": 2.8653626364907918e-06,
"loss": 1.3144,
"step": 744
},
{
"epoch": 13.6,
"learning_rate": 2.8081442660546126e-06,
"loss": 1.3285,
"step": 748
},
{
"epoch": 13.67,
"learning_rate": 2.751279042579672e-06,
"loss": 1.4061,
"step": 752
},
{
"epoch": 13.75,
"learning_rate": 2.694776128065345e-06,
"loss": 1.3877,
"step": 756
},
{
"epoch": 13.82,
"learning_rate": 2.6386446261365874e-06,
"loss": 1.2318,
"step": 760
},
{
"epoch": 13.89,
"learning_rate": 2.5828935805771804e-06,
"loss": 1.3826,
"step": 764
},
{
"epoch": 13.96,
"learning_rate": 2.527531973872617e-06,
"loss": 1.388,
"step": 768
},
{
"epoch": 14.0,
"gpt4_scores": 0.62,
"step": 770
},
{
"epoch": 14.0,
"std": 0.1362350909274112,
"step": 770
},
{
"epoch": 14.0,
"eval_loss": 1.9448539018630981,
"eval_runtime": 4.9425,
"eval_samples_per_second": 4.654,
"eval_steps_per_second": 1.214,
"step": 770
},
{
"epoch": 14.04,
"learning_rate": 2.4725687257628533e-06,
"loss": 1.524,
"step": 772
},
{
"epoch": 14.11,
"learning_rate": 2.418012691805191e-06,
"loss": 1.3961,
"step": 776
},
{
"epoch": 14.18,
"learning_rate": 2.363872661947488e-06,
"loss": 1.3303,
"step": 780
},
{
"epoch": 14.25,
"learning_rate": 2.310157359111938e-06,
"loss": 1.3642,
"step": 784
},
{
"epoch": 14.33,
"learning_rate": 2.2568754377896516e-06,
"loss": 1.3532,
"step": 788
},
{
"epoch": 14.4,
"learning_rate": 2.204035482646267e-06,
"loss": 1.2528,
"step": 792
},
{
"epoch": 14.47,
"learning_rate": 2.1516460071388062e-06,
"loss": 1.3888,
"step": 796
},
{
"epoch": 14.55,
"learning_rate": 2.09971545214401e-06,
"loss": 1.269,
"step": 800
},
{
"epoch": 14.62,
"learning_rate": 2.0482521845983522e-06,
"loss": 1.3103,
"step": 804
},
{
"epoch": 14.69,
"learning_rate": 1.9972644961499853e-06,
"loss": 1.3151,
"step": 808
},
{
"epoch": 14.76,
"learning_rate": 1.946760601822809e-06,
"loss": 1.3055,
"step": 812
},
{
"epoch": 14.84,
"learning_rate": 1.8967486386928819e-06,
"loss": 1.3414,
"step": 816
},
{
"epoch": 14.91,
"learning_rate": 1.8472366645773892e-06,
"loss": 1.3481,
"step": 820
},
{
"epoch": 14.98,
"learning_rate": 1.798232656736389e-06,
"loss": 1.3653,
"step": 824
},
{
"epoch": 15.0,
"gpt4_scores": 0.605,
"step": 825
},
{
"epoch": 15.0,
"std": 0.13312588027877975,
"step": 825
},
{
"epoch": 15.0,
"eval_loss": 1.9636751413345337,
"eval_runtime": 4.9484,
"eval_samples_per_second": 4.648,
"eval_steps_per_second": 1.213,
"step": 825
},
{
"epoch": 15.05,
"learning_rate": 1.7497445105875377e-06,
"loss": 1.4389,
"step": 828
},
{
"epoch": 15.13,
"learning_rate": 1.7017800384339928e-06,
"loss": 1.3568,
"step": 832
},
{
"epoch": 15.2,
"learning_rate": 1.6543469682057105e-06,
"loss": 1.3535,
"step": 836
},
{
"epoch": 15.27,
"learning_rate": 1.6074529422143398e-06,
"loss": 1.2696,
"step": 840
},
{
"epoch": 15.35,
"learning_rate": 1.561105515921915e-06,
"loss": 1.2854,
"step": 844
},
{
"epoch": 15.42,
"learning_rate": 1.5153121567235334e-06,
"loss": 1.2779,
"step": 848
},
{
"epoch": 15.49,
"learning_rate": 1.470080242744218e-06,
"loss": 1.3024,
"step": 852
},
{
"epoch": 15.56,
"learning_rate": 1.4254170616501828e-06,
"loss": 1.3081,
"step": 856
},
{
"epoch": 15.64,
"learning_rate": 1.3813298094746491e-06,
"loss": 1.2898,
"step": 860
},
{
"epoch": 15.71,
"learning_rate": 1.3378255894584463e-06,
"loss": 1.2944,
"step": 864
},
{
"epoch": 15.78,
"learning_rate": 1.2949114109055417e-06,
"loss": 1.3638,
"step": 868
},
{
"epoch": 15.85,
"learning_rate": 1.2525941880537307e-06,
"loss": 1.3272,
"step": 872
},
{
"epoch": 15.93,
"learning_rate": 1.210880738960616e-06,
"loss": 1.2646,
"step": 876
},
{
"epoch": 16.0,
"learning_rate": 1.1697777844051105e-06,
"loss": 1.361,
"step": 880
},
{
"epoch": 16.0,
"gpt4_scores": 0.5999999999999999,
"step": 880
},
{
"epoch": 16.0,
"std": 0.1319090595827292,
"step": 880
},
{
"epoch": 16.0,
"eval_loss": 1.9738112688064575,
"eval_runtime": 4.9552,
"eval_samples_per_second": 4.642,
"eval_steps_per_second": 1.211,
"step": 880
},
{
"epoch": 16.07,
"learning_rate": 1.1292919468045876e-06,
"loss": 1.2656,
"step": 884
},
{
"epoch": 16.15,
"learning_rate": 1.0894297491479044e-06,
"loss": 1.376,
"step": 888
},
{
"epoch": 16.22,
"learning_rate": 1.0501976139444191e-06,
"loss": 1.2771,
"step": 892
},
{
"epoch": 16.29,
"learning_rate": 1.0116018621892237e-06,
"loss": 1.2919,
"step": 896
},
{
"epoch": 16.36,
"learning_rate": 9.73648712344707e-07,
"loss": 1.3542,
"step": 900
},
{
"epoch": 16.44,
"learning_rate": 9.363442793386606e-07,
"loss": 1.275,
"step": 904
},
{
"epoch": 16.51,
"learning_rate": 8.996945735790447e-07,
"loss": 1.3197,
"step": 908
},
{
"epoch": 16.58,
"learning_rate": 8.637054999856148e-07,
"loss": 1.3272,
"step": 912
},
{
"epoch": 16.65,
"learning_rate": 8.283828570385239e-07,
"loss": 1.2585,
"step": 916
},
{
"epoch": 16.73,
"learning_rate": 7.937323358440935e-07,
"loss": 1.2851,
"step": 920
},
{
"epoch": 16.8,
"learning_rate": 7.597595192178702e-07,
"loss": 1.2787,
"step": 924
},
{
"epoch": 16.87,
"learning_rate": 7.264698807851328e-07,
"loss": 1.3594,
"step": 928
},
{
"epoch": 16.95,
"learning_rate": 6.938687840989972e-07,
"loss": 1.2944,
"step": 932
},
{
"epoch": 17.0,
"gpt4_scores": 0.6100000000000001,
"step": 935
},
{
"epoch": 17.0,
"std": 0.1352405264704334,
"step": 935
},
{
"epoch": 17.0,
"eval_loss": 1.9819345474243164,
"eval_runtime": 4.9634,
"eval_samples_per_second": 4.634,
"eval_steps_per_second": 1.209,
"step": 935
},
{
"epoch": 17.02,
"learning_rate": 6.619614817762537e-07,
"loss": 1.3019,
"step": 936
},
{
"epoch": 17.09,
"learning_rate": 6.307531146510754e-07,
"loss": 1.2263,
"step": 940
},
{
"epoch": 17.16,
"learning_rate": 6.002487109467347e-07,
"loss": 1.2774,
"step": 944
},
{
"epoch": 17.24,
"learning_rate": 5.704531854654721e-07,
"loss": 1.3117,
"step": 948
},
{
"epoch": 17.31,
"learning_rate": 5.413713387966329e-07,
"loss": 1.3669,
"step": 952
},
{
"epoch": 17.38,
"learning_rate": 5.130078565432089e-07,
"loss": 1.3741,
"step": 956
},
{
"epoch": 17.45,
"learning_rate": 4.853673085668947e-07,
"loss": 1.259,
"step": 960
},
{
"epoch": 17.53,
"learning_rate": 4.58454148251814e-07,
"loss": 1.2952,
"step": 964
},
{
"epoch": 17.6,
"learning_rate": 4.322727117869951e-07,
"loss": 1.2957,
"step": 968
},
{
"epoch": 17.67,
"learning_rate": 4.0682721746773346e-07,
"loss": 1.2796,
"step": 972
},
{
"epoch": 17.75,
"learning_rate": 3.821217650159453e-07,
"loss": 1.3251,
"step": 976
},
{
"epoch": 17.82,
"learning_rate": 3.581603349196372e-07,
"loss": 1.2795,
"step": 980
},
{
"epoch": 17.89,
"learning_rate": 3.3494678779157464e-07,
"loss": 1.2337,
"step": 984
},
{
"epoch": 17.96,
"learning_rate": 3.1248486374726884e-07,
"loss": 1.3433,
"step": 988
},
{
"epoch": 18.0,
"gpt4_scores": 0.5599999999999999,
"step": 990
},
{
"epoch": 18.0,
"std": 0.14573949361789343,
"step": 990
},
{
"epoch": 18.0,
"eval_loss": 1.9856066703796387,
"eval_runtime": 4.9567,
"eval_samples_per_second": 4.64,
"eval_steps_per_second": 1.21,
"step": 990
},
{
"epoch": 18.04,
"learning_rate": 2.9077818180237693e-07,
"loss": 1.2889,
"step": 992
},
{
"epoch": 18.11,
"learning_rate": 2.6983023928961406e-07,
"loss": 1.3571,
"step": 996
},
{
"epoch": 18.18,
"learning_rate": 2.4964441129527337e-07,
"loss": 1.2855,
"step": 1000
},
{
"epoch": 18.25,
"learning_rate": 2.3022395011543687e-07,
"loss": 1.2844,
"step": 1004
},
{
"epoch": 18.33,
"learning_rate": 2.1157198473197417e-07,
"loss": 1.3131,
"step": 1008
},
{
"epoch": 18.4,
"learning_rate": 1.9369152030840553e-07,
"loss": 1.2825,
"step": 1012
},
{
"epoch": 18.47,
"learning_rate": 1.765854377057219e-07,
"loss": 1.2378,
"step": 1016
},
{
"epoch": 18.55,
"learning_rate": 1.6025649301821877e-07,
"loss": 1.3067,
"step": 1020
},
{
"epoch": 18.62,
"learning_rate": 1.4470731712944885e-07,
"loss": 1.367,
"step": 1024
},
{
"epoch": 18.69,
"learning_rate": 1.2994041528833267e-07,
"loss": 1.2553,
"step": 1028
},
{
"epoch": 18.76,
"learning_rate": 1.1595816670552429e-07,
"loss": 1.392,
"step": 1032
},
{
"epoch": 18.84,
"learning_rate": 1.0276282417007399e-07,
"loss": 1.2524,
"step": 1036
},
{
"epoch": 18.91,
"learning_rate": 9.035651368646647e-08,
"loss": 1.2067,
"step": 1040
},
{
"epoch": 18.98,
"learning_rate": 7.874123413208145e-08,
"loss": 1.2058,
"step": 1044
},
{
"epoch": 19.0,
"gpt4_scores": 0.5399999999999999,
"step": 1045
},
{
"epoch": 19.0,
"std": 0.1422673539502299,
"step": 1045
},
{
"epoch": 19.0,
"eval_loss": 1.9871299266815186,
"eval_runtime": 4.9631,
"eval_samples_per_second": 4.634,
"eval_steps_per_second": 1.209,
"step": 1045
},
{
"epoch": 19.05,
"learning_rate": 6.791885693514134e-08,
"loss": 1.2248,
"step": 1048
},
{
"epoch": 19.13,
"learning_rate": 5.7891125773187896e-08,
"loss": 1.3235,
"step": 1052
},
{
"epoch": 19.2,
"learning_rate": 4.865965629214819e-08,
"loss": 1.337,
"step": 1056
},
{
"epoch": 19.27,
"learning_rate": 4.02259358460233e-08,
"loss": 1.2373,
"step": 1060
},
{
"epoch": 19.35,
"learning_rate": 3.25913232572489e-08,
"loss": 1.2713,
"step": 1064
},
{
"epoch": 19.42,
"learning_rate": 2.57570485977654e-08,
"loss": 1.2856,
"step": 1068
},
{
"epoch": 19.49,
"learning_rate": 1.9724212990830938e-08,
"loss": 1.3416,
"step": 1072
},
{
"epoch": 19.56,
"learning_rate": 1.449378843361271e-08,
"loss": 1.2418,
"step": 1076
},
{
"epoch": 19.64,
"learning_rate": 1.006661764057837e-08,
"loss": 1.2665,
"step": 1080
},
{
"epoch": 19.71,
"learning_rate": 6.4434139077201865e-09,
"loss": 1.2837,
"step": 1084
},
{
"epoch": 19.78,
"learning_rate": 3.6247609976319818e-09,
"loss": 1.2405,
"step": 1088
},
{
"epoch": 19.85,
"learning_rate": 1.61111304545436e-09,
"loss": 1.3201,
"step": 1092
},
{
"epoch": 19.93,
"learning_rate": 4.027944857032395e-10,
"loss": 1.37,
"step": 1096
},
{
"epoch": 20.0,
"learning_rate": 0.0,
"loss": 1.2904,
"step": 1100
},
{
"epoch": 20.0,
"gpt4_scores": 0.59,
"step": 1100
},
{
"epoch": 20.0,
"std": 0.130728726758888,
"step": 1100
},
{
"epoch": 20.0,
"eval_loss": 1.987522840499878,
"eval_runtime": 4.9557,
"eval_samples_per_second": 4.641,
"eval_steps_per_second": 1.211,
"step": 1100
},
{
"epoch": 20.0,
"step": 1100,
"total_flos": 3.792475205866291e+16,
"train_loss": 1.2573659207604149,
"train_runtime": 10567.2291,
"train_samples_per_second": 0.411,
"train_steps_per_second": 0.104
}
],
"logging_steps": 4,
"max_steps": 1100,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 55,
"total_flos": 3.792475205866291e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
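
For reference, the log above can be consumed programmatically. Below is a minimal Python sketch, assuming the JSON has been saved locally as trainer_state.json (the filename and the printed summary are illustrative, not part of the original log); it splits log_history into training-loss, eval-loss, and gpt4_scores entries and prints a short summary.

import json

# Assumed local path; adjust as needed.
with open("trainer_state.json", encoding="utf-8") as f:
    state = json.load(f)

train_logs = [e for e in state["log_history"] if "loss" in e]         # per-step training loss
eval_logs  = [e for e in state["log_history"] if "eval_loss" in e]    # per-epoch evaluation loss
score_logs = [e for e in state["log_history"] if "gpt4_scores" in e]  # per-epoch GPT-4 scores

print(f"logged training steps : {len(train_logs)} (logging_steps={state['logging_steps']})")
print(f"final training loss   : {train_logs[-1]['loss']}")
print(f"best eval_loss        : {min(e['eval_loss'] for e in eval_logs):.4f}")
print(f"final gpt4 score      : {score_logs[-1]['gpt4_scores']}")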