{
"best_metric": 1.4371482133865356,
"best_model_checkpoint": "saves/ChineseLLaMA2-7B-Chat/lora/2023-09-07-12-02-29/checkpoint-1000",
"epoch": 1.083717149823896,
"eval_steps": 100,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 0.0009999919374161553,
"loss": 2.0025,
"step": 5
},
{
"epoch": 0.01,
"learning_rate": 0.0009999677499246417,
"loss": 1.7737,
"step": 10
},
{
"epoch": 0.02,
"learning_rate": 0.0009999274383055143,
"loss": 1.7391,
"step": 15
},
{
"epoch": 0.02,
"learning_rate": 0.0009998710038588363,
"loss": 1.7959,
"step": 20
},
{
"epoch": 0.03,
"learning_rate": 0.0009997984484046375,
"loss": 1.713,
"step": 25
},
{
"epoch": 0.03,
"learning_rate": 0.0009997097742828556,
"loss": 1.6441,
"step": 30
},
{
"epoch": 0.04,
"learning_rate": 0.0009996049843532607,
"loss": 1.704,
"step": 35
},
{
"epoch": 0.04,
"learning_rate": 0.0009994840819953633,
"loss": 1.6532,
"step": 40
},
{
"epoch": 0.05,
"learning_rate": 0.0009993470711083048,
"loss": 1.6791,
"step": 45
},
{
"epoch": 0.05,
"learning_rate": 0.0009991939561107325,
"loss": 1.6465,
"step": 50
},
{
"epoch": 0.06,
"learning_rate": 0.000999024741940656,
"loss": 1.6511,
"step": 55
},
{
"epoch": 0.07,
"learning_rate": 0.0009988394340552898,
"loss": 1.6727,
"step": 60
},
{
"epoch": 0.07,
"learning_rate": 0.0009986380384308746,
"loss": 1.6653,
"step": 65
},
{
"epoch": 0.08,
"learning_rate": 0.0009984205615624873,
"loss": 1.6339,
"step": 70
},
{
"epoch": 0.08,
"learning_rate": 0.0009981870104638294,
"loss": 1.5562,
"step": 75
},
{
"epoch": 0.09,
"learning_rate": 0.0009979373926670028,
"loss": 1.6291,
"step": 80
},
{
"epoch": 0.09,
"learning_rate": 0.0009976717162222645,
"loss": 1.625,
"step": 85
},
{
"epoch": 0.1,
"learning_rate": 0.0009973899896977695,
"loss": 1.6008,
"step": 90
},
{
"epoch": 0.1,
"learning_rate": 0.000997092222179292,
"loss": 1.6821,
"step": 95
},
{
"epoch": 0.11,
"learning_rate": 0.0009967784232699352,
"loss": 1.582,
"step": 100
},
{
"epoch": 0.11,
"eval_loss": 1.6186352968215942,
"eval_runtime": 10.6735,
"eval_samples_per_second": 14.054,
"eval_steps_per_second": 1.78,
"step": 100
},
{
"epoch": 0.11,
"learning_rate": 0.0009964486030898186,
"loss": 1.5769,
"step": 105
},
{
"epoch": 0.12,
"learning_rate": 0.0009961027722757538,
"loss": 1.5868,
"step": 110
},
{
"epoch": 0.12,
"learning_rate": 0.0009957409419809006,
"loss": 1.5601,
"step": 115
},
{
"epoch": 0.13,
"learning_rate": 0.000995363123874407,
"loss": 1.6061,
"step": 120
},
{
"epoch": 0.14,
"learning_rate": 0.0009949693301410341,
"loss": 1.6073,
"step": 125
},
{
"epoch": 0.14,
"learning_rate": 0.0009945595734807615,
"loss": 1.4998,
"step": 130
},
{
"epoch": 0.15,
"learning_rate": 0.0009941338671083794,
"loss": 1.5295,
"step": 135
},
{
"epoch": 0.15,
"learning_rate": 0.0009936922247530606,
"loss": 1.5418,
"step": 140
},
{
"epoch": 0.16,
"learning_rate": 0.0009932346606579192,
"loss": 1.554,
"step": 145
},
{
"epoch": 0.16,
"learning_rate": 0.0009927611895795513,
"loss": 1.5509,
"step": 150
},
{
"epoch": 0.17,
"learning_rate": 0.0009922718267875571,
"loss": 1.6123,
"step": 155
},
{
"epoch": 0.17,
"learning_rate": 0.0009917665880640515,
"loss": 1.6267,
"step": 160
},
{
"epoch": 0.18,
"learning_rate": 0.0009912454897031524,
"loss": 1.6116,
"step": 165
},
{
"epoch": 0.18,
"learning_rate": 0.0009907085485104568,
"loss": 1.5618,
"step": 170
},
{
"epoch": 0.19,
"learning_rate": 0.0009901557818024981,
"loss": 1.6085,
"step": 175
},
{
"epoch": 0.2,
"learning_rate": 0.0009895872074061885,
"loss": 1.5829,
"step": 180
},
{
"epoch": 0.2,
"learning_rate": 0.0009890028436582426,
"loss": 1.5407,
"step": 185
},
{
"epoch": 0.21,
"learning_rate": 0.0009884027094045871,
"loss": 1.5568,
"step": 190
},
{
"epoch": 0.21,
"learning_rate": 0.0009877868239997532,
"loss": 1.5831,
"step": 195
},
{
"epoch": 0.22,
"learning_rate": 0.0009871552073062516,
"loss": 1.5231,
"step": 200
},
{
"epoch": 0.22,
"eval_loss": 1.5717933177947998,
"eval_runtime": 10.6708,
"eval_samples_per_second": 14.057,
"eval_steps_per_second": 1.781,
"step": 200
},
{
"epoch": 0.22,
"learning_rate": 0.0009865078796939327,
"loss": 1.5467,
"step": 205
},
{
"epoch": 0.23,
"learning_rate": 0.000985844862039329,
"loss": 1.6403,
"step": 210
},
{
"epoch": 0.23,
"learning_rate": 0.0009851661757249823,
"loss": 1.5352,
"step": 215
},
{
"epoch": 0.24,
"learning_rate": 0.0009844718426387537,
"loss": 1.5616,
"step": 220
},
{
"epoch": 0.24,
"learning_rate": 0.000983761885173118,
"loss": 1.5274,
"step": 225
},
{
"epoch": 0.25,
"learning_rate": 0.000983036326224442,
"loss": 1.6153,
"step": 230
},
{
"epoch": 0.25,
"learning_rate": 0.0009822951891922448,
"loss": 1.5062,
"step": 235
},
{
"epoch": 0.26,
"learning_rate": 0.0009815384979784444,
"loss": 1.6038,
"step": 240
},
{
"epoch": 0.27,
"learning_rate": 0.000980766276986586,
"loss": 1.5097,
"step": 245
},
{
"epoch": 0.27,
"learning_rate": 0.0009799785511210557,
"loss": 1.535,
"step": 250
},
{
"epoch": 0.28,
"learning_rate": 0.000979175345786277,
"loss": 1.52,
"step": 255
},
{
"epoch": 0.28,
"learning_rate": 0.0009783566868858912,
"loss": 1.5678,
"step": 260
},
{
"epoch": 0.29,
"learning_rate": 0.0009775226008219224,
"loss": 1.5536,
"step": 265
},
{
"epoch": 0.29,
"learning_rate": 0.0009766731144939258,
"loss": 1.4826,
"step": 270
},
{
"epoch": 0.3,
"learning_rate": 0.0009758082552981204,
"loss": 1.5537,
"step": 275
},
{
"epoch": 0.3,
"learning_rate": 0.0009749280511265056,
"loss": 1.5277,
"step": 280
},
{
"epoch": 0.31,
"learning_rate": 0.0009740325303659609,
"loss": 1.5445,
"step": 285
},
{
"epoch": 0.31,
"learning_rate": 0.000973121721897331,
"loss": 1.4944,
"step": 290
},
{
"epoch": 0.32,
"learning_rate": 0.0009721956550944948,
"loss": 1.5088,
"step": 295
},
{
"epoch": 0.33,
"learning_rate": 0.0009712543598234172,
"loss": 1.585,
"step": 300
},
{
"epoch": 0.33,
"eval_loss": 1.5345921516418457,
"eval_runtime": 10.6704,
"eval_samples_per_second": 14.058,
"eval_steps_per_second": 1.781,
"step": 300
},
{
"epoch": 0.33,
"learning_rate": 0.0009702978664411863,
"loss": 1.5427,
"step": 305
},
{
"epoch": 0.34,
"learning_rate": 0.0009693262057950345,
"loss": 1.4475,
"step": 310
},
{
"epoch": 0.34,
"learning_rate": 0.0009683394092213436,
"loss": 1.5321,
"step": 315
},
{
"epoch": 0.35,
"learning_rate": 0.0009673375085446339,
"loss": 1.5171,
"step": 320
},
{
"epoch": 0.35,
"learning_rate": 0.0009663205360765382,
"loss": 1.5198,
"step": 325
},
{
"epoch": 0.36,
"learning_rate": 0.00096528852461476,
"loss": 1.492,
"step": 330
},
{
"epoch": 0.36,
"learning_rate": 0.0009642415074420146,
"loss": 1.5036,
"step": 335
},
{
"epoch": 0.37,
"learning_rate": 0.0009631795183249573,
"loss": 1.5134,
"step": 340
},
{
"epoch": 0.37,
"learning_rate": 0.0009621025915130932,
"loss": 1.5568,
"step": 345
},
{
"epoch": 0.38,
"learning_rate": 0.0009610107617376733,
"loss": 1.503,
"step": 350
},
{
"epoch": 0.38,
"learning_rate": 0.0009599040642105736,
"loss": 1.4584,
"step": 355
},
{
"epoch": 0.39,
"learning_rate": 0.000958782534623161,
"loss": 1.4832,
"step": 360
},
{
"epoch": 0.4,
"learning_rate": 0.0009576462091451406,
"loss": 1.4598,
"step": 365
},
{
"epoch": 0.4,
"learning_rate": 0.0009564951244233901,
"loss": 1.5492,
"step": 370
},
{
"epoch": 0.41,
"learning_rate": 0.000955329317580778,
"loss": 1.5145,
"step": 375
},
{
"epoch": 0.41,
"learning_rate": 0.0009541488262149661,
"loss": 1.589,
"step": 380
},
{
"epoch": 0.42,
"learning_rate": 0.0009529536883971963,
"loss": 1.6003,
"step": 385
},
{
"epoch": 0.42,
"learning_rate": 0.0009517439426710646,
"loss": 1.55,
"step": 390
},
{
"epoch": 0.43,
"learning_rate": 0.0009505196280512762,
"loss": 1.5359,
"step": 395
},
{
"epoch": 0.43,
"learning_rate": 0.0009492807840223881,
"loss": 1.4854,
"step": 400
},
{
"epoch": 0.43,
"eval_loss": 1.5193477869033813,
"eval_runtime": 10.6722,
"eval_samples_per_second": 14.055,
"eval_steps_per_second": 1.78,
"step": 400
},
{
"epoch": 0.44,
"learning_rate": 0.0009480274505375358,
"loss": 1.4891,
"step": 405
},
{
"epoch": 0.44,
"learning_rate": 0.0009467596680171446,
"loss": 1.4719,
"step": 410
},
{
"epoch": 0.45,
"learning_rate": 0.0009454774773476257,
"loss": 1.4939,
"step": 415
},
{
"epoch": 0.46,
"learning_rate": 0.0009441809198800587,
"loss": 1.4382,
"step": 420
},
{
"epoch": 0.46,
"learning_rate": 0.0009428700374288564,
"loss": 1.4427,
"step": 425
},
{
"epoch": 0.47,
"learning_rate": 0.0009415448722704175,
"loss": 1.4767,
"step": 430
},
{
"epoch": 0.47,
"learning_rate": 0.0009402054671417628,
"loss": 1.4799,
"step": 435
},
{
"epoch": 0.48,
"learning_rate": 0.0009388518652391571,
"loss": 1.4608,
"step": 440
},
{
"epoch": 0.48,
"learning_rate": 0.0009374841102167157,
"loss": 1.4937,
"step": 445
},
{
"epoch": 0.49,
"learning_rate": 0.0009361022461849965,
"loss": 1.5468,
"step": 450
},
{
"epoch": 0.49,
"learning_rate": 0.0009347063177095783,
"loss": 1.5481,
"step": 455
},
{
"epoch": 0.5,
"learning_rate": 0.0009332963698096223,
"loss": 1.4478,
"step": 460
},
{
"epoch": 0.5,
"learning_rate": 0.0009318724479564215,
"loss": 1.4977,
"step": 465
},
{
"epoch": 0.51,
"learning_rate": 0.0009304345980719329,
"loss": 1.5091,
"step": 470
},
{
"epoch": 0.51,
"learning_rate": 0.0009289828665272977,
"loss": 1.43,
"step": 475
},
{
"epoch": 0.52,
"learning_rate": 0.0009275173001413448,
"loss": 1.4725,
"step": 480
},
{
"epoch": 0.53,
"learning_rate": 0.0009260379461790822,
"loss": 1.3741,
"step": 485
},
{
"epoch": 0.53,
"learning_rate": 0.0009245448523501708,
"loss": 1.4917,
"step": 490
},
{
"epoch": 0.54,
"learning_rate": 0.0009230380668073877,
"loss": 1.4684,
"step": 495
},
{
"epoch": 0.54,
"learning_rate": 0.0009215176381450717,
"loss": 1.5209,
"step": 500
},
{
"epoch": 0.54,
"eval_loss": 1.5050214529037476,
"eval_runtime": 10.6706,
"eval_samples_per_second": 14.057,
"eval_steps_per_second": 1.781,
"step": 500
},
{
"epoch": 0.55,
"learning_rate": 0.0009199836153975573,
"loss": 1.4913,
"step": 505
},
{
"epoch": 0.55,
"learning_rate": 0.0009184360480375926,
"loss": 1.5377,
"step": 510
},
{
"epoch": 0.56,
"learning_rate": 0.0009168749859747438,
"loss": 1.4608,
"step": 515
},
{
"epoch": 0.56,
"learning_rate": 0.0009153004795537861,
"loss": 1.4738,
"step": 520
},
{
"epoch": 0.57,
"learning_rate": 0.0009137125795530795,
"loss": 1.4947,
"step": 525
},
{
"epoch": 0.57,
"learning_rate": 0.0009121113371829318,
"loss": 1.5267,
"step": 530
},
{
"epoch": 0.58,
"learning_rate": 0.0009104968040839463,
"loss": 1.5116,
"step": 535
},
{
"epoch": 0.59,
"learning_rate": 0.000908869032325357,
"loss": 1.4423,
"step": 540
},
{
"epoch": 0.59,
"learning_rate": 0.000907228074403349,
"loss": 1.4565,
"step": 545
},
{
"epoch": 0.6,
"learning_rate": 0.0009055739832393655,
"loss": 1.4923,
"step": 550
},
{
"epoch": 0.6,
"learning_rate": 0.0009039068121784016,
"loss": 1.4304,
"step": 555
},
{
"epoch": 0.61,
"learning_rate": 0.0009022266149872829,
"loss": 1.4422,
"step": 560
},
{
"epoch": 0.61,
"learning_rate": 0.0009005334458529322,
"loss": 1.522,
"step": 565
},
{
"epoch": 0.62,
"learning_rate": 0.0008988273593806222,
"loss": 1.499,
"step": 570
},
{
"epoch": 0.62,
"learning_rate": 0.0008971084105922139,
"loss": 1.4796,
"step": 575
},
{
"epoch": 0.63,
"learning_rate": 0.0008953766549243818,
"loss": 1.4231,
"step": 580
},
{
"epoch": 0.63,
"learning_rate": 0.0008936321482268275,
"loss": 1.462,
"step": 585
},
{
"epoch": 0.64,
"learning_rate": 0.0008918749467604766,
"loss": 1.5191,
"step": 590
},
{
"epoch": 0.64,
"learning_rate": 0.0008901051071956661,
"loss": 1.4845,
"step": 595
},
{
"epoch": 0.65,
"learning_rate": 0.0008883226866103152,
"loss": 1.4652,
"step": 600
},
{
"epoch": 0.65,
"eval_loss": 1.486396074295044,
"eval_runtime": 10.6718,
"eval_samples_per_second": 14.056,
"eval_steps_per_second": 1.78,
"step": 600
},
{
"epoch": 0.66,
"learning_rate": 0.0008865277424880859,
"loss": 1.4773,
"step": 605
},
{
"epoch": 0.66,
"learning_rate": 0.0008847203327165278,
"loss": 1.4555,
"step": 610
},
{
"epoch": 0.67,
"learning_rate": 0.0008829005155852125,
"loss": 1.5235,
"step": 615
},
{
"epoch": 0.67,
"learning_rate": 0.0008810683497838525,
"loss": 1.4329,
"step": 620
},
{
"epoch": 0.68,
"learning_rate": 0.0008792238944004096,
"loss": 1.4515,
"step": 625
},
{
"epoch": 0.68,
"learning_rate": 0.0008773672089191885,
"loss": 1.4616,
"step": 630
},
{
"epoch": 0.69,
"learning_rate": 0.0008754983532189185,
"loss": 1.3931,
"step": 635
},
{
"epoch": 0.69,
"learning_rate": 0.0008736173875708229,
"loss": 1.4714,
"step": 640
},
{
"epoch": 0.7,
"learning_rate": 0.0008717243726366746,
"loss": 1.4831,
"step": 645
},
{
"epoch": 0.7,
"learning_rate": 0.00086981936946684,
"loss": 1.4928,
"step": 650
},
{
"epoch": 0.71,
"learning_rate": 0.0008679024394983105,
"loss": 1.3735,
"step": 655
},
{
"epoch": 0.72,
"learning_rate": 0.0008659736445527202,
"loss": 1.4587,
"step": 660
},
{
"epoch": 0.72,
"learning_rate": 0.0008640330468343532,
"loss": 1.5138,
"step": 665
},
{
"epoch": 0.73,
"learning_rate": 0.0008620807089281364,
"loss": 1.4625,
"step": 670
},
{
"epoch": 0.73,
"learning_rate": 0.0008601166937976226,
"loss": 1.4173,
"step": 675
},
{
"epoch": 0.74,
"learning_rate": 0.000858141064782958,
"loss": 1.4901,
"step": 680
},
{
"epoch": 0.74,
"learning_rate": 0.0008561538855988409,
"loss": 1.4056,
"step": 685
},
{
"epoch": 0.75,
"learning_rate": 0.0008541552203324667,
"loss": 1.4486,
"step": 690
},
{
"epoch": 0.75,
"learning_rate": 0.0008521451334414605,
"loss": 1.4147,
"step": 695
},
{
"epoch": 0.76,
"learning_rate": 0.0008501236897517987,
"loss": 1.4547,
"step": 700
},
{
"epoch": 0.76,
"eval_loss": 1.4729957580566406,
"eval_runtime": 10.6704,
"eval_samples_per_second": 14.058,
"eval_steps_per_second": 1.781,
"step": 700
},
{
"epoch": 0.76,
"learning_rate": 0.000848090954455718,
"loss": 1.4464,
"step": 705
},
{
"epoch": 0.77,
"learning_rate": 0.0008460469931096138,
"loss": 1.4163,
"step": 710
},
{
"epoch": 0.77,
"learning_rate": 0.0008439918716319246,
"loss": 1.5283,
"step": 715
},
{
"epoch": 0.78,
"learning_rate": 0.0008419256563010076,
"loss": 1.4313,
"step": 720
},
{
"epoch": 0.79,
"learning_rate": 0.000839848413753,
"loss": 1.3995,
"step": 725
},
{
"epoch": 0.79,
"learning_rate": 0.0008377602109796709,
"loss": 1.4265,
"step": 730
},
{
"epoch": 0.8,
"learning_rate": 0.0008356611153262598,
"loss": 1.4426,
"step": 735
},
{
"epoch": 0.8,
"learning_rate": 0.0008335511944893057,
"loss": 1.4251,
"step": 740
},
{
"epoch": 0.81,
"learning_rate": 0.0008314305165144633,
"loss": 1.4686,
"step": 745
},
{
"epoch": 0.81,
"learning_rate": 0.0008292991497943081,
"loss": 1.4658,
"step": 750
},
{
"epoch": 0.82,
"learning_rate": 0.0008271571630661321,
"loss": 1.4347,
"step": 755
},
{
"epoch": 0.82,
"learning_rate": 0.0008250046254097255,
"loss": 1.4235,
"step": 760
},
{
"epoch": 0.83,
"learning_rate": 0.0008228416062451494,
"loss": 1.5047,
"step": 765
},
{
"epoch": 0.83,
"learning_rate": 0.0008206681753304976,
"loss": 1.445,
"step": 770
},
{
"epoch": 0.84,
"learning_rate": 0.0008184844027596461,
"loss": 1.4077,
"step": 775
},
{
"epoch": 0.85,
"learning_rate": 0.0008162903589599924,
"loss": 1.5057,
"step": 780
},
{
"epoch": 0.85,
"learning_rate": 0.0008140861146901849,
"loss": 1.4445,
"step": 785
},
{
"epoch": 0.86,
"learning_rate": 0.0008118717410378407,
"loss": 1.5333,
"step": 790
},
{
"epoch": 0.86,
"learning_rate": 0.0008096473094172527,
"loss": 1.3786,
"step": 795
},
{
"epoch": 0.87,
"learning_rate": 0.0008074128915670868,
"loss": 1.3781,
"step": 800
},
{
"epoch": 0.87,
"eval_loss": 1.4600605964660645,
"eval_runtime": 10.6704,
"eval_samples_per_second": 14.058,
"eval_steps_per_second": 1.781,
"step": 800
},
{
"epoch": 0.87,
"learning_rate": 0.0008051685595480678,
"loss": 1.5097,
"step": 805
},
{
"epoch": 0.88,
"learning_rate": 0.0008029143857406563,
"loss": 1.5608,
"step": 810
},
{
"epoch": 0.88,
"learning_rate": 0.0008006504428427133,
"loss": 1.4113,
"step": 815
},
{
"epoch": 0.89,
"learning_rate": 0.0007983768038671568,
"loss": 1.3781,
"step": 820
},
{
"epoch": 0.89,
"learning_rate": 0.0007960935421396062,
"loss": 1.4056,
"step": 825
},
{
"epoch": 0.9,
"learning_rate": 0.0007938007312960178,
"loss": 1.4463,
"step": 830
},
{
"epoch": 0.9,
"learning_rate": 0.0007914984452803105,
"loss": 1.3983,
"step": 835
},
{
"epoch": 0.91,
"learning_rate": 0.0007891867583419805,
"loss": 1.3968,
"step": 840
},
{
"epoch": 0.92,
"learning_rate": 0.0007868657450337066,
"loss": 1.4587,
"step": 845
},
{
"epoch": 0.92,
"learning_rate": 0.0007845354802089463,
"loss": 1.4654,
"step": 850
},
{
"epoch": 0.93,
"learning_rate": 0.0007821960390195224,
"loss": 1.4384,
"step": 855
},
{
"epoch": 0.93,
"learning_rate": 0.0007798474969131971,
"loss": 1.44,
"step": 860
},
{
"epoch": 0.94,
"learning_rate": 0.0007774899296312414,
"loss": 1.4221,
"step": 865
},
{
"epoch": 0.94,
"learning_rate": 0.0007751234132059906,
"loss": 1.3795,
"step": 870
},
{
"epoch": 0.95,
"learning_rate": 0.0007727480239583933,
"loss": 1.4748,
"step": 875
},
{
"epoch": 0.95,
"learning_rate": 0.0007703638384955494,
"loss": 1.5171,
"step": 880
},
{
"epoch": 0.96,
"learning_rate": 0.0007679709337082394,
"loss": 1.3996,
"step": 885
},
{
"epoch": 0.96,
"learning_rate": 0.0007655693867684454,
"loss": 1.4386,
"step": 890
},
{
"epoch": 0.97,
"learning_rate": 0.0007631592751268618,
"loss": 1.3789,
"step": 895
},
{
"epoch": 0.98,
"learning_rate": 0.0007607406765103972,
"loss": 1.4553,
"step": 900
},
{
"epoch": 0.98,
"eval_loss": 1.4479364156723022,
"eval_runtime": 10.6698,
"eval_samples_per_second": 14.058,
"eval_steps_per_second": 1.781,
"step": 900
},
{
"epoch": 0.98,
"learning_rate": 0.000758313668919668,
"loss": 1.3962,
"step": 905
},
{
"epoch": 0.99,
"learning_rate": 0.000755878330626483,
"loss": 1.3899,
"step": 910
},
{
"epoch": 0.99,
"learning_rate": 0.0007534347401713191,
"loss": 1.3965,
"step": 915
},
{
"epoch": 1.0,
"learning_rate": 0.0007509829763607879,
"loss": 1.367,
"step": 920
},
{
"epoch": 1.0,
"learning_rate": 0.0007485231182650945,
"loss": 1.4027,
"step": 925
},
{
"epoch": 1.01,
"learning_rate": 0.0007460552452154877,
"loss": 1.3563,
"step": 930
},
{
"epoch": 1.01,
"learning_rate": 0.0007435794368017007,
"loss": 1.3192,
"step": 935
},
{
"epoch": 1.02,
"learning_rate": 0.0007410957728693856,
"loss": 1.2772,
"step": 940
},
{
"epoch": 1.02,
"learning_rate": 0.0007386043335175367,
"loss": 1.3291,
"step": 945
},
{
"epoch": 1.03,
"learning_rate": 0.000736105199095909,
"loss": 1.304,
"step": 950
},
{
"epoch": 1.03,
"learning_rate": 0.0007335984502024256,
"loss": 1.3832,
"step": 955
},
{
"epoch": 1.04,
"learning_rate": 0.0007310841676805791,
"loss": 1.3351,
"step": 960
},
{
"epoch": 1.05,
"learning_rate": 0.000728562432616824,
"loss": 1.3375,
"step": 965
},
{
"epoch": 1.05,
"learning_rate": 0.0007260333263379619,
"loss": 1.3323,
"step": 970
},
{
"epoch": 1.06,
"learning_rate": 0.0007234969304085186,
"loss": 1.3293,
"step": 975
},
{
"epoch": 1.06,
"learning_rate": 0.0007209533266281133,
"loss": 1.3859,
"step": 980
},
{
"epoch": 1.07,
"learning_rate": 0.0007184025970288211,
"loss": 1.3553,
"step": 985
},
{
"epoch": 1.07,
"learning_rate": 0.000715844823872527,
"loss": 1.3607,
"step": 990
},
{
"epoch": 1.08,
"learning_rate": 0.0007132800896482731,
"loss": 1.3457,
"step": 995
},
{
"epoch": 1.08,
"learning_rate": 0.0007107084770695986,
"loss": 1.3788,
"step": 1000
},
{
"epoch": 1.08,
"eval_loss": 1.4371482133865356,
"eval_runtime": 10.6719,
"eval_samples_per_second": 14.056,
"eval_steps_per_second": 1.78,
"step": 1000
}
],
"logging_steps": 5,
"max_steps": 2766,
"num_train_epochs": 3,
"save_steps": 100,
"total_flos": 3.29660416131072e+17,
"trial_name": null,
"trial_params": null
}