{
"best_metric": 0.018277771770954132,
"best_model_checkpoint": "runs/deepseek_lora_20240424-122712/checkpoint-2000",
"epoch": 0.9399232396020991,
"eval_steps": 500,
"global_step": 3000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 2.6060585975646973,
"learning_rate": 4.0000000000000003e-07,
"loss": 1.6667,
"step": 10
},
{
"epoch": 0.01,
"grad_norm": 5.726912021636963,
"learning_rate": 8.000000000000001e-07,
"loss": 1.6606,
"step": 20
},
{
"epoch": 0.01,
"grad_norm": 6.3224616050720215,
"learning_rate": 1.2000000000000002e-06,
"loss": 1.6875,
"step": 30
},
{
"epoch": 0.01,
"grad_norm": 4.4585185050964355,
"learning_rate": 1.6000000000000001e-06,
"loss": 1.5512,
"step": 40
},
{
"epoch": 0.02,
"grad_norm": 2.849210023880005,
"learning_rate": 2.0000000000000003e-06,
"loss": 1.598,
"step": 50
},
{
"epoch": 0.02,
"grad_norm": 1.7917169332504272,
"learning_rate": 2.4000000000000003e-06,
"loss": 1.3203,
"step": 60
},
{
"epoch": 0.02,
"grad_norm": 10.056238174438477,
"learning_rate": 2.8000000000000003e-06,
"loss": 1.4914,
"step": 70
},
{
"epoch": 0.03,
"grad_norm": 5.542996406555176,
"learning_rate": 3.2000000000000003e-06,
"loss": 1.3369,
"step": 80
},
{
"epoch": 0.03,
"grad_norm": 1.5463522672653198,
"learning_rate": 3.6000000000000003e-06,
"loss": 1.3185,
"step": 90
},
{
"epoch": 0.03,
"grad_norm": 9.401649475097656,
"learning_rate": 4.000000000000001e-06,
"loss": 1.2827,
"step": 100
},
{
"epoch": 0.03,
"grad_norm": 1.238974928855896,
"learning_rate": 4.4e-06,
"loss": 1.0366,
"step": 110
},
{
"epoch": 0.04,
"grad_norm": 1.2282520532608032,
"learning_rate": 4.800000000000001e-06,
"loss": 1.0715,
"step": 120
},
{
"epoch": 0.04,
"grad_norm": 1.4583073854446411,
"learning_rate": 5.2e-06,
"loss": 0.868,
"step": 130
},
{
"epoch": 0.04,
"grad_norm": 4.3194146156311035,
"learning_rate": 5.600000000000001e-06,
"loss": 0.8281,
"step": 140
},
{
"epoch": 0.05,
"grad_norm": 4.280028820037842,
"learning_rate": 6e-06,
"loss": 0.8666,
"step": 150
},
{
"epoch": 0.05,
"grad_norm": 0.9424476027488708,
"learning_rate": 6.4000000000000006e-06,
"loss": 0.6691,
"step": 160
},
{
"epoch": 0.05,
"grad_norm": 0.9174453616142273,
"learning_rate": 6.800000000000001e-06,
"loss": 0.5405,
"step": 170
},
{
"epoch": 0.06,
"grad_norm": 1.0179359912872314,
"learning_rate": 7.2000000000000005e-06,
"loss": 0.5103,
"step": 180
},
{
"epoch": 0.06,
"grad_norm": 0.4516351521015167,
"learning_rate": 7.600000000000001e-06,
"loss": 0.2679,
"step": 190
},
{
"epoch": 0.06,
"grad_norm": 6.054721355438232,
"learning_rate": 8.000000000000001e-06,
"loss": 0.2634,
"step": 200
},
{
"epoch": 0.07,
"grad_norm": 0.40070173144340515,
"learning_rate": 8.400000000000001e-06,
"loss": 0.2254,
"step": 210
},
{
"epoch": 0.07,
"grad_norm": 0.6031996011734009,
"learning_rate": 8.8e-06,
"loss": 0.1281,
"step": 220
},
{
"epoch": 0.07,
"grad_norm": 3.0129740238189697,
"learning_rate": 9.200000000000002e-06,
"loss": 0.2365,
"step": 230
},
{
"epoch": 0.08,
"grad_norm": 0.6611989140510559,
"learning_rate": 9.600000000000001e-06,
"loss": 0.3255,
"step": 240
},
{
"epoch": 0.08,
"grad_norm": 0.3195549547672272,
"learning_rate": 1e-05,
"loss": 0.3287,
"step": 250
},
{
"epoch": 0.08,
"grad_norm": 0.27022993564605713,
"learning_rate": 1.04e-05,
"loss": 0.1439,
"step": 260
},
{
"epoch": 0.08,
"grad_norm": 4.671290874481201,
"learning_rate": 1.0800000000000002e-05,
"loss": 0.0676,
"step": 270
},
{
"epoch": 0.09,
"grad_norm": 0.2435157746076584,
"learning_rate": 1.1200000000000001e-05,
"loss": 0.2338,
"step": 280
},
{
"epoch": 0.09,
"grad_norm": 0.40632137656211853,
"learning_rate": 1.16e-05,
"loss": 0.1661,
"step": 290
},
{
"epoch": 0.09,
"grad_norm": 0.2910804748535156,
"learning_rate": 1.2e-05,
"loss": 0.1781,
"step": 300
},
{
"epoch": 0.1,
"grad_norm": 7.506131649017334,
"learning_rate": 1.2400000000000002e-05,
"loss": 0.1283,
"step": 310
},
{
"epoch": 0.1,
"grad_norm": 0.04508267343044281,
"learning_rate": 1.2800000000000001e-05,
"loss": 0.1022,
"step": 320
},
{
"epoch": 0.1,
"grad_norm": 0.23127306997776031,
"learning_rate": 1.3200000000000002e-05,
"loss": 0.1243,
"step": 330
},
{
"epoch": 0.11,
"grad_norm": 6.1613383293151855,
"learning_rate": 1.3600000000000002e-05,
"loss": 0.1485,
"step": 340
},
{
"epoch": 0.11,
"grad_norm": 0.023777758702635765,
"learning_rate": 1.4e-05,
"loss": 0.0538,
"step": 350
},
{
"epoch": 0.11,
"grad_norm": 0.014769719913601875,
"learning_rate": 1.4400000000000001e-05,
"loss": 0.0285,
"step": 360
},
{
"epoch": 0.12,
"grad_norm": 5.690178871154785,
"learning_rate": 1.48e-05,
"loss": 0.1325,
"step": 370
},
{
"epoch": 0.12,
"grad_norm": 0.02117346040904522,
"learning_rate": 1.5200000000000002e-05,
"loss": 0.0507,
"step": 380
},
{
"epoch": 0.12,
"grad_norm": 0.06630611419677734,
"learning_rate": 1.5600000000000003e-05,
"loss": 0.199,
"step": 390
},
{
"epoch": 0.13,
"grad_norm": 0.16532257199287415,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.0652,
"step": 400
},
{
"epoch": 0.13,
"grad_norm": 0.009799620136618614,
"learning_rate": 1.64e-05,
"loss": 0.1011,
"step": 410
},
{
"epoch": 0.13,
"grad_norm": 0.12324853241443634,
"learning_rate": 1.6800000000000002e-05,
"loss": 0.0364,
"step": 420
},
{
"epoch": 0.13,
"grad_norm": 0.010026533156633377,
"learning_rate": 1.72e-05,
"loss": 0.1973,
"step": 430
},
{
"epoch": 0.14,
"grad_norm": 0.0071570691652596,
"learning_rate": 1.76e-05,
"loss": 0.0819,
"step": 440
},
{
"epoch": 0.14,
"grad_norm": 0.04149964451789856,
"learning_rate": 1.8e-05,
"loss": 0.1046,
"step": 450
},
{
"epoch": 0.14,
"grad_norm": 3.45990252494812,
"learning_rate": 1.8400000000000003e-05,
"loss": 0.1011,
"step": 460
},
{
"epoch": 0.15,
"grad_norm": 0.00734300771728158,
"learning_rate": 1.88e-05,
"loss": 0.223,
"step": 470
},
{
"epoch": 0.15,
"grad_norm": 0.01419814396649599,
"learning_rate": 1.9200000000000003e-05,
"loss": 0.1178,
"step": 480
},
{
"epoch": 0.15,
"grad_norm": 0.10111651569604874,
"learning_rate": 1.9600000000000002e-05,
"loss": 0.1416,
"step": 490
},
{
"epoch": 0.16,
"grad_norm": 3.1214537620544434,
"learning_rate": 2e-05,
"loss": 0.1258,
"step": 500
},
{
"epoch": 0.16,
"eval_loss": 0.05367860943078995,
"eval_runtime": 62.0108,
"eval_samples_per_second": 16.126,
"eval_steps_per_second": 16.126,
"step": 500
},
{
"epoch": 0.16,
"grad_norm": 0.013164438307285309,
"learning_rate": 1.9955555555555557e-05,
"loss": 0.0268,
"step": 510
},
{
"epoch": 0.16,
"grad_norm": 0.15392057597637177,
"learning_rate": 1.9911111111111112e-05,
"loss": 0.0843,
"step": 520
},
{
"epoch": 0.17,
"grad_norm": 0.006180985830724239,
"learning_rate": 1.9866666666666667e-05,
"loss": 0.1384,
"step": 530
},
{
"epoch": 0.17,
"grad_norm": 0.2702454924583435,
"learning_rate": 1.9822222222222226e-05,
"loss": 0.0956,
"step": 540
},
{
"epoch": 0.17,
"grad_norm": 1.5135071277618408,
"learning_rate": 1.977777777777778e-05,
"loss": 0.1615,
"step": 550
},
{
"epoch": 0.18,
"grad_norm": 0.005121675785630941,
"learning_rate": 1.9733333333333336e-05,
"loss": 0.1201,
"step": 560
},
{
"epoch": 0.18,
"grad_norm": 0.09728775173425674,
"learning_rate": 1.968888888888889e-05,
"loss": 0.1112,
"step": 570
},
{
"epoch": 0.18,
"grad_norm": 4.736963748931885,
"learning_rate": 1.9644444444444447e-05,
"loss": 0.1546,
"step": 580
},
{
"epoch": 0.18,
"grad_norm": 0.006898015271872282,
"learning_rate": 1.9600000000000002e-05,
"loss": 0.1116,
"step": 590
},
{
"epoch": 0.19,
"grad_norm": 0.005388608202338219,
"learning_rate": 1.9555555555555557e-05,
"loss": 0.1597,
"step": 600
},
{
"epoch": 0.19,
"grad_norm": 0.0133949751034379,
"learning_rate": 1.9511111111111113e-05,
"loss": 0.0799,
"step": 610
},
{
"epoch": 0.19,
"grad_norm": 0.11226101964712143,
"learning_rate": 1.9466666666666668e-05,
"loss": 0.0448,
"step": 620
},
{
"epoch": 0.2,
"grad_norm": 0.011205198243260384,
"learning_rate": 1.9422222222222223e-05,
"loss": 0.0739,
"step": 630
},
{
"epoch": 0.2,
"grad_norm": 0.007312687113881111,
"learning_rate": 1.9377777777777778e-05,
"loss": 0.0779,
"step": 640
},
{
"epoch": 0.2,
"grad_norm": 0.006550287362188101,
"learning_rate": 1.9333333333333333e-05,
"loss": 0.2039,
"step": 650
},
{
"epoch": 0.21,
"grad_norm": 1.961844563484192,
"learning_rate": 1.928888888888889e-05,
"loss": 0.0618,
"step": 660
},
{
"epoch": 0.21,
"grad_norm": 0.012318034656345844,
"learning_rate": 1.9244444444444444e-05,
"loss": 0.1166,
"step": 670
},
{
"epoch": 0.21,
"grad_norm": 0.08778905868530273,
"learning_rate": 1.9200000000000003e-05,
"loss": 0.0822,
"step": 680
},
{
"epoch": 0.22,
"grad_norm": 0.010623461566865444,
"learning_rate": 1.9155555555555558e-05,
"loss": 0.095,
"step": 690
},
{
"epoch": 0.22,
"grad_norm": 0.0858495905995369,
"learning_rate": 1.9111111111111113e-05,
"loss": 0.0468,
"step": 700
},
{
"epoch": 0.22,
"grad_norm": 1.7685880661010742,
"learning_rate": 1.9066666666666668e-05,
"loss": 0.0288,
"step": 710
},
{
"epoch": 0.23,
"grad_norm": 0.032682083547115326,
"learning_rate": 1.9022222222222223e-05,
"loss": 0.1024,
"step": 720
},
{
"epoch": 0.23,
"grad_norm": 0.010010900907218456,
"learning_rate": 1.897777777777778e-05,
"loss": 0.0129,
"step": 730
},
{
"epoch": 0.23,
"grad_norm": 0.037693481892347336,
"learning_rate": 1.8933333333333334e-05,
"loss": 0.1217,
"step": 740
},
{
"epoch": 0.23,
"grad_norm": 2.2111008167266846,
"learning_rate": 1.888888888888889e-05,
"loss": 0.1206,
"step": 750
},
{
"epoch": 0.24,
"grad_norm": 1.4000599384307861,
"learning_rate": 1.8844444444444444e-05,
"loss": 0.1214,
"step": 760
},
{
"epoch": 0.24,
"grad_norm": 4.3877034187316895,
"learning_rate": 1.88e-05,
"loss": 0.0408,
"step": 770
},
{
"epoch": 0.24,
"grad_norm": 4.164296627044678,
"learning_rate": 1.8755555555555558e-05,
"loss": 0.1315,
"step": 780
},
{
"epoch": 0.25,
"grad_norm": 0.07969211786985397,
"learning_rate": 1.8711111111111113e-05,
"loss": 0.0149,
"step": 790
},
{
"epoch": 0.25,
"grad_norm": 3.9340951442718506,
"learning_rate": 1.866666666666667e-05,
"loss": 0.1169,
"step": 800
},
{
"epoch": 0.25,
"grad_norm": 0.006517359986901283,
"learning_rate": 1.8622222222222224e-05,
"loss": 0.0517,
"step": 810
},
{
"epoch": 0.26,
"grad_norm": 0.008951540105044842,
"learning_rate": 1.857777777777778e-05,
"loss": 0.0935,
"step": 820
},
{
"epoch": 0.26,
"grad_norm": 0.03655437007546425,
"learning_rate": 1.8533333333333334e-05,
"loss": 0.0883,
"step": 830
},
{
"epoch": 0.26,
"grad_norm": 0.0346522182226181,
"learning_rate": 1.848888888888889e-05,
"loss": 0.0479,
"step": 840
},
{
"epoch": 0.27,
"grad_norm": 0.006223162170499563,
"learning_rate": 1.8444444444444448e-05,
"loss": 0.0075,
"step": 850
},
{
"epoch": 0.27,
"grad_norm": 0.004037676844745874,
"learning_rate": 1.8400000000000003e-05,
"loss": 0.0333,
"step": 860
},
{
"epoch": 0.27,
"grad_norm": 0.004569903016090393,
"learning_rate": 1.835555555555556e-05,
"loss": 0.0536,
"step": 870
},
{
"epoch": 0.28,
"grad_norm": 0.005509174894541502,
"learning_rate": 1.8311111111111114e-05,
"loss": 0.1528,
"step": 880
},
{
"epoch": 0.28,
"grad_norm": 0.007597978692501783,
"learning_rate": 1.826666666666667e-05,
"loss": 0.0052,
"step": 890
},
{
"epoch": 0.28,
"grad_norm": 4.2217936515808105,
"learning_rate": 1.8222222222222224e-05,
"loss": 0.0776,
"step": 900
},
{
"epoch": 0.29,
"grad_norm": 0.004342419560998678,
"learning_rate": 1.817777777777778e-05,
"loss": 0.0552,
"step": 910
},
{
"epoch": 0.29,
"grad_norm": 0.0696156695485115,
"learning_rate": 1.8133333333333335e-05,
"loss": 0.0412,
"step": 920
},
{
"epoch": 0.29,
"grad_norm": 0.022448547184467316,
"learning_rate": 1.808888888888889e-05,
"loss": 0.0109,
"step": 930
},
{
"epoch": 0.29,
"grad_norm": 4.305741310119629,
"learning_rate": 1.8044444444444445e-05,
"loss": 0.0428,
"step": 940
},
{
"epoch": 0.3,
"grad_norm": 3.344078779220581,
"learning_rate": 1.8e-05,
"loss": 0.0651,
"step": 950
},
{
"epoch": 0.3,
"grad_norm": 2.585327386856079,
"learning_rate": 1.7955555555555556e-05,
"loss": 0.1474,
"step": 960
},
{
"epoch": 0.3,
"grad_norm": 2.2476861476898193,
"learning_rate": 1.791111111111111e-05,
"loss": 0.0892,
"step": 970
},
{
"epoch": 0.31,
"grad_norm": 0.06212488189339638,
"learning_rate": 1.7866666666666666e-05,
"loss": 0.0659,
"step": 980
},
{
"epoch": 0.31,
"grad_norm": 0.012895594350993633,
"learning_rate": 1.782222222222222e-05,
"loss": 0.0915,
"step": 990
},
{
"epoch": 0.31,
"grad_norm": 0.1314150094985962,
"learning_rate": 1.7777777777777777e-05,
"loss": 0.0456,
"step": 1000
},
{
"epoch": 0.31,
"eval_loss": 0.02997952327132225,
"eval_runtime": 62.0027,
"eval_samples_per_second": 16.128,
"eval_steps_per_second": 16.128,
"step": 1000
},
{
"epoch": 0.32,
"grad_norm": 0.01634177938103676,
"learning_rate": 1.7733333333333335e-05,
"loss": 0.0424,
"step": 1010
},
{
"epoch": 0.32,
"grad_norm": 0.009437276981770992,
"learning_rate": 1.768888888888889e-05,
"loss": 0.0663,
"step": 1020
},
{
"epoch": 0.32,
"grad_norm": 0.04742557182908058,
"learning_rate": 1.7644444444444446e-05,
"loss": 0.1235,
"step": 1030
},
{
"epoch": 0.33,
"grad_norm": 1.101121187210083,
"learning_rate": 1.76e-05,
"loss": 0.0574,
"step": 1040
},
{
"epoch": 0.33,
"grad_norm": 3.8775153160095215,
"learning_rate": 1.7555555555555556e-05,
"loss": 0.0916,
"step": 1050
},
{
"epoch": 0.33,
"grad_norm": 0.0064536286517977715,
"learning_rate": 1.751111111111111e-05,
"loss": 0.0363,
"step": 1060
},
{
"epoch": 0.34,
"grad_norm": 0.004769014660269022,
"learning_rate": 1.7466666666666667e-05,
"loss": 0.013,
"step": 1070
},
{
"epoch": 0.34,
"grad_norm": 1.7518908977508545,
"learning_rate": 1.7422222222222222e-05,
"loss": 0.082,
"step": 1080
},
{
"epoch": 0.34,
"grad_norm": 0.07153653353452682,
"learning_rate": 1.737777777777778e-05,
"loss": 0.0388,
"step": 1090
},
{
"epoch": 0.34,
"grad_norm": 0.015164585784077644,
"learning_rate": 1.7333333333333336e-05,
"loss": 0.0613,
"step": 1100
},
{
"epoch": 0.35,
"grad_norm": 3.7508952617645264,
"learning_rate": 1.728888888888889e-05,
"loss": 0.0989,
"step": 1110
},
{
"epoch": 0.35,
"grad_norm": 0.025457441806793213,
"learning_rate": 1.7244444444444446e-05,
"loss": 0.0268,
"step": 1120
},
{
"epoch": 0.35,
"grad_norm": 4.794126033782959,
"learning_rate": 1.72e-05,
"loss": 0.0741,
"step": 1130
},
{
"epoch": 0.36,
"grad_norm": 4.877151012420654,
"learning_rate": 1.7155555555555557e-05,
"loss": 0.0653,
"step": 1140
},
{
"epoch": 0.36,
"grad_norm": 0.009955652058124542,
"learning_rate": 1.7111111111111112e-05,
"loss": 0.1041,
"step": 1150
},
{
"epoch": 0.36,
"grad_norm": 0.006371030583977699,
"learning_rate": 1.706666666666667e-05,
"loss": 0.0365,
"step": 1160
},
{
"epoch": 0.37,
"grad_norm": 0.005713317077606916,
"learning_rate": 1.7022222222222226e-05,
"loss": 0.0402,
"step": 1170
},
{
"epoch": 0.37,
"grad_norm": 0.0035403750371187925,
"learning_rate": 1.697777777777778e-05,
"loss": 0.0208,
"step": 1180
},
{
"epoch": 0.37,
"grad_norm": 1.2340508699417114,
"learning_rate": 1.6933333333333336e-05,
"loss": 0.0507,
"step": 1190
},
{
"epoch": 0.38,
"grad_norm": 0.01031398307532072,
"learning_rate": 1.688888888888889e-05,
"loss": 0.1464,
"step": 1200
},
{
"epoch": 0.38,
"grad_norm": 1.4290913343429565,
"learning_rate": 1.6844444444444447e-05,
"loss": 0.01,
"step": 1210
},
{
"epoch": 0.38,
"grad_norm": 1.9291528463363647,
"learning_rate": 1.6800000000000002e-05,
"loss": 0.0077,
"step": 1220
},
{
"epoch": 0.39,
"grad_norm": 0.049461908638477325,
"learning_rate": 1.6755555555555557e-05,
"loss": 0.034,
"step": 1230
},
{
"epoch": 0.39,
"grad_norm": 0.004318055231124163,
"learning_rate": 1.6711111111111112e-05,
"loss": 0.0612,
"step": 1240
},
{
"epoch": 0.39,
"grad_norm": 0.003021540120244026,
"learning_rate": 1.6666666666666667e-05,
"loss": 0.0135,
"step": 1250
},
{
"epoch": 0.39,
"grad_norm": 0.04815623164176941,
"learning_rate": 1.6622222222222223e-05,
"loss": 0.0884,
"step": 1260
},
{
"epoch": 0.4,
"grad_norm": 2.4603257179260254,
"learning_rate": 1.6577777777777778e-05,
"loss": 0.0149,
"step": 1270
},
{
"epoch": 0.4,
"grad_norm": 0.002652758965268731,
"learning_rate": 1.6533333333333333e-05,
"loss": 0.0239,
"step": 1280
},
{
"epoch": 0.4,
"grad_norm": 0.053305696696043015,
"learning_rate": 1.648888888888889e-05,
"loss": 0.0615,
"step": 1290
},
{
"epoch": 0.41,
"grad_norm": 0.0942620038986206,
"learning_rate": 1.6444444444444444e-05,
"loss": 0.0789,
"step": 1300
},
{
"epoch": 0.41,
"grad_norm": 2.3062126636505127,
"learning_rate": 1.64e-05,
"loss": 0.0526,
"step": 1310
},
{
"epoch": 0.41,
"grad_norm": 0.004340393468737602,
"learning_rate": 1.6355555555555557e-05,
"loss": 0.0638,
"step": 1320
},
{
"epoch": 0.42,
"grad_norm": 2.277503252029419,
"learning_rate": 1.6311111111111113e-05,
"loss": 0.0512,
"step": 1330
},
{
"epoch": 0.42,
"grad_norm": 5.073038578033447,
"learning_rate": 1.6266666666666668e-05,
"loss": 0.1041,
"step": 1340
},
{
"epoch": 0.42,
"grad_norm": 0.0030644198413938284,
"learning_rate": 1.6222222222222223e-05,
"loss": 0.0016,
"step": 1350
},
{
"epoch": 0.43,
"grad_norm": 0.4875153601169586,
"learning_rate": 1.617777777777778e-05,
"loss": 0.0946,
"step": 1360
},
{
"epoch": 0.43,
"grad_norm": 0.003334318520501256,
"learning_rate": 1.6133333333333334e-05,
"loss": 0.0548,
"step": 1370
},
{
"epoch": 0.43,
"grad_norm": 2.697582960128784,
"learning_rate": 1.608888888888889e-05,
"loss": 0.0628,
"step": 1380
},
{
"epoch": 0.44,
"grad_norm": 0.021118061617016792,
"learning_rate": 1.6044444444444444e-05,
"loss": 0.0537,
"step": 1390
},
{
"epoch": 0.44,
"grad_norm": 0.007942981086671352,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.057,
"step": 1400
},
{
"epoch": 0.44,
"grad_norm": 0.003399541135877371,
"learning_rate": 1.5955555555555558e-05,
"loss": 0.0089,
"step": 1410
},
{
"epoch": 0.44,
"grad_norm": 0.4597236216068268,
"learning_rate": 1.5911111111111113e-05,
"loss": 0.079,
"step": 1420
},
{
"epoch": 0.45,
"grad_norm": 1.0684938430786133,
"learning_rate": 1.586666666666667e-05,
"loss": 0.0149,
"step": 1430
},
{
"epoch": 0.45,
"grad_norm": 3.3166937828063965,
"learning_rate": 1.5822222222222224e-05,
"loss": 0.1189,
"step": 1440
},
{
"epoch": 0.45,
"grad_norm": 5.2161736488342285,
"learning_rate": 1.577777777777778e-05,
"loss": 0.1408,
"step": 1450
},
{
"epoch": 0.46,
"grad_norm": 0.3570309281349182,
"learning_rate": 1.5733333333333334e-05,
"loss": 0.0247,
"step": 1460
},
{
"epoch": 0.46,
"grad_norm": 0.005081634968519211,
"learning_rate": 1.5688888888888893e-05,
"loss": 0.0231,
"step": 1470
},
{
"epoch": 0.46,
"grad_norm": 4.267640590667725,
"learning_rate": 1.5644444444444448e-05,
"loss": 0.0928,
"step": 1480
},
{
"epoch": 0.47,
"grad_norm": 0.014545031823217869,
"learning_rate": 1.5600000000000003e-05,
"loss": 0.0561,
"step": 1490
},
{
"epoch": 0.47,
"grad_norm": 2.007899522781372,
"learning_rate": 1.555555555555556e-05,
"loss": 0.058,
"step": 1500
},
{
"epoch": 0.47,
"eval_loss": 0.021991439163684845,
"eval_runtime": 61.9363,
"eval_samples_per_second": 16.146,
"eval_steps_per_second": 16.146,
"step": 1500
},
{
"epoch": 0.47,
"grad_norm": 0.18892046809196472,
"learning_rate": 1.5511111111111114e-05,
"loss": 0.0541,
"step": 1510
},
{
"epoch": 0.48,
"grad_norm": 0.9835280179977417,
"learning_rate": 1.546666666666667e-05,
"loss": 0.0349,
"step": 1520
},
{
"epoch": 0.48,
"grad_norm": 0.004944021347910166,
"learning_rate": 1.5422222222222224e-05,
"loss": 0.0688,
"step": 1530
},
{
"epoch": 0.48,
"grad_norm": 0.0025414193514734507,
"learning_rate": 1.537777777777778e-05,
"loss": 0.0374,
"step": 1540
},
{
"epoch": 0.49,
"grad_norm": 0.047879841178655624,
"learning_rate": 1.5333333333333334e-05,
"loss": 0.0867,
"step": 1550
},
{
"epoch": 0.49,
"grad_norm": 0.12313953042030334,
"learning_rate": 1.528888888888889e-05,
"loss": 0.0142,
"step": 1560
},
{
"epoch": 0.49,
"grad_norm": 0.0055120717734098434,
"learning_rate": 1.5244444444444447e-05,
"loss": 0.0332,
"step": 1570
},
{
"epoch": 0.5,
"grad_norm": 0.0030517149716615677,
"learning_rate": 1.5200000000000002e-05,
"loss": 0.0739,
"step": 1580
},
{
"epoch": 0.5,
"grad_norm": 0.04954369366168976,
"learning_rate": 1.5155555555555557e-05,
"loss": 0.0609,
"step": 1590
},
{
"epoch": 0.5,
"grad_norm": 2.16231369972229,
"learning_rate": 1.5111111111111112e-05,
"loss": 0.084,
"step": 1600
},
{
"epoch": 0.5,
"grad_norm": 0.03328130766749382,
"learning_rate": 1.5066666666666668e-05,
"loss": 0.0561,
"step": 1610
},
{
"epoch": 0.51,
"grad_norm": 0.3883110582828522,
"learning_rate": 1.5022222222222223e-05,
"loss": 0.065,
"step": 1620
},
{
"epoch": 0.51,
"grad_norm": 0.007501136511564255,
"learning_rate": 1.497777777777778e-05,
"loss": 0.0645,
"step": 1630
},
{
"epoch": 0.51,
"grad_norm": 0.0025652372278273106,
"learning_rate": 1.4933333333333335e-05,
"loss": 0.1242,
"step": 1640
},
{
"epoch": 0.52,
"grad_norm": 2.322174549102783,
"learning_rate": 1.488888888888889e-05,
"loss": 0.0639,
"step": 1650
},
{
"epoch": 0.52,
"grad_norm": 3.1910171508789062,
"learning_rate": 1.4844444444444445e-05,
"loss": 0.1048,
"step": 1660
},
{
"epoch": 0.52,
"grad_norm": 0.00425474438816309,
"learning_rate": 1.48e-05,
"loss": 0.0244,
"step": 1670
},
{
"epoch": 0.53,
"grad_norm": 4.180226802825928,
"learning_rate": 1.4755555555555556e-05,
"loss": 0.0874,
"step": 1680
},
{
"epoch": 0.53,
"grad_norm": 1.1955620050430298,
"learning_rate": 1.4711111111111111e-05,
"loss": 0.0371,
"step": 1690
},
{
"epoch": 0.53,
"grad_norm": 0.010523403063416481,
"learning_rate": 1.4666666666666666e-05,
"loss": 0.0441,
"step": 1700
},
{
"epoch": 0.54,
"grad_norm": 0.3403013348579407,
"learning_rate": 1.4622222222222225e-05,
"loss": 0.0758,
"step": 1710
},
{
"epoch": 0.54,
"grad_norm": 0.046740252524614334,
"learning_rate": 1.457777777777778e-05,
"loss": 0.1069,
"step": 1720
},
{
"epoch": 0.54,
"grad_norm": 0.1384706348180771,
"learning_rate": 1.4533333333333335e-05,
"loss": 0.0656,
"step": 1730
},
{
"epoch": 0.55,
"grad_norm": 1.4094263315200806,
"learning_rate": 1.448888888888889e-05,
"loss": 0.0255,
"step": 1740
},
{
"epoch": 0.55,
"grad_norm": 0.055873990058898926,
"learning_rate": 1.4444444444444446e-05,
"loss": 0.0181,
"step": 1750
},
{
"epoch": 0.55,
"grad_norm": 0.002195443492382765,
"learning_rate": 1.4400000000000001e-05,
"loss": 0.0004,
"step": 1760
},
{
"epoch": 0.55,
"grad_norm": 0.004009276628494263,
"learning_rate": 1.4355555555555556e-05,
"loss": 0.0724,
"step": 1770
},
{
"epoch": 0.56,
"grad_norm": 2.4936776161193848,
"learning_rate": 1.4311111111111111e-05,
"loss": 0.0822,
"step": 1780
},
{
"epoch": 0.56,
"grad_norm": 1.6697956323623657,
"learning_rate": 1.4266666666666668e-05,
"loss": 0.0041,
"step": 1790
},
{
"epoch": 0.56,
"grad_norm": 2.3623549938201904,
"learning_rate": 1.4222222222222224e-05,
"loss": 0.0805,
"step": 1800
},
{
"epoch": 0.57,
"grad_norm": 3.6882166862487793,
"learning_rate": 1.4177777777777779e-05,
"loss": 0.0353,
"step": 1810
},
{
"epoch": 0.57,
"grad_norm": 2.8029565811157227,
"learning_rate": 1.4133333333333334e-05,
"loss": 0.0666,
"step": 1820
},
{
"epoch": 0.57,
"grad_norm": 0.0027399081736803055,
"learning_rate": 1.408888888888889e-05,
"loss": 0.0129,
"step": 1830
},
{
"epoch": 0.58,
"grad_norm": 0.002227638615295291,
"learning_rate": 1.4044444444444445e-05,
"loss": 0.0644,
"step": 1840
},
{
"epoch": 0.58,
"grad_norm": 0.0038109265733510256,
"learning_rate": 1.4e-05,
"loss": 0.0554,
"step": 1850
},
{
"epoch": 0.58,
"grad_norm": 0.23026524484157562,
"learning_rate": 1.3955555555555558e-05,
"loss": 0.0513,
"step": 1860
},
{
"epoch": 0.59,
"grad_norm": 0.003622630378231406,
"learning_rate": 1.3911111111111114e-05,
"loss": 0.0595,
"step": 1870
},
{
"epoch": 0.59,
"grad_norm": 0.004487840924412012,
"learning_rate": 1.3866666666666669e-05,
"loss": 0.0507,
"step": 1880
},
{
"epoch": 0.59,
"grad_norm": 0.0032237153500318527,
"learning_rate": 1.3822222222222224e-05,
"loss": 0.0246,
"step": 1890
},
{
"epoch": 0.6,
"grad_norm": 2.904846429824829,
"learning_rate": 1.377777777777778e-05,
"loss": 0.041,
"step": 1900
},
{
"epoch": 0.6,
"grad_norm": 2.9861977100372314,
"learning_rate": 1.3733333333333335e-05,
"loss": 0.0529,
"step": 1910
},
{
"epoch": 0.6,
"grad_norm": 0.002036773832514882,
"learning_rate": 1.368888888888889e-05,
"loss": 0.0515,
"step": 1920
},
{
"epoch": 0.6,
"grad_norm": 2.882114887237549,
"learning_rate": 1.3644444444444445e-05,
"loss": 0.0437,
"step": 1930
},
{
"epoch": 0.61,
"grad_norm": 3.0705764293670654,
"learning_rate": 1.3600000000000002e-05,
"loss": 0.0203,
"step": 1940
},
{
"epoch": 0.61,
"grad_norm": 0.003252115799114108,
"learning_rate": 1.3555555555555557e-05,
"loss": 0.0288,
"step": 1950
},
{
"epoch": 0.61,
"grad_norm": 0.002082700841128826,
"learning_rate": 1.3511111111111112e-05,
"loss": 0.0737,
"step": 1960
},
{
"epoch": 0.62,
"grad_norm": 0.004269629716873169,
"learning_rate": 1.3466666666666668e-05,
"loss": 0.0283,
"step": 1970
},
{
"epoch": 0.62,
"grad_norm": 0.004804402124136686,
"learning_rate": 1.3422222222222223e-05,
"loss": 0.0769,
"step": 1980
},
{
"epoch": 0.62,
"grad_norm": 0.006747941020876169,
"learning_rate": 1.3377777777777778e-05,
"loss": 0.0169,
"step": 1990
},
{
"epoch": 0.63,
"grad_norm": 0.00186560966540128,
"learning_rate": 1.3333333333333333e-05,
"loss": 0.0346,
"step": 2000
},
{
"epoch": 0.63,
"eval_loss": 0.018277771770954132,
"eval_runtime": 61.9406,
"eval_samples_per_second": 16.144,
"eval_steps_per_second": 16.144,
"step": 2000
},
{
"epoch": 0.63,
"grad_norm": 0.0024101065937429667,
"learning_rate": 1.3288888888888889e-05,
"loss": 0.047,
"step": 2010
},
{
"epoch": 0.63,
"grad_norm": 0.013266036286950111,
"learning_rate": 1.3244444444444447e-05,
"loss": 0.0304,
"step": 2020
},
{
"epoch": 0.64,
"grad_norm": 0.500153660774231,
"learning_rate": 1.3200000000000002e-05,
"loss": 0.0443,
"step": 2030
},
{
"epoch": 0.64,
"grad_norm": 0.0013359179720282555,
"learning_rate": 1.3155555555555558e-05,
"loss": 0.0002,
"step": 2040
},
{
"epoch": 0.64,
"grad_norm": 1.1259698867797852,
"learning_rate": 1.3111111111111113e-05,
"loss": 0.0382,
"step": 2050
},
{
"epoch": 0.65,
"grad_norm": 0.009357116185128689,
"learning_rate": 1.3066666666666668e-05,
"loss": 0.0624,
"step": 2060
},
{
"epoch": 0.65,
"grad_norm": 0.1253252774477005,
"learning_rate": 1.3022222222222223e-05,
"loss": 0.0734,
"step": 2070
},
{
"epoch": 0.65,
"grad_norm": 0.0024754456244409084,
"learning_rate": 1.2977777777777779e-05,
"loss": 0.0683,
"step": 2080
},
{
"epoch": 0.65,
"grad_norm": 1.6212913990020752,
"learning_rate": 1.2933333333333334e-05,
"loss": 0.0467,
"step": 2090
},
{
"epoch": 0.66,
"grad_norm": 1.4906156063079834,
"learning_rate": 1.288888888888889e-05,
"loss": 0.1055,
"step": 2100
},
{
"epoch": 0.66,
"grad_norm": 0.005489406641572714,
"learning_rate": 1.2844444444444446e-05,
"loss": 0.0425,
"step": 2110
},
{
"epoch": 0.66,
"grad_norm": 0.018557880073785782,
"learning_rate": 1.2800000000000001e-05,
"loss": 0.0497,
"step": 2120
},
{
"epoch": 0.67,
"grad_norm": 0.00175235525239259,
"learning_rate": 1.2755555555555556e-05,
"loss": 0.0517,
"step": 2130
},
{
"epoch": 0.67,
"grad_norm": 0.002169775078073144,
"learning_rate": 1.2711111111111112e-05,
"loss": 0.038,
"step": 2140
},
{
"epoch": 0.67,
"grad_norm": 0.004229371901601553,
"learning_rate": 1.2666666666666667e-05,
"loss": 0.0897,
"step": 2150
},
{
"epoch": 0.68,
"grad_norm": 0.0044867489486932755,
"learning_rate": 1.2622222222222222e-05,
"loss": 0.092,
"step": 2160
},
{
"epoch": 0.68,
"grad_norm": 0.00232448847964406,
"learning_rate": 1.257777777777778e-05,
"loss": 0.0474,
"step": 2170
},
{
"epoch": 0.68,
"grad_norm": 0.10797467082738876,
"learning_rate": 1.2533333333333336e-05,
"loss": 0.0065,
"step": 2180
},
{
"epoch": 0.69,
"grad_norm": 0.001698866835795343,
"learning_rate": 1.2488888888888891e-05,
"loss": 0.1059,
"step": 2190
},
{
"epoch": 0.69,
"grad_norm": 0.0014356509782373905,
"learning_rate": 1.2444444444444446e-05,
"loss": 0.0352,
"step": 2200
},
{
"epoch": 0.69,
"grad_norm": 0.001666803378611803,
"learning_rate": 1.2400000000000002e-05,
"loss": 0.0723,
"step": 2210
},
{
"epoch": 0.7,
"grad_norm": 1.3424423933029175,
"learning_rate": 1.2355555555555557e-05,
"loss": 0.029,
"step": 2220
},
{
"epoch": 0.7,
"grad_norm": 0.0017589009366929531,
"learning_rate": 1.2311111111111112e-05,
"loss": 0.0261,
"step": 2230
},
{
"epoch": 0.7,
"grad_norm": 2.2947793006896973,
"learning_rate": 1.2266666666666667e-05,
"loss": 0.1309,
"step": 2240
},
{
"epoch": 0.7,
"grad_norm": 2.9271962642669678,
"learning_rate": 1.2222222222222224e-05,
"loss": 0.062,
"step": 2250
},
{
"epoch": 0.71,
"grad_norm": 0.006457278039306402,
"learning_rate": 1.217777777777778e-05,
"loss": 0.0099,
"step": 2260
},
{
"epoch": 0.71,
"grad_norm": 1.2980073690414429,
"learning_rate": 1.2133333333333335e-05,
"loss": 0.0846,
"step": 2270
},
{
"epoch": 0.71,
"grad_norm": 0.0023306766524910927,
"learning_rate": 1.208888888888889e-05,
"loss": 0.028,
"step": 2280
},
{
"epoch": 0.72,
"grad_norm": 0.0022570204455405474,
"learning_rate": 1.2044444444444445e-05,
"loss": 0.0283,
"step": 2290
},
{
"epoch": 0.72,
"grad_norm": 0.0026164520531892776,
"learning_rate": 1.2e-05,
"loss": 0.0502,
"step": 2300
},
{
"epoch": 0.72,
"grad_norm": 0.0019713249057531357,
"learning_rate": 1.1955555555555556e-05,
"loss": 0.0354,
"step": 2310
},
{
"epoch": 0.73,
"grad_norm": 0.00487458985298872,
"learning_rate": 1.191111111111111e-05,
"loss": 0.0579,
"step": 2320
},
{
"epoch": 0.73,
"grad_norm": 0.002555105835199356,
"learning_rate": 1.186666666666667e-05,
"loss": 0.0566,
"step": 2330
},
{
"epoch": 0.73,
"grad_norm": 0.28949007391929626,
"learning_rate": 1.1822222222222225e-05,
"loss": 0.0149,
"step": 2340
},
{
"epoch": 0.74,
"grad_norm": 0.5910075902938843,
"learning_rate": 1.177777777777778e-05,
"loss": 0.075,
"step": 2350
},
{
"epoch": 0.74,
"grad_norm": 0.001737405196763575,
"learning_rate": 1.1733333333333335e-05,
"loss": 0.0469,
"step": 2360
},
{
"epoch": 0.74,
"grad_norm": 5.248105049133301,
"learning_rate": 1.168888888888889e-05,
"loss": 0.0715,
"step": 2370
},
{
"epoch": 0.75,
"grad_norm": 0.0018054692773148417,
"learning_rate": 1.1644444444444446e-05,
"loss": 0.0354,
"step": 2380
},
{
"epoch": 0.75,
"grad_norm": 0.001644105650484562,
"learning_rate": 1.16e-05,
"loss": 0.0321,
"step": 2390
},
{
"epoch": 0.75,
"grad_norm": 0.001677204272709787,
"learning_rate": 1.1555555555555556e-05,
"loss": 0.0183,
"step": 2400
},
{
"epoch": 0.76,
"grad_norm": 3.1084885597229004,
"learning_rate": 1.1511111111111113e-05,
"loss": 0.029,
"step": 2410
},
{
"epoch": 0.76,
"grad_norm": 0.007074132561683655,
"learning_rate": 1.1466666666666668e-05,
"loss": 0.0525,
"step": 2420
},
{
"epoch": 0.76,
"grad_norm": 0.0032486789859831333,
"learning_rate": 1.1422222222222223e-05,
"loss": 0.0338,
"step": 2430
},
{
"epoch": 0.76,
"grad_norm": 2.797699213027954,
"learning_rate": 1.1377777777777779e-05,
"loss": 0.1017,
"step": 2440
},
{
"epoch": 0.77,
"grad_norm": 1.977303385734558,
"learning_rate": 1.1333333333333334e-05,
"loss": 0.061,
"step": 2450
},
{
"epoch": 0.77,
"grad_norm": 4.702297210693359,
"learning_rate": 1.1288888888888889e-05,
"loss": 0.0382,
"step": 2460
},
{
"epoch": 0.77,
"grad_norm": 0.05130983144044876,
"learning_rate": 1.1244444444444444e-05,
"loss": 0.0273,
"step": 2470
},
{
"epoch": 0.78,
"grad_norm": 0.001360241905786097,
"learning_rate": 1.1200000000000001e-05,
"loss": 0.0615,
"step": 2480
},
{
"epoch": 0.78,
"grad_norm": 0.65842205286026,
"learning_rate": 1.1155555555555556e-05,
"loss": 0.0349,
"step": 2490
},
{
"epoch": 0.78,
"grad_norm": 0.0062817600555717945,
"learning_rate": 1.1111111111111113e-05,
"loss": 0.055,
"step": 2500
},
{
"epoch": 0.78,
"eval_loss": 0.020550861954689026,
"eval_runtime": 61.9123,
"eval_samples_per_second": 16.152,
"eval_steps_per_second": 16.152,
"step": 2500
},
{
"epoch": 0.79,
"grad_norm": 0.002629642840474844,
"learning_rate": 1.1066666666666669e-05,
"loss": 0.0775,
"step": 2510
},
{
"epoch": 0.79,
"grad_norm": 0.003445986658334732,
"learning_rate": 1.1022222222222224e-05,
"loss": 0.0285,
"step": 2520
},
{
"epoch": 0.79,
"grad_norm": 2.4178996086120605,
"learning_rate": 1.0977777777777779e-05,
"loss": 0.1755,
"step": 2530
},
{
"epoch": 0.8,
"grad_norm": 0.0058800880797207355,
"learning_rate": 1.0933333333333334e-05,
"loss": 0.0055,
"step": 2540
},
{
"epoch": 0.8,
"grad_norm": 0.013078362680971622,
"learning_rate": 1.088888888888889e-05,
"loss": 0.0088,
"step": 2550
},
{
"epoch": 0.8,
"grad_norm": 0.0052205640822649,
"learning_rate": 1.0844444444444446e-05,
"loss": 0.023,
"step": 2560
},
{
"epoch": 0.81,
"grad_norm": 1.2594823837280273,
"learning_rate": 1.0800000000000002e-05,
"loss": 0.0222,
"step": 2570
},
{
"epoch": 0.81,
"grad_norm": 3.3159544467926025,
"learning_rate": 1.0755555555555557e-05,
"loss": 0.0426,
"step": 2580
},
{
"epoch": 0.81,
"grad_norm": 2.103391647338867,
"learning_rate": 1.0711111111111112e-05,
"loss": 0.0457,
"step": 2590
},
{
"epoch": 0.81,
"grad_norm": 0.000918123172596097,
"learning_rate": 1.0666666666666667e-05,
"loss": 0.0391,
"step": 2600
},
{
"epoch": 0.82,
"grad_norm": 0.0012873125961050391,
"learning_rate": 1.0622222222222223e-05,
"loss": 0.0217,
"step": 2610
},
{
"epoch": 0.82,
"grad_norm": 0.002058672485873103,
"learning_rate": 1.0577777777777778e-05,
"loss": 0.0463,
"step": 2620
},
{
"epoch": 0.82,
"grad_norm": 0.027771245688199997,
"learning_rate": 1.0533333333333333e-05,
"loss": 0.0805,
"step": 2630
},
{
"epoch": 0.83,
"grad_norm": 0.0015083320904523134,
"learning_rate": 1.048888888888889e-05,
"loss": 0.05,
"step": 2640
},
{
"epoch": 0.83,
"grad_norm": 2.7888853549957275,
"learning_rate": 1.0444444444444445e-05,
"loss": 0.0752,
"step": 2650
},
{
"epoch": 0.83,
"grad_norm": 3.569105863571167,
"learning_rate": 1.04e-05,
"loss": 0.0585,
"step": 2660
},
{
"epoch": 0.84,
"grad_norm": 0.034804560244083405,
"learning_rate": 1.0355555555555557e-05,
"loss": 0.0113,
"step": 2670
},
{
"epoch": 0.84,
"grad_norm": 0.0017612408846616745,
"learning_rate": 1.0311111111111113e-05,
"loss": 0.0535,
"step": 2680
},
{
"epoch": 0.84,
"grad_norm": 0.0011774456361308694,
"learning_rate": 1.0266666666666668e-05,
"loss": 0.0622,
"step": 2690
},
{
"epoch": 0.85,
"grad_norm": 2.2724449634552,
"learning_rate": 1.0222222222222223e-05,
"loss": 0.0059,
"step": 2700
},
{
"epoch": 0.85,
"grad_norm": 0.8430375456809998,
"learning_rate": 1.0177777777777778e-05,
"loss": 0.043,
"step": 2710
},
{
"epoch": 0.85,
"grad_norm": 2.0912680625915527,
"learning_rate": 1.0133333333333335e-05,
"loss": 0.0356,
"step": 2720
},
{
"epoch": 0.86,
"grad_norm": 0.043006353080272675,
"learning_rate": 1.008888888888889e-05,
"loss": 0.0247,
"step": 2730
},
{
"epoch": 0.86,
"grad_norm": 0.0013607463333755732,
"learning_rate": 1.0044444444444446e-05,
"loss": 0.0115,
"step": 2740
},
{
"epoch": 0.86,
"grad_norm": 0.003235200187191367,
"learning_rate": 1e-05,
"loss": 0.0447,
"step": 2750
},
{
"epoch": 0.86,
"grad_norm": 0.006067329086363316,
"learning_rate": 9.955555555555556e-06,
"loss": 0.049,
"step": 2760
},
{
"epoch": 0.87,
"grad_norm": 0.001446689828298986,
"learning_rate": 9.911111111111113e-06,
"loss": 0.0502,
"step": 2770
},
{
"epoch": 0.87,
"grad_norm": 0.0018873319495469332,
"learning_rate": 9.866666666666668e-06,
"loss": 0.0854,
"step": 2780
},
{
"epoch": 0.87,
"grad_norm": 0.002605983056128025,
"learning_rate": 9.822222222222223e-06,
"loss": 0.0284,
"step": 2790
},
{
"epoch": 0.88,
"grad_norm": 0.002705740975216031,
"learning_rate": 9.777777777777779e-06,
"loss": 0.0639,
"step": 2800
},
{
"epoch": 0.88,
"grad_norm": 0.004726971033960581,
"learning_rate": 9.733333333333334e-06,
"loss": 0.1064,
"step": 2810
},
{
"epoch": 0.88,
"grad_norm": 0.01625528745353222,
"learning_rate": 9.688888888888889e-06,
"loss": 0.0679,
"step": 2820
},
{
"epoch": 0.89,
"grad_norm": 1.4221800565719604,
"learning_rate": 9.644444444444444e-06,
"loss": 0.0504,
"step": 2830
},
{
"epoch": 0.89,
"grad_norm": 1.1785073280334473,
"learning_rate": 9.600000000000001e-06,
"loss": 0.0574,
"step": 2840
},
{
"epoch": 0.89,
"grad_norm": 0.0024502715095877647,
"learning_rate": 9.555555555555556e-06,
"loss": 0.0211,
"step": 2850
},
{
"epoch": 0.9,
"grad_norm": 0.0017725643701851368,
"learning_rate": 9.511111111111112e-06,
"loss": 0.057,
"step": 2860
},
{
"epoch": 0.9,
"grad_norm": 0.0027383696287870407,
"learning_rate": 9.466666666666667e-06,
"loss": 0.1225,
"step": 2870
},
{
"epoch": 0.9,
"grad_norm": 0.00296800397336483,
"learning_rate": 9.422222222222222e-06,
"loss": 0.0167,
"step": 2880
},
{
"epoch": 0.91,
"grad_norm": 0.003540828125551343,
"learning_rate": 9.377777777777779e-06,
"loss": 0.0484,
"step": 2890
},
{
"epoch": 0.91,
"grad_norm": 0.03577937185764313,
"learning_rate": 9.333333333333334e-06,
"loss": 0.0334,
"step": 2900
},
{
"epoch": 0.91,
"grad_norm": 0.0013176521752029657,
"learning_rate": 9.28888888888889e-06,
"loss": 0.0706,
"step": 2910
},
{
"epoch": 0.91,
"grad_norm": 0.0014053646009415388,
"learning_rate": 9.244444444444445e-06,
"loss": 0.0206,
"step": 2920
},
{
"epoch": 0.92,
"grad_norm": 0.002312118886038661,
"learning_rate": 9.200000000000002e-06,
"loss": 0.0398,
"step": 2930
},
{
"epoch": 0.92,
"grad_norm": 0.002495428314432502,
"learning_rate": 9.155555555555557e-06,
"loss": 0.0151,
"step": 2940
},
{
"epoch": 0.92,
"grad_norm": 0.0020435641054064035,
"learning_rate": 9.111111111111112e-06,
"loss": 0.034,
"step": 2950
},
{
"epoch": 0.93,
"grad_norm": 0.0015432636719197035,
"learning_rate": 9.066666666666667e-06,
"loss": 0.0485,
"step": 2960
},
{
"epoch": 0.93,
"grad_norm": 1.6057056188583374,
"learning_rate": 9.022222222222223e-06,
"loss": 0.0566,
"step": 2970
},
{
"epoch": 0.93,
"grad_norm": 0.0020012864843010902,
"learning_rate": 8.977777777777778e-06,
"loss": 0.0012,
"step": 2980
},
{
"epoch": 0.94,
"grad_norm": 0.0008306769304908812,
"learning_rate": 8.933333333333333e-06,
"loss": 0.037,
"step": 2990
},
{
"epoch": 0.94,
"grad_norm": 0.0008226165664382279,
"learning_rate": 8.888888888888888e-06,
"loss": 0.0365,
"step": 3000
},
{
"epoch": 0.94,
"eval_loss": 0.021811991930007935,
"eval_runtime": 61.9881,
"eval_samples_per_second": 16.132,
"eval_steps_per_second": 16.132,
"step": 3000
}
],
"logging_steps": 10,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 1000,
"total_flos": 4.8306377981952e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}