{
"best_metric": 0.018277771770954132,
"best_model_checkpoint": "runs/deepseek_lora_20240424-122712/checkpoint-2000",
"epoch": 1.5665387326701654,
"eval_steps": 500,
"global_step": 5000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 2.6060585975646973,
"learning_rate": 4.0000000000000003e-07,
"loss": 1.6667,
"step": 10
},
{
"epoch": 0.01,
"grad_norm": 5.726912021636963,
"learning_rate": 8.000000000000001e-07,
"loss": 1.6606,
"step": 20
},
{
"epoch": 0.01,
"grad_norm": 6.3224616050720215,
"learning_rate": 1.2000000000000002e-06,
"loss": 1.6875,
"step": 30
},
{
"epoch": 0.01,
"grad_norm": 4.4585185050964355,
"learning_rate": 1.6000000000000001e-06,
"loss": 1.5512,
"step": 40
},
{
"epoch": 0.02,
"grad_norm": 2.849210023880005,
"learning_rate": 2.0000000000000003e-06,
"loss": 1.598,
"step": 50
},
{
"epoch": 0.02,
"grad_norm": 1.7917169332504272,
"learning_rate": 2.4000000000000003e-06,
"loss": 1.3203,
"step": 60
},
{
"epoch": 0.02,
"grad_norm": 10.056238174438477,
"learning_rate": 2.8000000000000003e-06,
"loss": 1.4914,
"step": 70
},
{
"epoch": 0.03,
"grad_norm": 5.542996406555176,
"learning_rate": 3.2000000000000003e-06,
"loss": 1.3369,
"step": 80
},
{
"epoch": 0.03,
"grad_norm": 1.5463522672653198,
"learning_rate": 3.6000000000000003e-06,
"loss": 1.3185,
"step": 90
},
{
"epoch": 0.03,
"grad_norm": 9.401649475097656,
"learning_rate": 4.000000000000001e-06,
"loss": 1.2827,
"step": 100
},
{
"epoch": 0.03,
"grad_norm": 1.238974928855896,
"learning_rate": 4.4e-06,
"loss": 1.0366,
"step": 110
},
{
"epoch": 0.04,
"grad_norm": 1.2282520532608032,
"learning_rate": 4.800000000000001e-06,
"loss": 1.0715,
"step": 120
},
{
"epoch": 0.04,
"grad_norm": 1.4583073854446411,
"learning_rate": 5.2e-06,
"loss": 0.868,
"step": 130
},
{
"epoch": 0.04,
"grad_norm": 4.3194146156311035,
"learning_rate": 5.600000000000001e-06,
"loss": 0.8281,
"step": 140
},
{
"epoch": 0.05,
"grad_norm": 4.280028820037842,
"learning_rate": 6e-06,
"loss": 0.8666,
"step": 150
},
{
"epoch": 0.05,
"grad_norm": 0.9424476027488708,
"learning_rate": 6.4000000000000006e-06,
"loss": 0.6691,
"step": 160
},
{
"epoch": 0.05,
"grad_norm": 0.9174453616142273,
"learning_rate": 6.800000000000001e-06,
"loss": 0.5405,
"step": 170
},
{
"epoch": 0.06,
"grad_norm": 1.0179359912872314,
"learning_rate": 7.2000000000000005e-06,
"loss": 0.5103,
"step": 180
},
{
"epoch": 0.06,
"grad_norm": 0.4516351521015167,
"learning_rate": 7.600000000000001e-06,
"loss": 0.2679,
"step": 190
},
{
"epoch": 0.06,
"grad_norm": 6.054721355438232,
"learning_rate": 8.000000000000001e-06,
"loss": 0.2634,
"step": 200
},
{
"epoch": 0.07,
"grad_norm": 0.40070173144340515,
"learning_rate": 8.400000000000001e-06,
"loss": 0.2254,
"step": 210
},
{
"epoch": 0.07,
"grad_norm": 0.6031996011734009,
"learning_rate": 8.8e-06,
"loss": 0.1281,
"step": 220
},
{
"epoch": 0.07,
"grad_norm": 3.0129740238189697,
"learning_rate": 9.200000000000002e-06,
"loss": 0.2365,
"step": 230
},
{
"epoch": 0.08,
"grad_norm": 0.6611989140510559,
"learning_rate": 9.600000000000001e-06,
"loss": 0.3255,
"step": 240
},
{
"epoch": 0.08,
"grad_norm": 0.3195549547672272,
"learning_rate": 1e-05,
"loss": 0.3287,
"step": 250
},
{
"epoch": 0.08,
"grad_norm": 0.27022993564605713,
"learning_rate": 1.04e-05,
"loss": 0.1439,
"step": 260
},
{
"epoch": 0.08,
"grad_norm": 4.671290874481201,
"learning_rate": 1.0800000000000002e-05,
"loss": 0.0676,
"step": 270
},
{
"epoch": 0.09,
"grad_norm": 0.2435157746076584,
"learning_rate": 1.1200000000000001e-05,
"loss": 0.2338,
"step": 280
},
{
"epoch": 0.09,
"grad_norm": 0.40632137656211853,
"learning_rate": 1.16e-05,
"loss": 0.1661,
"step": 290
},
{
"epoch": 0.09,
"grad_norm": 0.2910804748535156,
"learning_rate": 1.2e-05,
"loss": 0.1781,
"step": 300
},
{
"epoch": 0.1,
"grad_norm": 7.506131649017334,
"learning_rate": 1.2400000000000002e-05,
"loss": 0.1283,
"step": 310
},
{
"epoch": 0.1,
"grad_norm": 0.04508267343044281,
"learning_rate": 1.2800000000000001e-05,
"loss": 0.1022,
"step": 320
},
{
"epoch": 0.1,
"grad_norm": 0.23127306997776031,
"learning_rate": 1.3200000000000002e-05,
"loss": 0.1243,
"step": 330
},
{
"epoch": 0.11,
"grad_norm": 6.1613383293151855,
"learning_rate": 1.3600000000000002e-05,
"loss": 0.1485,
"step": 340
},
{
"epoch": 0.11,
"grad_norm": 0.023777758702635765,
"learning_rate": 1.4e-05,
"loss": 0.0538,
"step": 350
},
{
"epoch": 0.11,
"grad_norm": 0.014769719913601875,
"learning_rate": 1.4400000000000001e-05,
"loss": 0.0285,
"step": 360
},
{
"epoch": 0.12,
"grad_norm": 5.690178871154785,
"learning_rate": 1.48e-05,
"loss": 0.1325,
"step": 370
},
{
"epoch": 0.12,
"grad_norm": 0.02117346040904522,
"learning_rate": 1.5200000000000002e-05,
"loss": 0.0507,
"step": 380
},
{
"epoch": 0.12,
"grad_norm": 0.06630611419677734,
"learning_rate": 1.5600000000000003e-05,
"loss": 0.199,
"step": 390
},
{
"epoch": 0.13,
"grad_norm": 0.16532257199287415,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.0652,
"step": 400
},
{
"epoch": 0.13,
"grad_norm": 0.009799620136618614,
"learning_rate": 1.64e-05,
"loss": 0.1011,
"step": 410
},
{
"epoch": 0.13,
"grad_norm": 0.12324853241443634,
"learning_rate": 1.6800000000000002e-05,
"loss": 0.0364,
"step": 420
},
{
"epoch": 0.13,
"grad_norm": 0.010026533156633377,
"learning_rate": 1.72e-05,
"loss": 0.1973,
"step": 430
},
{
"epoch": 0.14,
"grad_norm": 0.0071570691652596,
"learning_rate": 1.76e-05,
"loss": 0.0819,
"step": 440
},
{
"epoch": 0.14,
"grad_norm": 0.04149964451789856,
"learning_rate": 1.8e-05,
"loss": 0.1046,
"step": 450
},
{
"epoch": 0.14,
"grad_norm": 3.45990252494812,
"learning_rate": 1.8400000000000003e-05,
"loss": 0.1011,
"step": 460
},
{
"epoch": 0.15,
"grad_norm": 0.00734300771728158,
"learning_rate": 1.88e-05,
"loss": 0.223,
"step": 470
},
{
"epoch": 0.15,
"grad_norm": 0.01419814396649599,
"learning_rate": 1.9200000000000003e-05,
"loss": 0.1178,
"step": 480
},
{
"epoch": 0.15,
"grad_norm": 0.10111651569604874,
"learning_rate": 1.9600000000000002e-05,
"loss": 0.1416,
"step": 490
},
{
"epoch": 0.16,
"grad_norm": 3.1214537620544434,
"learning_rate": 2e-05,
"loss": 0.1258,
"step": 500
},
{
"epoch": 0.16,
"eval_loss": 0.05367860943078995,
"eval_runtime": 62.0108,
"eval_samples_per_second": 16.126,
"eval_steps_per_second": 16.126,
"step": 500
},
{
"epoch": 0.16,
"grad_norm": 0.013164438307285309,
"learning_rate": 1.9955555555555557e-05,
"loss": 0.0268,
"step": 510
},
{
"epoch": 0.16,
"grad_norm": 0.15392057597637177,
"learning_rate": 1.9911111111111112e-05,
"loss": 0.0843,
"step": 520
},
{
"epoch": 0.17,
"grad_norm": 0.006180985830724239,
"learning_rate": 1.9866666666666667e-05,
"loss": 0.1384,
"step": 530
},
{
"epoch": 0.17,
"grad_norm": 0.2702454924583435,
"learning_rate": 1.9822222222222226e-05,
"loss": 0.0956,
"step": 540
},
{
"epoch": 0.17,
"grad_norm": 1.5135071277618408,
"learning_rate": 1.977777777777778e-05,
"loss": 0.1615,
"step": 550
},
{
"epoch": 0.18,
"grad_norm": 0.005121675785630941,
"learning_rate": 1.9733333333333336e-05,
"loss": 0.1201,
"step": 560
},
{
"epoch": 0.18,
"grad_norm": 0.09728775173425674,
"learning_rate": 1.968888888888889e-05,
"loss": 0.1112,
"step": 570
},
{
"epoch": 0.18,
"grad_norm": 4.736963748931885,
"learning_rate": 1.9644444444444447e-05,
"loss": 0.1546,
"step": 580
},
{
"epoch": 0.18,
"grad_norm": 0.006898015271872282,
"learning_rate": 1.9600000000000002e-05,
"loss": 0.1116,
"step": 590
},
{
"epoch": 0.19,
"grad_norm": 0.005388608202338219,
"learning_rate": 1.9555555555555557e-05,
"loss": 0.1597,
"step": 600
},
{
"epoch": 0.19,
"grad_norm": 0.0133949751034379,
"learning_rate": 1.9511111111111113e-05,
"loss": 0.0799,
"step": 610
},
{
"epoch": 0.19,
"grad_norm": 0.11226101964712143,
"learning_rate": 1.9466666666666668e-05,
"loss": 0.0448,
"step": 620
},
{
"epoch": 0.2,
"grad_norm": 0.011205198243260384,
"learning_rate": 1.9422222222222223e-05,
"loss": 0.0739,
"step": 630
},
{
"epoch": 0.2,
"grad_norm": 0.007312687113881111,
"learning_rate": 1.9377777777777778e-05,
"loss": 0.0779,
"step": 640
},
{
"epoch": 0.2,
"grad_norm": 0.006550287362188101,
"learning_rate": 1.9333333333333333e-05,
"loss": 0.2039,
"step": 650
},
{
"epoch": 0.21,
"grad_norm": 1.961844563484192,
"learning_rate": 1.928888888888889e-05,
"loss": 0.0618,
"step": 660
},
{
"epoch": 0.21,
"grad_norm": 0.012318034656345844,
"learning_rate": 1.9244444444444444e-05,
"loss": 0.1166,
"step": 670
},
{
"epoch": 0.21,
"grad_norm": 0.08778905868530273,
"learning_rate": 1.9200000000000003e-05,
"loss": 0.0822,
"step": 680
},
{
"epoch": 0.22,
"grad_norm": 0.010623461566865444,
"learning_rate": 1.9155555555555558e-05,
"loss": 0.095,
"step": 690
},
{
"epoch": 0.22,
"grad_norm": 0.0858495905995369,
"learning_rate": 1.9111111111111113e-05,
"loss": 0.0468,
"step": 700
},
{
"epoch": 0.22,
"grad_norm": 1.7685880661010742,
"learning_rate": 1.9066666666666668e-05,
"loss": 0.0288,
"step": 710
},
{
"epoch": 0.23,
"grad_norm": 0.032682083547115326,
"learning_rate": 1.9022222222222223e-05,
"loss": 0.1024,
"step": 720
},
{
"epoch": 0.23,
"grad_norm": 0.010010900907218456,
"learning_rate": 1.897777777777778e-05,
"loss": 0.0129,
"step": 730
},
{
"epoch": 0.23,
"grad_norm": 0.037693481892347336,
"learning_rate": 1.8933333333333334e-05,
"loss": 0.1217,
"step": 740
},
{
"epoch": 0.23,
"grad_norm": 2.2111008167266846,
"learning_rate": 1.888888888888889e-05,
"loss": 0.1206,
"step": 750
},
{
"epoch": 0.24,
"grad_norm": 1.4000599384307861,
"learning_rate": 1.8844444444444444e-05,
"loss": 0.1214,
"step": 760
},
{
"epoch": 0.24,
"grad_norm": 4.3877034187316895,
"learning_rate": 1.88e-05,
"loss": 0.0408,
"step": 770
},
{
"epoch": 0.24,
"grad_norm": 4.164296627044678,
"learning_rate": 1.8755555555555558e-05,
"loss": 0.1315,
"step": 780
},
{
"epoch": 0.25,
"grad_norm": 0.07969211786985397,
"learning_rate": 1.8711111111111113e-05,
"loss": 0.0149,
"step": 790
},
{
"epoch": 0.25,
"grad_norm": 3.9340951442718506,
"learning_rate": 1.866666666666667e-05,
"loss": 0.1169,
"step": 800
},
{
"epoch": 0.25,
"grad_norm": 0.006517359986901283,
"learning_rate": 1.8622222222222224e-05,
"loss": 0.0517,
"step": 810
},
{
"epoch": 0.26,
"grad_norm": 0.008951540105044842,
"learning_rate": 1.857777777777778e-05,
"loss": 0.0935,
"step": 820
},
{
"epoch": 0.26,
"grad_norm": 0.03655437007546425,
"learning_rate": 1.8533333333333334e-05,
"loss": 0.0883,
"step": 830
},
{
"epoch": 0.26,
"grad_norm": 0.0346522182226181,
"learning_rate": 1.848888888888889e-05,
"loss": 0.0479,
"step": 840
},
{
"epoch": 0.27,
"grad_norm": 0.006223162170499563,
"learning_rate": 1.8444444444444448e-05,
"loss": 0.0075,
"step": 850
},
{
"epoch": 0.27,
"grad_norm": 0.004037676844745874,
"learning_rate": 1.8400000000000003e-05,
"loss": 0.0333,
"step": 860
},
{
"epoch": 0.27,
"grad_norm": 0.004569903016090393,
"learning_rate": 1.835555555555556e-05,
"loss": 0.0536,
"step": 870
},
{
"epoch": 0.28,
"grad_norm": 0.005509174894541502,
"learning_rate": 1.8311111111111114e-05,
"loss": 0.1528,
"step": 880
},
{
"epoch": 0.28,
"grad_norm": 0.007597978692501783,
"learning_rate": 1.826666666666667e-05,
"loss": 0.0052,
"step": 890
},
{
"epoch": 0.28,
"grad_norm": 4.2217936515808105,
"learning_rate": 1.8222222222222224e-05,
"loss": 0.0776,
"step": 900
},
{
"epoch": 0.29,
"grad_norm": 0.004342419560998678,
"learning_rate": 1.817777777777778e-05,
"loss": 0.0552,
"step": 910
},
{
"epoch": 0.29,
"grad_norm": 0.0696156695485115,
"learning_rate": 1.8133333333333335e-05,
"loss": 0.0412,
"step": 920
},
{
"epoch": 0.29,
"grad_norm": 0.022448547184467316,
"learning_rate": 1.808888888888889e-05,
"loss": 0.0109,
"step": 930
},
{
"epoch": 0.29,
"grad_norm": 4.305741310119629,
"learning_rate": 1.8044444444444445e-05,
"loss": 0.0428,
"step": 940
},
{
"epoch": 0.3,
"grad_norm": 3.344078779220581,
"learning_rate": 1.8e-05,
"loss": 0.0651,
"step": 950
},
{
"epoch": 0.3,
"grad_norm": 2.585327386856079,
"learning_rate": 1.7955555555555556e-05,
"loss": 0.1474,
"step": 960
},
{
"epoch": 0.3,
"grad_norm": 2.2476861476898193,
"learning_rate": 1.791111111111111e-05,
"loss": 0.0892,
"step": 970
},
{
"epoch": 0.31,
"grad_norm": 0.06212488189339638,
"learning_rate": 1.7866666666666666e-05,
"loss": 0.0659,
"step": 980
},
{
"epoch": 0.31,
"grad_norm": 0.012895594350993633,
"learning_rate": 1.782222222222222e-05,
"loss": 0.0915,
"step": 990
},
{
"epoch": 0.31,
"grad_norm": 0.1314150094985962,
"learning_rate": 1.7777777777777777e-05,
"loss": 0.0456,
"step": 1000
},
{
"epoch": 0.31,
"eval_loss": 0.02997952327132225,
"eval_runtime": 62.0027,
"eval_samples_per_second": 16.128,
"eval_steps_per_second": 16.128,
"step": 1000
},
{
"epoch": 0.32,
"grad_norm": 0.01634177938103676,
"learning_rate": 1.7733333333333335e-05,
"loss": 0.0424,
"step": 1010
},
{
"epoch": 0.32,
"grad_norm": 0.009437276981770992,
"learning_rate": 1.768888888888889e-05,
"loss": 0.0663,
"step": 1020
},
{
"epoch": 0.32,
"grad_norm": 0.04742557182908058,
"learning_rate": 1.7644444444444446e-05,
"loss": 0.1235,
"step": 1030
},
{
"epoch": 0.33,
"grad_norm": 1.101121187210083,
"learning_rate": 1.76e-05,
"loss": 0.0574,
"step": 1040
},
{
"epoch": 0.33,
"grad_norm": 3.8775153160095215,
"learning_rate": 1.7555555555555556e-05,
"loss": 0.0916,
"step": 1050
},
{
"epoch": 0.33,
"grad_norm": 0.0064536286517977715,
"learning_rate": 1.751111111111111e-05,
"loss": 0.0363,
"step": 1060
},
{
"epoch": 0.34,
"grad_norm": 0.004769014660269022,
"learning_rate": 1.7466666666666667e-05,
"loss": 0.013,
"step": 1070
},
{
"epoch": 0.34,
"grad_norm": 1.7518908977508545,
"learning_rate": 1.7422222222222222e-05,
"loss": 0.082,
"step": 1080
},
{
"epoch": 0.34,
"grad_norm": 0.07153653353452682,
"learning_rate": 1.737777777777778e-05,
"loss": 0.0388,
"step": 1090
},
{
"epoch": 0.34,
"grad_norm": 0.015164585784077644,
"learning_rate": 1.7333333333333336e-05,
"loss": 0.0613,
"step": 1100
},
{
"epoch": 0.35,
"grad_norm": 3.7508952617645264,
"learning_rate": 1.728888888888889e-05,
"loss": 0.0989,
"step": 1110
},
{
"epoch": 0.35,
"grad_norm": 0.025457441806793213,
"learning_rate": 1.7244444444444446e-05,
"loss": 0.0268,
"step": 1120
},
{
"epoch": 0.35,
"grad_norm": 4.794126033782959,
"learning_rate": 1.72e-05,
"loss": 0.0741,
"step": 1130
},
{
"epoch": 0.36,
"grad_norm": 4.877151012420654,
"learning_rate": 1.7155555555555557e-05,
"loss": 0.0653,
"step": 1140
},
{
"epoch": 0.36,
"grad_norm": 0.009955652058124542,
"learning_rate": 1.7111111111111112e-05,
"loss": 0.1041,
"step": 1150
},
{
"epoch": 0.36,
"grad_norm": 0.006371030583977699,
"learning_rate": 1.706666666666667e-05,
"loss": 0.0365,
"step": 1160
},
{
"epoch": 0.37,
"grad_norm": 0.005713317077606916,
"learning_rate": 1.7022222222222226e-05,
"loss": 0.0402,
"step": 1170
},
{
"epoch": 0.37,
"grad_norm": 0.0035403750371187925,
"learning_rate": 1.697777777777778e-05,
"loss": 0.0208,
"step": 1180
},
{
"epoch": 0.37,
"grad_norm": 1.2340508699417114,
"learning_rate": 1.6933333333333336e-05,
"loss": 0.0507,
"step": 1190
},
{
"epoch": 0.38,
"grad_norm": 0.01031398307532072,
"learning_rate": 1.688888888888889e-05,
"loss": 0.1464,
"step": 1200
},
{
"epoch": 0.38,
"grad_norm": 1.4290913343429565,
"learning_rate": 1.6844444444444447e-05,
"loss": 0.01,
"step": 1210
},
{
"epoch": 0.38,
"grad_norm": 1.9291528463363647,
"learning_rate": 1.6800000000000002e-05,
"loss": 0.0077,
"step": 1220
},
{
"epoch": 0.39,
"grad_norm": 0.049461908638477325,
"learning_rate": 1.6755555555555557e-05,
"loss": 0.034,
"step": 1230
},
{
"epoch": 0.39,
"grad_norm": 0.004318055231124163,
"learning_rate": 1.6711111111111112e-05,
"loss": 0.0612,
"step": 1240
},
{
"epoch": 0.39,
"grad_norm": 0.003021540120244026,
"learning_rate": 1.6666666666666667e-05,
"loss": 0.0135,
"step": 1250
},
{
"epoch": 0.39,
"grad_norm": 0.04815623164176941,
"learning_rate": 1.6622222222222223e-05,
"loss": 0.0884,
"step": 1260
},
{
"epoch": 0.4,
"grad_norm": 2.4603257179260254,
"learning_rate": 1.6577777777777778e-05,
"loss": 0.0149,
"step": 1270
},
{
"epoch": 0.4,
"grad_norm": 0.002652758965268731,
"learning_rate": 1.6533333333333333e-05,
"loss": 0.0239,
"step": 1280
},
{
"epoch": 0.4,
"grad_norm": 0.053305696696043015,
"learning_rate": 1.648888888888889e-05,
"loss": 0.0615,
"step": 1290
},
{
"epoch": 0.41,
"grad_norm": 0.0942620038986206,
"learning_rate": 1.6444444444444444e-05,
"loss": 0.0789,
"step": 1300
},
{
"epoch": 0.41,
"grad_norm": 2.3062126636505127,
"learning_rate": 1.64e-05,
"loss": 0.0526,
"step": 1310
},
{
"epoch": 0.41,
"grad_norm": 0.004340393468737602,
"learning_rate": 1.6355555555555557e-05,
"loss": 0.0638,
"step": 1320
},
{
"epoch": 0.42,
"grad_norm": 2.277503252029419,
"learning_rate": 1.6311111111111113e-05,
"loss": 0.0512,
"step": 1330
},
{
"epoch": 0.42,
"grad_norm": 5.073038578033447,
"learning_rate": 1.6266666666666668e-05,
"loss": 0.1041,
"step": 1340
},
{
"epoch": 0.42,
"grad_norm": 0.0030644198413938284,
"learning_rate": 1.6222222222222223e-05,
"loss": 0.0016,
"step": 1350
},
{
"epoch": 0.43,
"grad_norm": 0.4875153601169586,
"learning_rate": 1.617777777777778e-05,
"loss": 0.0946,
"step": 1360
},
{
"epoch": 0.43,
"grad_norm": 0.003334318520501256,
"learning_rate": 1.6133333333333334e-05,
"loss": 0.0548,
"step": 1370
},
{
"epoch": 0.43,
"grad_norm": 2.697582960128784,
"learning_rate": 1.608888888888889e-05,
"loss": 0.0628,
"step": 1380
},
{
"epoch": 0.44,
"grad_norm": 0.021118061617016792,
"learning_rate": 1.6044444444444444e-05,
"loss": 0.0537,
"step": 1390
},
{
"epoch": 0.44,
"grad_norm": 0.007942981086671352,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.057,
"step": 1400
},
{
"epoch": 0.44,
"grad_norm": 0.003399541135877371,
"learning_rate": 1.5955555555555558e-05,
"loss": 0.0089,
"step": 1410
},
{
"epoch": 0.44,
"grad_norm": 0.4597236216068268,
"learning_rate": 1.5911111111111113e-05,
"loss": 0.079,
"step": 1420
},
{
"epoch": 0.45,
"grad_norm": 1.0684938430786133,
"learning_rate": 1.586666666666667e-05,
"loss": 0.0149,
"step": 1430
},
{
"epoch": 0.45,
"grad_norm": 3.3166937828063965,
"learning_rate": 1.5822222222222224e-05,
"loss": 0.1189,
"step": 1440
},
{
"epoch": 0.45,
"grad_norm": 5.2161736488342285,
"learning_rate": 1.577777777777778e-05,
"loss": 0.1408,
"step": 1450
},
{
"epoch": 0.46,
"grad_norm": 0.3570309281349182,
"learning_rate": 1.5733333333333334e-05,
"loss": 0.0247,
"step": 1460
},
{
"epoch": 0.46,
"grad_norm": 0.005081634968519211,
"learning_rate": 1.5688888888888893e-05,
"loss": 0.0231,
"step": 1470
},
{
"epoch": 0.46,
"grad_norm": 4.267640590667725,
"learning_rate": 1.5644444444444448e-05,
"loss": 0.0928,
"step": 1480
},
{
"epoch": 0.47,
"grad_norm": 0.014545031823217869,
"learning_rate": 1.5600000000000003e-05,
"loss": 0.0561,
"step": 1490
},
{
"epoch": 0.47,
"grad_norm": 2.007899522781372,
"learning_rate": 1.555555555555556e-05,
"loss": 0.058,
"step": 1500
},
{
"epoch": 0.47,
"eval_loss": 0.021991439163684845,
"eval_runtime": 61.9363,
"eval_samples_per_second": 16.146,
"eval_steps_per_second": 16.146,
"step": 1500
},
{
"epoch": 0.47,
"grad_norm": 0.18892046809196472,
"learning_rate": 1.5511111111111114e-05,
"loss": 0.0541,
"step": 1510
},
{
"epoch": 0.48,
"grad_norm": 0.9835280179977417,
"learning_rate": 1.546666666666667e-05,
"loss": 0.0349,
"step": 1520
},
{
"epoch": 0.48,
"grad_norm": 0.004944021347910166,
"learning_rate": 1.5422222222222224e-05,
"loss": 0.0688,
"step": 1530
},
{
"epoch": 0.48,
"grad_norm": 0.0025414193514734507,
"learning_rate": 1.537777777777778e-05,
"loss": 0.0374,
"step": 1540
},
{
"epoch": 0.49,
"grad_norm": 0.047879841178655624,
"learning_rate": 1.5333333333333334e-05,
"loss": 0.0867,
"step": 1550
},
{
"epoch": 0.49,
"grad_norm": 0.12313953042030334,
"learning_rate": 1.528888888888889e-05,
"loss": 0.0142,
"step": 1560
},
{
"epoch": 0.49,
"grad_norm": 0.0055120717734098434,
"learning_rate": 1.5244444444444447e-05,
"loss": 0.0332,
"step": 1570
},
{
"epoch": 0.5,
"grad_norm": 0.0030517149716615677,
"learning_rate": 1.5200000000000002e-05,
"loss": 0.0739,
"step": 1580
},
{
"epoch": 0.5,
"grad_norm": 0.04954369366168976,
"learning_rate": 1.5155555555555557e-05,
"loss": 0.0609,
"step": 1590
},
{
"epoch": 0.5,
"grad_norm": 2.16231369972229,
"learning_rate": 1.5111111111111112e-05,
"loss": 0.084,
"step": 1600
},
{
"epoch": 0.5,
"grad_norm": 0.03328130766749382,
"learning_rate": 1.5066666666666668e-05,
"loss": 0.0561,
"step": 1610
},
{
"epoch": 0.51,
"grad_norm": 0.3883110582828522,
"learning_rate": 1.5022222222222223e-05,
"loss": 0.065,
"step": 1620
},
{
"epoch": 0.51,
"grad_norm": 0.007501136511564255,
"learning_rate": 1.497777777777778e-05,
"loss": 0.0645,
"step": 1630
},
{
"epoch": 0.51,
"grad_norm": 0.0025652372278273106,
"learning_rate": 1.4933333333333335e-05,
"loss": 0.1242,
"step": 1640
},
{
"epoch": 0.52,
"grad_norm": 2.322174549102783,
"learning_rate": 1.488888888888889e-05,
"loss": 0.0639,
"step": 1650
},
{
"epoch": 0.52,
"grad_norm": 3.1910171508789062,
"learning_rate": 1.4844444444444445e-05,
"loss": 0.1048,
"step": 1660
},
{
"epoch": 0.52,
"grad_norm": 0.00425474438816309,
"learning_rate": 1.48e-05,
"loss": 0.0244,
"step": 1670
},
{
"epoch": 0.53,
"grad_norm": 4.180226802825928,
"learning_rate": 1.4755555555555556e-05,
"loss": 0.0874,
"step": 1680
},
{
"epoch": 0.53,
"grad_norm": 1.1955620050430298,
"learning_rate": 1.4711111111111111e-05,
"loss": 0.0371,
"step": 1690
},
{
"epoch": 0.53,
"grad_norm": 0.010523403063416481,
"learning_rate": 1.4666666666666666e-05,
"loss": 0.0441,
"step": 1700
},
{
"epoch": 0.54,
"grad_norm": 0.3403013348579407,
"learning_rate": 1.4622222222222225e-05,
"loss": 0.0758,
"step": 1710
},
{
"epoch": 0.54,
"grad_norm": 0.046740252524614334,
"learning_rate": 1.457777777777778e-05,
"loss": 0.1069,
"step": 1720
},
{
"epoch": 0.54,
"grad_norm": 0.1384706348180771,
"learning_rate": 1.4533333333333335e-05,
"loss": 0.0656,
"step": 1730
},
{
"epoch": 0.55,
"grad_norm": 1.4094263315200806,
"learning_rate": 1.448888888888889e-05,
"loss": 0.0255,
"step": 1740
},
{
"epoch": 0.55,
"grad_norm": 0.055873990058898926,
"learning_rate": 1.4444444444444446e-05,
"loss": 0.0181,
"step": 1750
},
{
"epoch": 0.55,
"grad_norm": 0.002195443492382765,
"learning_rate": 1.4400000000000001e-05,
"loss": 0.0004,
"step": 1760
},
{
"epoch": 0.55,
"grad_norm": 0.004009276628494263,
"learning_rate": 1.4355555555555556e-05,
"loss": 0.0724,
"step": 1770
},
{
"epoch": 0.56,
"grad_norm": 2.4936776161193848,
"learning_rate": 1.4311111111111111e-05,
"loss": 0.0822,
"step": 1780
},
{
"epoch": 0.56,
"grad_norm": 1.6697956323623657,
"learning_rate": 1.4266666666666668e-05,
"loss": 0.0041,
"step": 1790
},
{
"epoch": 0.56,
"grad_norm": 2.3623549938201904,
"learning_rate": 1.4222222222222224e-05,
"loss": 0.0805,
"step": 1800
},
{
"epoch": 0.57,
"grad_norm": 3.6882166862487793,
"learning_rate": 1.4177777777777779e-05,
"loss": 0.0353,
"step": 1810
},
{
"epoch": 0.57,
"grad_norm": 2.8029565811157227,
"learning_rate": 1.4133333333333334e-05,
"loss": 0.0666,
"step": 1820
},
{
"epoch": 0.57,
"grad_norm": 0.0027399081736803055,
"learning_rate": 1.408888888888889e-05,
"loss": 0.0129,
"step": 1830
},
{
"epoch": 0.58,
"grad_norm": 0.002227638615295291,
"learning_rate": 1.4044444444444445e-05,
"loss": 0.0644,
"step": 1840
},
{
"epoch": 0.58,
"grad_norm": 0.0038109265733510256,
"learning_rate": 1.4e-05,
"loss": 0.0554,
"step": 1850
},
{
"epoch": 0.58,
"grad_norm": 0.23026524484157562,
"learning_rate": 1.3955555555555558e-05,
"loss": 0.0513,
"step": 1860
},
{
"epoch": 0.59,
"grad_norm": 0.003622630378231406,
"learning_rate": 1.3911111111111114e-05,
"loss": 0.0595,
"step": 1870
},
{
"epoch": 0.59,
"grad_norm": 0.004487840924412012,
"learning_rate": 1.3866666666666669e-05,
"loss": 0.0507,
"step": 1880
},
{
"epoch": 0.59,
"grad_norm": 0.0032237153500318527,
"learning_rate": 1.3822222222222224e-05,
"loss": 0.0246,
"step": 1890
},
{
"epoch": 0.6,
"grad_norm": 2.904846429824829,
"learning_rate": 1.377777777777778e-05,
"loss": 0.041,
"step": 1900
},
{
"epoch": 0.6,
"grad_norm": 2.9861977100372314,
"learning_rate": 1.3733333333333335e-05,
"loss": 0.0529,
"step": 1910
},
{
"epoch": 0.6,
"grad_norm": 0.002036773832514882,
"learning_rate": 1.368888888888889e-05,
"loss": 0.0515,
"step": 1920
},
{
"epoch": 0.6,
"grad_norm": 2.882114887237549,
"learning_rate": 1.3644444444444445e-05,
"loss": 0.0437,
"step": 1930
},
{
"epoch": 0.61,
"grad_norm": 3.0705764293670654,
"learning_rate": 1.3600000000000002e-05,
"loss": 0.0203,
"step": 1940
},
{
"epoch": 0.61,
"grad_norm": 0.003252115799114108,
"learning_rate": 1.3555555555555557e-05,
"loss": 0.0288,
"step": 1950
},
{
"epoch": 0.61,
"grad_norm": 0.002082700841128826,
"learning_rate": 1.3511111111111112e-05,
"loss": 0.0737,
"step": 1960
},
{
"epoch": 0.62,
"grad_norm": 0.004269629716873169,
"learning_rate": 1.3466666666666668e-05,
"loss": 0.0283,
"step": 1970
},
{
"epoch": 0.62,
"grad_norm": 0.004804402124136686,
"learning_rate": 1.3422222222222223e-05,
"loss": 0.0769,
"step": 1980
},
{
"epoch": 0.62,
"grad_norm": 0.006747941020876169,
"learning_rate": 1.3377777777777778e-05,
"loss": 0.0169,
"step": 1990
},
{
"epoch": 0.63,
"grad_norm": 0.00186560966540128,
"learning_rate": 1.3333333333333333e-05,
"loss": 0.0346,
"step": 2000
},
{
"epoch": 0.63,
"eval_loss": 0.018277771770954132,
"eval_runtime": 61.9406,
"eval_samples_per_second": 16.144,
"eval_steps_per_second": 16.144,
"step": 2000
},
{
"epoch": 0.63,
"grad_norm": 0.0024101065937429667,
"learning_rate": 1.3288888888888889e-05,
"loss": 0.047,
"step": 2010
},
{
"epoch": 0.63,
"grad_norm": 0.013266036286950111,
"learning_rate": 1.3244444444444447e-05,
"loss": 0.0304,
"step": 2020
},
{
"epoch": 0.64,
"grad_norm": 0.500153660774231,
"learning_rate": 1.3200000000000002e-05,
"loss": 0.0443,
"step": 2030
},
{
"epoch": 0.64,
"grad_norm": 0.0013359179720282555,
"learning_rate": 1.3155555555555558e-05,
"loss": 0.0002,
"step": 2040
},
{
"epoch": 0.64,
"grad_norm": 1.1259698867797852,
"learning_rate": 1.3111111111111113e-05,
"loss": 0.0382,
"step": 2050
},
{
"epoch": 0.65,
"grad_norm": 0.009357116185128689,
"learning_rate": 1.3066666666666668e-05,
"loss": 0.0624,
"step": 2060
},
{
"epoch": 0.65,
"grad_norm": 0.1253252774477005,
"learning_rate": 1.3022222222222223e-05,
"loss": 0.0734,
"step": 2070
},
{
"epoch": 0.65,
"grad_norm": 0.0024754456244409084,
"learning_rate": 1.2977777777777779e-05,
"loss": 0.0683,
"step": 2080
},
{
"epoch": 0.65,
"grad_norm": 1.6212913990020752,
"learning_rate": 1.2933333333333334e-05,
"loss": 0.0467,
"step": 2090
},
{
"epoch": 0.66,
"grad_norm": 1.4906156063079834,
"learning_rate": 1.288888888888889e-05,
"loss": 0.1055,
"step": 2100
},
{
"epoch": 0.66,
"grad_norm": 0.005489406641572714,
"learning_rate": 1.2844444444444446e-05,
"loss": 0.0425,
"step": 2110
},
{
"epoch": 0.66,
"grad_norm": 0.018557880073785782,
"learning_rate": 1.2800000000000001e-05,
"loss": 0.0497,
"step": 2120
},
{
"epoch": 0.67,
"grad_norm": 0.00175235525239259,
"learning_rate": 1.2755555555555556e-05,
"loss": 0.0517,
"step": 2130
},
{
"epoch": 0.67,
"grad_norm": 0.002169775078073144,
"learning_rate": 1.2711111111111112e-05,
"loss": 0.038,
"step": 2140
},
{
"epoch": 0.67,
"grad_norm": 0.004229371901601553,
"learning_rate": 1.2666666666666667e-05,
"loss": 0.0897,
"step": 2150
},
{
"epoch": 0.68,
"grad_norm": 0.0044867489486932755,
"learning_rate": 1.2622222222222222e-05,
"loss": 0.092,
"step": 2160
},
{
"epoch": 0.68,
"grad_norm": 0.00232448847964406,
"learning_rate": 1.257777777777778e-05,
"loss": 0.0474,
"step": 2170
},
{
"epoch": 0.68,
"grad_norm": 0.10797467082738876,
"learning_rate": 1.2533333333333336e-05,
"loss": 0.0065,
"step": 2180
},
{
"epoch": 0.69,
"grad_norm": 0.001698866835795343,
"learning_rate": 1.2488888888888891e-05,
"loss": 0.1059,
"step": 2190
},
{
"epoch": 0.69,
"grad_norm": 0.0014356509782373905,
"learning_rate": 1.2444444444444446e-05,
"loss": 0.0352,
"step": 2200
},
{
"epoch": 0.69,
"grad_norm": 0.001666803378611803,
"learning_rate": 1.2400000000000002e-05,
"loss": 0.0723,
"step": 2210
},
{
"epoch": 0.7,
"grad_norm": 1.3424423933029175,
"learning_rate": 1.2355555555555557e-05,
"loss": 0.029,
"step": 2220
},
{
"epoch": 0.7,
"grad_norm": 0.0017589009366929531,
"learning_rate": 1.2311111111111112e-05,
"loss": 0.0261,
"step": 2230
},
{
"epoch": 0.7,
"grad_norm": 2.2947793006896973,
"learning_rate": 1.2266666666666667e-05,
"loss": 0.1309,
"step": 2240
},
{
"epoch": 0.7,
"grad_norm": 2.9271962642669678,
"learning_rate": 1.2222222222222224e-05,
"loss": 0.062,
"step": 2250
},
{
"epoch": 0.71,
"grad_norm": 0.006457278039306402,
"learning_rate": 1.217777777777778e-05,
"loss": 0.0099,
"step": 2260
},
{
"epoch": 0.71,
"grad_norm": 1.2980073690414429,
"learning_rate": 1.2133333333333335e-05,
"loss": 0.0846,
"step": 2270
},
{
"epoch": 0.71,
"grad_norm": 0.0023306766524910927,
"learning_rate": 1.208888888888889e-05,
"loss": 0.028,
"step": 2280
},
{
"epoch": 0.72,
"grad_norm": 0.0022570204455405474,
"learning_rate": 1.2044444444444445e-05,
"loss": 0.0283,
"step": 2290
},
{
"epoch": 0.72,
"grad_norm": 0.0026164520531892776,
"learning_rate": 1.2e-05,
"loss": 0.0502,
"step": 2300
},
{
"epoch": 0.72,
"grad_norm": 0.0019713249057531357,
"learning_rate": 1.1955555555555556e-05,
"loss": 0.0354,
"step": 2310
},
{
"epoch": 0.73,
"grad_norm": 0.00487458985298872,
"learning_rate": 1.191111111111111e-05,
"loss": 0.0579,
"step": 2320
},
{
"epoch": 0.73,
"grad_norm": 0.002555105835199356,
"learning_rate": 1.186666666666667e-05,
"loss": 0.0566,
"step": 2330
},
{
"epoch": 0.73,
"grad_norm": 0.28949007391929626,
"learning_rate": 1.1822222222222225e-05,
"loss": 0.0149,
"step": 2340
},
{
"epoch": 0.74,
"grad_norm": 0.5910075902938843,
"learning_rate": 1.177777777777778e-05,
"loss": 0.075,
"step": 2350
},
{
"epoch": 0.74,
"grad_norm": 0.001737405196763575,
"learning_rate": 1.1733333333333335e-05,
"loss": 0.0469,
"step": 2360
},
{
"epoch": 0.74,
"grad_norm": 5.248105049133301,
"learning_rate": 1.168888888888889e-05,
"loss": 0.0715,
"step": 2370
},
{
"epoch": 0.75,
"grad_norm": 0.0018054692773148417,
"learning_rate": 1.1644444444444446e-05,
"loss": 0.0354,
"step": 2380
},
{
"epoch": 0.75,
"grad_norm": 0.001644105650484562,
"learning_rate": 1.16e-05,
"loss": 0.0321,
"step": 2390
},
{
"epoch": 0.75,
"grad_norm": 0.001677204272709787,
"learning_rate": 1.1555555555555556e-05,
"loss": 0.0183,
"step": 2400
},
{
"epoch": 0.76,
"grad_norm": 3.1084885597229004,
"learning_rate": 1.1511111111111113e-05,
"loss": 0.029,
"step": 2410
},
{
"epoch": 0.76,
"grad_norm": 0.007074132561683655,
"learning_rate": 1.1466666666666668e-05,
"loss": 0.0525,
"step": 2420
},
{
"epoch": 0.76,
"grad_norm": 0.0032486789859831333,
"learning_rate": 1.1422222222222223e-05,
"loss": 0.0338,
"step": 2430
},
{
"epoch": 0.76,
"grad_norm": 2.797699213027954,
"learning_rate": 1.1377777777777779e-05,
"loss": 0.1017,
"step": 2440
},
{
"epoch": 0.77,
"grad_norm": 1.977303385734558,
"learning_rate": 1.1333333333333334e-05,
"loss": 0.061,
"step": 2450
},
{
"epoch": 0.77,
"grad_norm": 4.702297210693359,
"learning_rate": 1.1288888888888889e-05,
"loss": 0.0382,
"step": 2460
},
{
"epoch": 0.77,
"grad_norm": 0.05130983144044876,
"learning_rate": 1.1244444444444444e-05,
"loss": 0.0273,
"step": 2470
},
{
"epoch": 0.78,
"grad_norm": 0.001360241905786097,
"learning_rate": 1.1200000000000001e-05,
"loss": 0.0615,
"step": 2480
},
{
"epoch": 0.78,
"grad_norm": 0.65842205286026,
"learning_rate": 1.1155555555555556e-05,
"loss": 0.0349,
"step": 2490
},
{
"epoch": 0.78,
"grad_norm": 0.0062817600555717945,
"learning_rate": 1.1111111111111113e-05,
"loss": 0.055,
"step": 2500
},
{
"epoch": 0.78,
"eval_loss": 0.020550861954689026,
"eval_runtime": 61.9123,
"eval_samples_per_second": 16.152,
"eval_steps_per_second": 16.152,
"step": 2500
},
{
"epoch": 0.79,
"grad_norm": 0.002629642840474844,
"learning_rate": 1.1066666666666669e-05,
"loss": 0.0775,
"step": 2510
},
{
"epoch": 0.79,
"grad_norm": 0.003445986658334732,
"learning_rate": 1.1022222222222224e-05,
"loss": 0.0285,
"step": 2520
},
{
"epoch": 0.79,
"grad_norm": 2.4178996086120605,
"learning_rate": 1.0977777777777779e-05,
"loss": 0.1755,
"step": 2530
},
{
"epoch": 0.8,
"grad_norm": 0.0058800880797207355,
"learning_rate": 1.0933333333333334e-05,
"loss": 0.0055,
"step": 2540
},
{
"epoch": 0.8,
"grad_norm": 0.013078362680971622,
"learning_rate": 1.088888888888889e-05,
"loss": 0.0088,
"step": 2550
},
{
"epoch": 0.8,
"grad_norm": 0.0052205640822649,
"learning_rate": 1.0844444444444446e-05,
"loss": 0.023,
"step": 2560
},
{
"epoch": 0.81,
"grad_norm": 1.2594823837280273,
"learning_rate": 1.0800000000000002e-05,
"loss": 0.0222,
"step": 2570
},
{
"epoch": 0.81,
"grad_norm": 3.3159544467926025,
"learning_rate": 1.0755555555555557e-05,
"loss": 0.0426,
"step": 2580
},
{
"epoch": 0.81,
"grad_norm": 2.103391647338867,
"learning_rate": 1.0711111111111112e-05,
"loss": 0.0457,
"step": 2590
},
{
"epoch": 0.81,
"grad_norm": 0.000918123172596097,
"learning_rate": 1.0666666666666667e-05,
"loss": 0.0391,
"step": 2600
},
{
"epoch": 0.82,
"grad_norm": 0.0012873125961050391,
"learning_rate": 1.0622222222222223e-05,
"loss": 0.0217,
"step": 2610
},
{
"epoch": 0.82,
"grad_norm": 0.002058672485873103,
"learning_rate": 1.0577777777777778e-05,
"loss": 0.0463,
"step": 2620
},
{
"epoch": 0.82,
"grad_norm": 0.027771245688199997,
"learning_rate": 1.0533333333333333e-05,
"loss": 0.0805,
"step": 2630
},
{
"epoch": 0.83,
"grad_norm": 0.0015083320904523134,
"learning_rate": 1.048888888888889e-05,
"loss": 0.05,
"step": 2640
},
{
"epoch": 0.83,
"grad_norm": 2.7888853549957275,
"learning_rate": 1.0444444444444445e-05,
"loss": 0.0752,
"step": 2650
},
{
"epoch": 0.83,
"grad_norm": 3.569105863571167,
"learning_rate": 1.04e-05,
"loss": 0.0585,
"step": 2660
},
{
"epoch": 0.84,
"grad_norm": 0.034804560244083405,
"learning_rate": 1.0355555555555557e-05,
"loss": 0.0113,
"step": 2670
},
{
"epoch": 0.84,
"grad_norm": 0.0017612408846616745,
"learning_rate": 1.0311111111111113e-05,
"loss": 0.0535,
"step": 2680
},
{
"epoch": 0.84,
"grad_norm": 0.0011774456361308694,
"learning_rate": 1.0266666666666668e-05,
"loss": 0.0622,
"step": 2690
},
{
"epoch": 0.85,
"grad_norm": 2.2724449634552,
"learning_rate": 1.0222222222222223e-05,
"loss": 0.0059,
"step": 2700
},
{
"epoch": 0.85,
"grad_norm": 0.8430375456809998,
"learning_rate": 1.0177777777777778e-05,
"loss": 0.043,
"step": 2710
},
{
"epoch": 0.85,
"grad_norm": 2.0912680625915527,
"learning_rate": 1.0133333333333335e-05,
"loss": 0.0356,
"step": 2720
},
{
"epoch": 0.86,
"grad_norm": 0.043006353080272675,
"learning_rate": 1.008888888888889e-05,
"loss": 0.0247,
"step": 2730
},
{
"epoch": 0.86,
"grad_norm": 0.0013607463333755732,
"learning_rate": 1.0044444444444446e-05,
"loss": 0.0115,
"step": 2740
},
{
"epoch": 0.86,
"grad_norm": 0.003235200187191367,
"learning_rate": 1e-05,
"loss": 0.0447,
"step": 2750
},
{
"epoch": 0.86,
"grad_norm": 0.006067329086363316,
"learning_rate": 9.955555555555556e-06,
"loss": 0.049,
"step": 2760
},
{
"epoch": 0.87,
"grad_norm": 0.001446689828298986,
"learning_rate": 9.911111111111113e-06,
"loss": 0.0502,
"step": 2770
},
{
"epoch": 0.87,
"grad_norm": 0.0018873319495469332,
"learning_rate": 9.866666666666668e-06,
"loss": 0.0854,
"step": 2780
},
{
"epoch": 0.87,
"grad_norm": 0.002605983056128025,
"learning_rate": 9.822222222222223e-06,
"loss": 0.0284,
"step": 2790
},
{
"epoch": 0.88,
"grad_norm": 0.002705740975216031,
"learning_rate": 9.777777777777779e-06,
"loss": 0.0639,
"step": 2800
},
{
"epoch": 0.88,
"grad_norm": 0.004726971033960581,
"learning_rate": 9.733333333333334e-06,
"loss": 0.1064,
"step": 2810
},
{
"epoch": 0.88,
"grad_norm": 0.01625528745353222,
"learning_rate": 9.688888888888889e-06,
"loss": 0.0679,
"step": 2820
},
{
"epoch": 0.89,
"grad_norm": 1.4221800565719604,
"learning_rate": 9.644444444444444e-06,
"loss": 0.0504,
"step": 2830
},
{
"epoch": 0.89,
"grad_norm": 1.1785073280334473,
"learning_rate": 9.600000000000001e-06,
"loss": 0.0574,
"step": 2840
},
{
"epoch": 0.89,
"grad_norm": 0.0024502715095877647,
"learning_rate": 9.555555555555556e-06,
"loss": 0.0211,
"step": 2850
},
{
"epoch": 0.9,
"grad_norm": 0.0017725643701851368,
"learning_rate": 9.511111111111112e-06,
"loss": 0.057,
"step": 2860
},
{
"epoch": 0.9,
"grad_norm": 0.0027383696287870407,
"learning_rate": 9.466666666666667e-06,
"loss": 0.1225,
"step": 2870
},
{
"epoch": 0.9,
"grad_norm": 0.00296800397336483,
"learning_rate": 9.422222222222222e-06,
"loss": 0.0167,
"step": 2880
},
{
"epoch": 0.91,
"grad_norm": 0.003540828125551343,
"learning_rate": 9.377777777777779e-06,
"loss": 0.0484,
"step": 2890
},
{
"epoch": 0.91,
"grad_norm": 0.03577937185764313,
"learning_rate": 9.333333333333334e-06,
"loss": 0.0334,
"step": 2900
},
{
"epoch": 0.91,
"grad_norm": 0.0013176521752029657,
"learning_rate": 9.28888888888889e-06,
"loss": 0.0706,
"step": 2910
},
{
"epoch": 0.91,
"grad_norm": 0.0014053646009415388,
"learning_rate": 9.244444444444445e-06,
"loss": 0.0206,
"step": 2920
},
{
"epoch": 0.92,
"grad_norm": 0.002312118886038661,
"learning_rate": 9.200000000000002e-06,
"loss": 0.0398,
"step": 2930
},
{
"epoch": 0.92,
"grad_norm": 0.002495428314432502,
"learning_rate": 9.155555555555557e-06,
"loss": 0.0151,
"step": 2940
},
{
"epoch": 0.92,
"grad_norm": 0.0020435641054064035,
"learning_rate": 9.111111111111112e-06,
"loss": 0.034,
"step": 2950
},
{
"epoch": 0.93,
"grad_norm": 0.0015432636719197035,
"learning_rate": 9.066666666666667e-06,
"loss": 0.0485,
"step": 2960
},
{
"epoch": 0.93,
"grad_norm": 1.6057056188583374,
"learning_rate": 9.022222222222223e-06,
"loss": 0.0566,
"step": 2970
},
{
"epoch": 0.93,
"grad_norm": 0.0020012864843010902,
"learning_rate": 8.977777777777778e-06,
"loss": 0.0012,
"step": 2980
},
{
"epoch": 0.94,
"grad_norm": 0.0008306769304908812,
"learning_rate": 8.933333333333333e-06,
"loss": 0.037,
"step": 2990
},
{
"epoch": 0.94,
"grad_norm": 0.0008226165664382279,
"learning_rate": 8.888888888888888e-06,
"loss": 0.0365,
"step": 3000
},
{
"epoch": 0.94,
"eval_loss": 0.021811991930007935,
"eval_runtime": 61.9881,
"eval_samples_per_second": 16.132,
"eval_steps_per_second": 16.132,
"step": 3000
},
{
"epoch": 0.94,
"grad_norm": 2.95473575592041,
"learning_rate": 8.844444444444445e-06,
"loss": 0.1515,
"step": 3010
},
{
"epoch": 0.95,
"grad_norm": 0.005038989707827568,
"learning_rate": 8.8e-06,
"loss": 0.0355,
"step": 3020
},
{
"epoch": 0.95,
"grad_norm": 0.002464048098772764,
"learning_rate": 8.755555555555556e-06,
"loss": 0.0157,
"step": 3030
},
{
"epoch": 0.95,
"grad_norm": 2.865673065185547,
"learning_rate": 8.711111111111111e-06,
"loss": 0.0994,
"step": 3040
},
{
"epoch": 0.96,
"grad_norm": 0.023971589282155037,
"learning_rate": 8.666666666666668e-06,
"loss": 0.0467,
"step": 3050
},
{
"epoch": 0.96,
"grad_norm": 0.0025374030228704214,
"learning_rate": 8.622222222222223e-06,
"loss": 0.0488,
"step": 3060
},
{
"epoch": 0.96,
"grad_norm": 1.8780492544174194,
"learning_rate": 8.577777777777778e-06,
"loss": 0.0453,
"step": 3070
},
{
"epoch": 0.96,
"grad_norm": 0.0010841538896784186,
"learning_rate": 8.533333333333335e-06,
"loss": 0.0473,
"step": 3080
},
{
"epoch": 0.97,
"grad_norm": 2.532902240753174,
"learning_rate": 8.48888888888889e-06,
"loss": 0.0135,
"step": 3090
},
{
"epoch": 0.97,
"grad_norm": 0.0008325451053678989,
"learning_rate": 8.444444444444446e-06,
"loss": 0.0645,
"step": 3100
},
{
"epoch": 0.97,
"grad_norm": 0.01362746674567461,
"learning_rate": 8.400000000000001e-06,
"loss": 0.107,
"step": 3110
},
{
"epoch": 0.98,
"grad_norm": 0.056719791144132614,
"learning_rate": 8.355555555555556e-06,
"loss": 0.0187,
"step": 3120
},
{
"epoch": 0.98,
"grad_norm": 0.0013840706087648869,
"learning_rate": 8.311111111111111e-06,
"loss": 0.0435,
"step": 3130
},
{
"epoch": 0.98,
"grad_norm": 0.0014659567968919873,
"learning_rate": 8.266666666666667e-06,
"loss": 0.0918,
"step": 3140
},
{
"epoch": 0.99,
"grad_norm": 3.019699811935425,
"learning_rate": 8.222222222222222e-06,
"loss": 0.0166,
"step": 3150
},
{
"epoch": 0.99,
"grad_norm": 0.002052758354693651,
"learning_rate": 8.177777777777779e-06,
"loss": 0.0373,
"step": 3160
},
{
"epoch": 0.99,
"grad_norm": 0.0011199481086805463,
"learning_rate": 8.133333333333334e-06,
"loss": 0.0105,
"step": 3170
},
{
"epoch": 1.0,
"grad_norm": 0.0013342432212084532,
"learning_rate": 8.08888888888889e-06,
"loss": 0.0512,
"step": 3180
},
{
"epoch": 1.0,
"grad_norm": 0.0014090395998209715,
"learning_rate": 8.044444444444444e-06,
"loss": 0.0338,
"step": 3190
},
{
"epoch": 1.0,
"grad_norm": 1.101834774017334,
"learning_rate": 8.000000000000001e-06,
"loss": 0.0129,
"step": 3200
},
{
"epoch": 1.01,
"grad_norm": 0.5294092297554016,
"learning_rate": 7.955555555555557e-06,
"loss": 0.0153,
"step": 3210
},
{
"epoch": 1.01,
"grad_norm": 3.8237059116363525,
"learning_rate": 7.911111111111112e-06,
"loss": 0.0738,
"step": 3220
},
{
"epoch": 1.01,
"grad_norm": 0.0007992366445250809,
"learning_rate": 7.866666666666667e-06,
"loss": 0.0166,
"step": 3230
},
{
"epoch": 1.02,
"grad_norm": 0.7395054697990417,
"learning_rate": 7.822222222222224e-06,
"loss": 0.0087,
"step": 3240
},
{
"epoch": 1.02,
"grad_norm": 0.0022702962160110474,
"learning_rate": 7.77777777777778e-06,
"loss": 0.0137,
"step": 3250
},
{
"epoch": 1.02,
"grad_norm": 0.18367303907871246,
"learning_rate": 7.733333333333334e-06,
"loss": 0.0246,
"step": 3260
},
{
"epoch": 1.02,
"grad_norm": 1.4116305112838745,
"learning_rate": 7.68888888888889e-06,
"loss": 0.0722,
"step": 3270
},
{
"epoch": 1.03,
"grad_norm": 0.0008715521544218063,
"learning_rate": 7.644444444444445e-06,
"loss": 0.0018,
"step": 3280
},
{
"epoch": 1.03,
"grad_norm": 0.0013335467083379626,
"learning_rate": 7.600000000000001e-06,
"loss": 0.0003,
"step": 3290
},
{
"epoch": 1.03,
"grad_norm": 0.002115165116265416,
"learning_rate": 7.555555555555556e-06,
"loss": 0.0087,
"step": 3300
},
{
"epoch": 1.04,
"grad_norm": 2.0042011737823486,
"learning_rate": 7.511111111111111e-06,
"loss": 0.0303,
"step": 3310
},
{
"epoch": 1.04,
"grad_norm": 0.000751888903323561,
"learning_rate": 7.4666666666666675e-06,
"loss": 0.0214,
"step": 3320
},
{
"epoch": 1.04,
"grad_norm": 0.09692036360502243,
"learning_rate": 7.422222222222223e-06,
"loss": 0.113,
"step": 3330
},
{
"epoch": 1.05,
"grad_norm": 4.702492713928223,
"learning_rate": 7.377777777777778e-06,
"loss": 0.0581,
"step": 3340
},
{
"epoch": 1.05,
"grad_norm": 0.01480321865528822,
"learning_rate": 7.333333333333333e-06,
"loss": 0.022,
"step": 3350
},
{
"epoch": 1.05,
"grad_norm": 0.0011497796513140202,
"learning_rate": 7.28888888888889e-06,
"loss": 0.0232,
"step": 3360
},
{
"epoch": 1.06,
"grad_norm": 0.0010983615648001432,
"learning_rate": 7.244444444444445e-06,
"loss": 0.0375,
"step": 3370
},
{
"epoch": 1.06,
"grad_norm": 0.005294387228786945,
"learning_rate": 7.2000000000000005e-06,
"loss": 0.0548,
"step": 3380
},
{
"epoch": 1.06,
"grad_norm": 2.251269578933716,
"learning_rate": 7.155555555555556e-06,
"loss": 0.0205,
"step": 3390
},
{
"epoch": 1.07,
"grad_norm": 0.003230300033465028,
"learning_rate": 7.111111111111112e-06,
"loss": 0.0147,
"step": 3400
},
{
"epoch": 1.07,
"grad_norm": 1.5663217306137085,
"learning_rate": 7.066666666666667e-06,
"loss": 0.0186,
"step": 3410
},
{
"epoch": 1.07,
"grad_norm": 0.0012102341279387474,
"learning_rate": 7.022222222222222e-06,
"loss": 0.0016,
"step": 3420
},
{
"epoch": 1.07,
"grad_norm": 0.000960271863732487,
"learning_rate": 6.977777777777779e-06,
"loss": 0.0351,
"step": 3430
},
{
"epoch": 1.08,
"grad_norm": 0.001627901685424149,
"learning_rate": 6.9333333333333344e-06,
"loss": 0.0494,
"step": 3440
},
{
"epoch": 1.08,
"grad_norm": 0.0015967305516824126,
"learning_rate": 6.88888888888889e-06,
"loss": 0.0052,
"step": 3450
},
{
"epoch": 1.08,
"grad_norm": 0.0006052978569641709,
"learning_rate": 6.844444444444445e-06,
"loss": 0.0198,
"step": 3460
},
{
"epoch": 1.09,
"grad_norm": 0.832760214805603,
"learning_rate": 6.800000000000001e-06,
"loss": 0.0147,
"step": 3470
},
{
"epoch": 1.09,
"grad_norm": 1.8160419464111328,
"learning_rate": 6.755555555555556e-06,
"loss": 0.0256,
"step": 3480
},
{
"epoch": 1.09,
"grad_norm": 1.7934602499008179,
"learning_rate": 6.711111111111111e-06,
"loss": 0.0471,
"step": 3490
},
{
"epoch": 1.1,
"grad_norm": 0.01658487133681774,
"learning_rate": 6.666666666666667e-06,
"loss": 0.043,
"step": 3500
},
{
"epoch": 1.1,
"eval_loss": 0.021265115588903427,
"eval_runtime": 61.9415,
"eval_samples_per_second": 16.144,
"eval_steps_per_second": 16.144,
"step": 3500
},
{
"epoch": 1.1,
"grad_norm": 0.002576815662905574,
"learning_rate": 6.6222222222222236e-06,
"loss": 0.0521,
"step": 3510
},
{
"epoch": 1.1,
"grad_norm": 0.005587077233940363,
"learning_rate": 6.577777777777779e-06,
"loss": 0.0098,
"step": 3520
},
{
"epoch": 1.11,
"grad_norm": 0.7236731052398682,
"learning_rate": 6.533333333333334e-06,
"loss": 0.0434,
"step": 3530
},
{
"epoch": 1.11,
"grad_norm": 0.013782077468931675,
"learning_rate": 6.488888888888889e-06,
"loss": 0.0231,
"step": 3540
},
{
"epoch": 1.11,
"grad_norm": 0.0013029536930844188,
"learning_rate": 6.444444444444445e-06,
"loss": 0.006,
"step": 3550
},
{
"epoch": 1.12,
"grad_norm": 0.0017811213620007038,
"learning_rate": 6.4000000000000006e-06,
"loss": 0.0335,
"step": 3560
},
{
"epoch": 1.12,
"grad_norm": 0.0008715124567970634,
"learning_rate": 6.355555555555556e-06,
"loss": 0.0285,
"step": 3570
},
{
"epoch": 1.12,
"grad_norm": 0.002087782369926572,
"learning_rate": 6.311111111111111e-06,
"loss": 0.0157,
"step": 3580
},
{
"epoch": 1.12,
"grad_norm": 0.0018431423231959343,
"learning_rate": 6.266666666666668e-06,
"loss": 0.007,
"step": 3590
},
{
"epoch": 1.13,
"grad_norm": 2.5827839374542236,
"learning_rate": 6.222222222222223e-06,
"loss": 0.0288,
"step": 3600
},
{
"epoch": 1.13,
"grad_norm": 0.00216556154191494,
"learning_rate": 6.177777777777778e-06,
"loss": 0.0001,
"step": 3610
},
{
"epoch": 1.13,
"grad_norm": 0.0011645135236904025,
"learning_rate": 6.133333333333334e-06,
"loss": 0.0384,
"step": 3620
},
{
"epoch": 1.14,
"grad_norm": 1.5017549991607666,
"learning_rate": 6.08888888888889e-06,
"loss": 0.0133,
"step": 3630
},
{
"epoch": 1.14,
"grad_norm": 1.0787444114685059,
"learning_rate": 6.044444444444445e-06,
"loss": 0.0201,
"step": 3640
},
{
"epoch": 1.14,
"grad_norm": 2.6228489875793457,
"learning_rate": 6e-06,
"loss": 0.0316,
"step": 3650
},
{
"epoch": 1.15,
"grad_norm": 2.6893579959869385,
"learning_rate": 5.955555555555555e-06,
"loss": 0.0296,
"step": 3660
},
{
"epoch": 1.15,
"grad_norm": 2.737757921218872,
"learning_rate": 5.911111111111112e-06,
"loss": 0.0281,
"step": 3670
},
{
"epoch": 1.15,
"grad_norm": 0.0011677155271172523,
"learning_rate": 5.8666666666666675e-06,
"loss": 0.0446,
"step": 3680
},
{
"epoch": 1.16,
"grad_norm": 1.590535044670105,
"learning_rate": 5.822222222222223e-06,
"loss": 0.0668,
"step": 3690
},
{
"epoch": 1.16,
"grad_norm": 3.329134464263916,
"learning_rate": 5.777777777777778e-06,
"loss": 0.023,
"step": 3700
},
{
"epoch": 1.16,
"grad_norm": 0.0014503680868074298,
"learning_rate": 5.733333333333334e-06,
"loss": 0.0343,
"step": 3710
},
{
"epoch": 1.17,
"grad_norm": 0.03579283133149147,
"learning_rate": 5.688888888888889e-06,
"loss": 0.0767,
"step": 3720
},
{
"epoch": 1.17,
"grad_norm": 0.0014656345592811704,
"learning_rate": 5.6444444444444445e-06,
"loss": 0.0202,
"step": 3730
},
{
"epoch": 1.17,
"grad_norm": 0.0010772488312795758,
"learning_rate": 5.600000000000001e-06,
"loss": 0.0521,
"step": 3740
},
{
"epoch": 1.17,
"grad_norm": 0.007391482125967741,
"learning_rate": 5.555555555555557e-06,
"loss": 0.0177,
"step": 3750
},
{
"epoch": 1.18,
"grad_norm": 0.0013084843521937728,
"learning_rate": 5.511111111111112e-06,
"loss": 0.0132,
"step": 3760
},
{
"epoch": 1.18,
"grad_norm": 0.0018328677397221327,
"learning_rate": 5.466666666666667e-06,
"loss": 0.0833,
"step": 3770
},
{
"epoch": 1.18,
"grad_norm": 0.002266037743538618,
"learning_rate": 5.422222222222223e-06,
"loss": 0.0165,
"step": 3780
},
{
"epoch": 1.19,
"grad_norm": 0.35491234064102173,
"learning_rate": 5.3777777777777784e-06,
"loss": 0.0205,
"step": 3790
},
{
"epoch": 1.19,
"grad_norm": 0.08552182465791702,
"learning_rate": 5.333333333333334e-06,
"loss": 0.0153,
"step": 3800
},
{
"epoch": 1.19,
"grad_norm": 0.0011903179110959172,
"learning_rate": 5.288888888888889e-06,
"loss": 0.0164,
"step": 3810
},
{
"epoch": 1.2,
"grad_norm": 0.002342939842492342,
"learning_rate": 5.244444444444445e-06,
"loss": 0.0301,
"step": 3820
},
{
"epoch": 1.2,
"grad_norm": 0.0657195895910263,
"learning_rate": 5.2e-06,
"loss": 0.0162,
"step": 3830
},
{
"epoch": 1.2,
"grad_norm": 0.0015936404233798385,
"learning_rate": 5.155555555555556e-06,
"loss": 0.03,
"step": 3840
},
{
"epoch": 1.21,
"grad_norm": 0.0019919448532164097,
"learning_rate": 5.1111111111111115e-06,
"loss": 0.0003,
"step": 3850
},
{
"epoch": 1.21,
"grad_norm": 1.1910542249679565,
"learning_rate": 5.0666666666666676e-06,
"loss": 0.0309,
"step": 3860
},
{
"epoch": 1.21,
"grad_norm": 0.0016105415998026729,
"learning_rate": 5.022222222222223e-06,
"loss": 0.0319,
"step": 3870
},
{
"epoch": 1.22,
"grad_norm": 0.0006896441336721182,
"learning_rate": 4.977777777777778e-06,
"loss": 0.0187,
"step": 3880
},
{
"epoch": 1.22,
"grad_norm": 2.6113905906677246,
"learning_rate": 4.933333333333334e-06,
"loss": 0.016,
"step": 3890
},
{
"epoch": 1.22,
"grad_norm": 1.3897886276245117,
"learning_rate": 4.888888888888889e-06,
"loss": 0.0575,
"step": 3900
},
{
"epoch": 1.23,
"grad_norm": 0.0009651753352954984,
"learning_rate": 4.8444444444444446e-06,
"loss": 0.001,
"step": 3910
},
{
"epoch": 1.23,
"grad_norm": 0.003367891302332282,
"learning_rate": 4.800000000000001e-06,
"loss": 0.0507,
"step": 3920
},
{
"epoch": 1.23,
"grad_norm": 1.4884891510009766,
"learning_rate": 4.755555555555556e-06,
"loss": 0.0078,
"step": 3930
},
{
"epoch": 1.23,
"grad_norm": 0.0011329209664836526,
"learning_rate": 4.711111111111111e-06,
"loss": 0.0,
"step": 3940
},
{
"epoch": 1.24,
"grad_norm": 0.0015582548221573234,
"learning_rate": 4.666666666666667e-06,
"loss": 0.0263,
"step": 3950
},
{
"epoch": 1.24,
"grad_norm": 0.0019160009687766433,
"learning_rate": 4.622222222222222e-06,
"loss": 0.025,
"step": 3960
},
{
"epoch": 1.24,
"grad_norm": 0.0009318340453319252,
"learning_rate": 4.5777777777777785e-06,
"loss": 0.0474,
"step": 3970
},
{
"epoch": 1.25,
"grad_norm": 0.0015654967864975333,
"learning_rate": 4.533333333333334e-06,
"loss": 0.025,
"step": 3980
},
{
"epoch": 1.25,
"grad_norm": 2.0137476921081543,
"learning_rate": 4.488888888888889e-06,
"loss": 0.0092,
"step": 3990
},
{
"epoch": 1.25,
"grad_norm": 0.03859930485486984,
"learning_rate": 4.444444444444444e-06,
"loss": 0.0231,
"step": 4000
},
{
"epoch": 1.25,
"eval_loss": 0.02214735746383667,
"eval_runtime": 61.9646,
"eval_samples_per_second": 16.138,
"eval_steps_per_second": 16.138,
"step": 4000
},
{
"epoch": 1.26,
"grad_norm": 0.0019934908486902714,
"learning_rate": 4.4e-06,
"loss": 0.0371,
"step": 4010
},
{
"epoch": 1.26,
"grad_norm": 0.019653044641017914,
"learning_rate": 4.3555555555555555e-06,
"loss": 0.0195,
"step": 4020
},
{
"epoch": 1.26,
"grad_norm": 6.957361221313477,
"learning_rate": 4.3111111111111115e-06,
"loss": 0.0581,
"step": 4030
},
{
"epoch": 1.27,
"grad_norm": 0.002056374680250883,
"learning_rate": 4.266666666666668e-06,
"loss": 0.0095,
"step": 4040
},
{
"epoch": 1.27,
"grad_norm": 0.003081790404394269,
"learning_rate": 4.222222222222223e-06,
"loss": 0.0974,
"step": 4050
},
{
"epoch": 1.27,
"grad_norm": 0.5953612327575684,
"learning_rate": 4.177777777777778e-06,
"loss": 0.0236,
"step": 4060
},
{
"epoch": 1.28,
"grad_norm": 0.3564951717853546,
"learning_rate": 4.133333333333333e-06,
"loss": 0.006,
"step": 4070
},
{
"epoch": 1.28,
"grad_norm": 1.716987133026123,
"learning_rate": 4.088888888888889e-06,
"loss": 0.0267,
"step": 4080
},
{
"epoch": 1.28,
"grad_norm": 0.0012724585831165314,
"learning_rate": 4.044444444444445e-06,
"loss": 0.033,
"step": 4090
},
{
"epoch": 1.28,
"grad_norm": 0.04030340909957886,
"learning_rate": 4.000000000000001e-06,
"loss": 0.0281,
"step": 4100
},
{
"epoch": 1.29,
"grad_norm": 0.0011103869182989001,
"learning_rate": 3.955555555555556e-06,
"loss": 0.0207,
"step": 4110
},
{
"epoch": 1.29,
"grad_norm": 1.1311607360839844,
"learning_rate": 3.911111111111112e-06,
"loss": 0.0419,
"step": 4120
},
{
"epoch": 1.29,
"grad_norm": 4.105261325836182,
"learning_rate": 3.866666666666667e-06,
"loss": 0.0481,
"step": 4130
},
{
"epoch": 1.3,
"grad_norm": 0.020243069157004356,
"learning_rate": 3.8222222222222224e-06,
"loss": 0.0159,
"step": 4140
},
{
"epoch": 1.3,
"grad_norm": 0.0007079013157635927,
"learning_rate": 3.777777777777778e-06,
"loss": 0.0394,
"step": 4150
},
{
"epoch": 1.3,
"grad_norm": 4.033346652984619,
"learning_rate": 3.7333333333333337e-06,
"loss": 0.0444,
"step": 4160
},
{
"epoch": 1.31,
"grad_norm": 1.866074800491333,
"learning_rate": 3.688888888888889e-06,
"loss": 0.0236,
"step": 4170
},
{
"epoch": 1.31,
"grad_norm": 0.0009274449548684061,
"learning_rate": 3.644444444444445e-06,
"loss": 0.0185,
"step": 4180
},
{
"epoch": 1.31,
"grad_norm": 0.0005082357674837112,
"learning_rate": 3.6000000000000003e-06,
"loss": 0.0047,
"step": 4190
},
{
"epoch": 1.32,
"grad_norm": 0.0029984298162162304,
"learning_rate": 3.555555555555556e-06,
"loss": 0.0059,
"step": 4200
},
{
"epoch": 1.32,
"grad_norm": 1.9557390213012695,
"learning_rate": 3.511111111111111e-06,
"loss": 0.0159,
"step": 4210
},
{
"epoch": 1.32,
"grad_norm": 0.0032698616851121187,
"learning_rate": 3.4666666666666672e-06,
"loss": 0.0232,
"step": 4220
},
{
"epoch": 1.33,
"grad_norm": 0.0015465226024389267,
"learning_rate": 3.4222222222222224e-06,
"loss": 0.0352,
"step": 4230
},
{
"epoch": 1.33,
"grad_norm": 0.000589876202866435,
"learning_rate": 3.377777777777778e-06,
"loss": 0.0148,
"step": 4240
},
{
"epoch": 1.33,
"grad_norm": 0.0008864306146278977,
"learning_rate": 3.3333333333333333e-06,
"loss": 0.0118,
"step": 4250
},
{
"epoch": 1.33,
"grad_norm": 0.001105438219383359,
"learning_rate": 3.2888888888888894e-06,
"loss": 0.0116,
"step": 4260
},
{
"epoch": 1.34,
"grad_norm": 0.001639057882130146,
"learning_rate": 3.2444444444444446e-06,
"loss": 0.0346,
"step": 4270
},
{
"epoch": 1.34,
"grad_norm": 5.211305141448975,
"learning_rate": 3.2000000000000003e-06,
"loss": 0.1259,
"step": 4280
},
{
"epoch": 1.34,
"grad_norm": 0.0007241423591040075,
"learning_rate": 3.1555555555555555e-06,
"loss": 0.0062,
"step": 4290
},
{
"epoch": 1.35,
"grad_norm": 0.0005371780716814101,
"learning_rate": 3.1111111111111116e-06,
"loss": 0.0057,
"step": 4300
},
{
"epoch": 1.35,
"grad_norm": 0.0020129482727497816,
"learning_rate": 3.066666666666667e-06,
"loss": 0.017,
"step": 4310
},
{
"epoch": 1.35,
"grad_norm": 0.002723652869462967,
"learning_rate": 3.0222222222222225e-06,
"loss": 0.055,
"step": 4320
},
{
"epoch": 1.36,
"grad_norm": 0.0009510384988971055,
"learning_rate": 2.9777777777777777e-06,
"loss": 0.0278,
"step": 4330
},
{
"epoch": 1.36,
"grad_norm": 0.0005794400931335986,
"learning_rate": 2.9333333333333338e-06,
"loss": 0.0462,
"step": 4340
},
{
"epoch": 1.36,
"grad_norm": 0.001246045925654471,
"learning_rate": 2.888888888888889e-06,
"loss": 0.0345,
"step": 4350
},
{
"epoch": 1.37,
"grad_norm": 1.7043557167053223,
"learning_rate": 2.8444444444444446e-06,
"loss": 0.0536,
"step": 4360
},
{
"epoch": 1.37,
"grad_norm": 3.8166236877441406,
"learning_rate": 2.8000000000000003e-06,
"loss": 0.0218,
"step": 4370
},
{
"epoch": 1.37,
"grad_norm": 2.2809741497039795,
"learning_rate": 2.755555555555556e-06,
"loss": 0.064,
"step": 4380
},
{
"epoch": 1.38,
"grad_norm": 0.005824711639434099,
"learning_rate": 2.7111111111111116e-06,
"loss": 0.0269,
"step": 4390
},
{
"epoch": 1.38,
"grad_norm": 0.05956351011991501,
"learning_rate": 2.666666666666667e-06,
"loss": 0.0352,
"step": 4400
},
{
"epoch": 1.38,
"grad_norm": 0.0005406837444752455,
"learning_rate": 2.6222222222222225e-06,
"loss": 0.0447,
"step": 4410
},
{
"epoch": 1.38,
"grad_norm": 1.6089625358581543,
"learning_rate": 2.577777777777778e-06,
"loss": 0.0295,
"step": 4420
},
{
"epoch": 1.39,
"grad_norm": 0.0007441785419359803,
"learning_rate": 2.5333333333333338e-06,
"loss": 0.0309,
"step": 4430
},
{
"epoch": 1.39,
"grad_norm": 0.295694500207901,
"learning_rate": 2.488888888888889e-06,
"loss": 0.0645,
"step": 4440
},
{
"epoch": 1.39,
"grad_norm": 0.00048340618377551436,
"learning_rate": 2.4444444444444447e-06,
"loss": 0.0334,
"step": 4450
},
{
"epoch": 1.4,
"grad_norm": 0.0010877919849008322,
"learning_rate": 2.4000000000000003e-06,
"loss": 0.0196,
"step": 4460
},
{
"epoch": 1.4,
"grad_norm": 1.4027998447418213,
"learning_rate": 2.3555555555555555e-06,
"loss": 0.0419,
"step": 4470
},
{
"epoch": 1.4,
"grad_norm": 1.5113612413406372,
"learning_rate": 2.311111111111111e-06,
"loss": 0.0181,
"step": 4480
},
{
"epoch": 1.41,
"grad_norm": 0.018225079402327538,
"learning_rate": 2.266666666666667e-06,
"loss": 0.0124,
"step": 4490
},
{
"epoch": 1.41,
"grad_norm": 0.0012544667115435004,
"learning_rate": 2.222222222222222e-06,
"loss": 0.0164,
"step": 4500
},
{
"epoch": 1.41,
"eval_loss": 0.0191478431224823,
"eval_runtime": 61.916,
"eval_samples_per_second": 16.151,
"eval_steps_per_second": 16.151,
"step": 4500
},
{
"epoch": 1.41,
"grad_norm": 0.0007392147090286016,
"learning_rate": 2.1777777777777777e-06,
"loss": 0.0231,
"step": 4510
},
{
"epoch": 1.42,
"grad_norm": 1.7059309482574463,
"learning_rate": 2.133333333333334e-06,
"loss": 0.0205,
"step": 4520
},
{
"epoch": 1.42,
"grad_norm": 0.0005404640105552971,
"learning_rate": 2.088888888888889e-06,
"loss": 0.0281,
"step": 4530
},
{
"epoch": 1.42,
"grad_norm": 0.000860686122905463,
"learning_rate": 2.0444444444444447e-06,
"loss": 0.0097,
"step": 4540
},
{
"epoch": 1.43,
"grad_norm": 0.0006032692035660148,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.009,
"step": 4550
},
{
"epoch": 1.43,
"grad_norm": 0.0009364313445985317,
"learning_rate": 1.955555555555556e-06,
"loss": 0.0095,
"step": 4560
},
{
"epoch": 1.43,
"grad_norm": 0.0004945364780724049,
"learning_rate": 1.9111111111111112e-06,
"loss": 0.0074,
"step": 4570
},
{
"epoch": 1.43,
"grad_norm": 0.001013664877973497,
"learning_rate": 1.8666666666666669e-06,
"loss": 0.0074,
"step": 4580
},
{
"epoch": 1.44,
"grad_norm": 0.001039516762830317,
"learning_rate": 1.8222222222222225e-06,
"loss": 0.0528,
"step": 4590
},
{
"epoch": 1.44,
"grad_norm": 0.0011727253440767527,
"learning_rate": 1.777777777777778e-06,
"loss": 0.0304,
"step": 4600
},
{
"epoch": 1.44,
"grad_norm": 0.0022508064284920692,
"learning_rate": 1.7333333333333336e-06,
"loss": 0.02,
"step": 4610
},
{
"epoch": 1.45,
"grad_norm": 0.0005586406332440674,
"learning_rate": 1.688888888888889e-06,
"loss": 0.0343,
"step": 4620
},
{
"epoch": 1.45,
"grad_norm": 1.7288540601730347,
"learning_rate": 1.6444444444444447e-06,
"loss": 0.0615,
"step": 4630
},
{
"epoch": 1.45,
"grad_norm": 0.0005667012301273644,
"learning_rate": 1.6000000000000001e-06,
"loss": 0.0002,
"step": 4640
},
{
"epoch": 1.46,
"grad_norm": 1.2609503269195557,
"learning_rate": 1.5555555555555558e-06,
"loss": 0.024,
"step": 4650
},
{
"epoch": 1.46,
"grad_norm": 0.007942456752061844,
"learning_rate": 1.5111111111111112e-06,
"loss": 0.0858,
"step": 4660
},
{
"epoch": 1.46,
"grad_norm": 0.0006302391411736608,
"learning_rate": 1.4666666666666669e-06,
"loss": 0.0085,
"step": 4670
},
{
"epoch": 1.47,
"grad_norm": 0.0007383729098364711,
"learning_rate": 1.4222222222222223e-06,
"loss": 0.0077,
"step": 4680
},
{
"epoch": 1.47,
"grad_norm": 1.8679457902908325,
"learning_rate": 1.377777777777778e-06,
"loss": 0.0498,
"step": 4690
},
{
"epoch": 1.47,
"grad_norm": 1.0000814199447632,
"learning_rate": 1.3333333333333334e-06,
"loss": 0.0072,
"step": 4700
},
{
"epoch": 1.48,
"grad_norm": 0.0014682277105748653,
"learning_rate": 1.288888888888889e-06,
"loss": 0.0167,
"step": 4710
},
{
"epoch": 1.48,
"grad_norm": 0.002120391232892871,
"learning_rate": 1.2444444444444445e-06,
"loss": 0.0123,
"step": 4720
},
{
"epoch": 1.48,
"grad_norm": 1.122591257095337,
"learning_rate": 1.2000000000000002e-06,
"loss": 0.0475,
"step": 4730
},
{
"epoch": 1.49,
"grad_norm": 1.740593671798706,
"learning_rate": 1.1555555555555556e-06,
"loss": 0.051,
"step": 4740
},
{
"epoch": 1.49,
"grad_norm": 0.001058158464729786,
"learning_rate": 1.111111111111111e-06,
"loss": 0.0376,
"step": 4750
},
{
"epoch": 1.49,
"grad_norm": 0.001304270583204925,
"learning_rate": 1.066666666666667e-06,
"loss": 0.0051,
"step": 4760
},
{
"epoch": 1.49,
"grad_norm": 1.0726263523101807,
"learning_rate": 1.0222222222222223e-06,
"loss": 0.0358,
"step": 4770
},
{
"epoch": 1.5,
"grad_norm": 0.0022968349512666464,
"learning_rate": 9.77777777777778e-07,
"loss": 0.0502,
"step": 4780
},
{
"epoch": 1.5,
"grad_norm": 0.0011897665681317449,
"learning_rate": 9.333333333333334e-07,
"loss": 0.0316,
"step": 4790
},
{
"epoch": 1.5,
"grad_norm": 1.8926975727081299,
"learning_rate": 8.88888888888889e-07,
"loss": 0.0992,
"step": 4800
},
{
"epoch": 1.51,
"grad_norm": 0.0009652904118411243,
"learning_rate": 8.444444444444445e-07,
"loss": 0.0043,
"step": 4810
},
{
"epoch": 1.51,
"grad_norm": 0.0011450715828686953,
"learning_rate": 8.000000000000001e-07,
"loss": 0.0448,
"step": 4820
},
{
"epoch": 1.51,
"grad_norm": 0.03609352558851242,
"learning_rate": 7.555555555555556e-07,
"loss": 0.0459,
"step": 4830
},
{
"epoch": 1.52,
"grad_norm": 0.3606705665588379,
"learning_rate": 7.111111111111112e-07,
"loss": 0.0145,
"step": 4840
},
{
"epoch": 1.52,
"grad_norm": 0.005971833132207394,
"learning_rate": 6.666666666666667e-07,
"loss": 0.0359,
"step": 4850
},
{
"epoch": 1.52,
"grad_norm": 0.0006163293146528304,
"learning_rate": 6.222222222222223e-07,
"loss": 0.0195,
"step": 4860
},
{
"epoch": 1.53,
"grad_norm": 0.000649360881652683,
"learning_rate": 5.777777777777778e-07,
"loss": 0.0549,
"step": 4870
},
{
"epoch": 1.53,
"grad_norm": 0.5589812994003296,
"learning_rate": 5.333333333333335e-07,
"loss": 0.0179,
"step": 4880
},
{
"epoch": 1.53,
"grad_norm": 4.655618190765381,
"learning_rate": 4.88888888888889e-07,
"loss": 0.0463,
"step": 4890
},
{
"epoch": 1.54,
"grad_norm": 0.0005667076911777258,
"learning_rate": 4.444444444444445e-07,
"loss": 0.0389,
"step": 4900
},
{
"epoch": 1.54,
"grad_norm": 0.0008188075153157115,
"learning_rate": 4.0000000000000003e-07,
"loss": 0.0898,
"step": 4910
},
{
"epoch": 1.54,
"grad_norm": 0.01343528926372528,
"learning_rate": 3.555555555555556e-07,
"loss": 0.0124,
"step": 4920
},
{
"epoch": 1.54,
"grad_norm": 0.0009366168524138629,
"learning_rate": 3.111111111111111e-07,
"loss": 0.0349,
"step": 4930
},
{
"epoch": 1.55,
"grad_norm": 0.0007606361177749932,
"learning_rate": 2.666666666666667e-07,
"loss": 0.0109,
"step": 4940
},
{
"epoch": 1.55,
"grad_norm": 0.0006974030402489007,
"learning_rate": 2.2222222222222224e-07,
"loss": 0.0164,
"step": 4950
},
{
"epoch": 1.55,
"grad_norm": 0.001067605335265398,
"learning_rate": 1.777777777777778e-07,
"loss": 0.0388,
"step": 4960
},
{
"epoch": 1.56,
"grad_norm": 0.0011008504079654813,
"learning_rate": 1.3333333333333336e-07,
"loss": 0.0391,
"step": 4970
},
{
"epoch": 1.56,
"grad_norm": 0.0006954495911486447,
"learning_rate": 8.88888888888889e-08,
"loss": 0.0167,
"step": 4980
},
{
"epoch": 1.56,
"grad_norm": 0.004439559765160084,
"learning_rate": 4.444444444444445e-08,
"loss": 0.0465,
"step": 4990
},
{
"epoch": 1.57,
"grad_norm": 0.0010708813788369298,
"learning_rate": 0.0,
"loss": 0.0342,
"step": 5000
},
{
"epoch": 1.57,
"eval_loss": 0.02267477475106716,
"eval_runtime": 61.8731,
"eval_samples_per_second": 16.162,
"eval_steps_per_second": 16.162,
"step": 5000
}
],
"logging_steps": 10,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 1000,
"total_flos": 8.051062996992e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}