{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.3,
"eval_steps": 500,
"global_step": 1875,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0008,
"grad_norm": 0.6484375,
"learning_rate": 0.0001,
"loss": 1.3667,
"step": 5
},
{
"epoch": 0.0016,
"grad_norm": 0.2890625,
"learning_rate": 0.0001,
"loss": 0.6021,
"step": 10
},
{
"epoch": 0.0024,
"grad_norm": 0.279296875,
"learning_rate": 0.0001,
"loss": 0.5931,
"step": 15
},
{
"epoch": 0.0032,
"grad_norm": 0.2255859375,
"learning_rate": 0.0001,
"loss": 0.4884,
"step": 20
},
{
"epoch": 0.004,
"grad_norm": 0.208984375,
"learning_rate": 0.0001,
"loss": 0.4893,
"step": 25
},
{
"epoch": 0.0048,
"grad_norm": 0.259765625,
"learning_rate": 0.0001,
"loss": 0.4225,
"step": 30
},
{
"epoch": 0.0056,
"grad_norm": 0.1376953125,
"learning_rate": 0.0001,
"loss": 0.4329,
"step": 35
},
{
"epoch": 0.0064,
"grad_norm": 0.1826171875,
"learning_rate": 0.0001,
"loss": 0.3509,
"step": 40
},
{
"epoch": 0.0072,
"grad_norm": 0.1728515625,
"learning_rate": 0.0001,
"loss": 0.2905,
"step": 45
},
{
"epoch": 0.008,
"grad_norm": 0.1552734375,
"learning_rate": 0.0001,
"loss": 0.2521,
"step": 50
},
{
"epoch": 0.0088,
"grad_norm": 0.1630859375,
"learning_rate": 0.0001,
"loss": 0.2611,
"step": 55
},
{
"epoch": 0.0096,
"grad_norm": 0.275390625,
"learning_rate": 0.0001,
"loss": 0.2761,
"step": 60
},
{
"epoch": 0.0104,
"grad_norm": 0.244140625,
"learning_rate": 0.0001,
"loss": 0.3501,
"step": 65
},
{
"epoch": 0.0112,
"grad_norm": 0.1953125,
"learning_rate": 0.0001,
"loss": 0.3219,
"step": 70
},
{
"epoch": 0.012,
"grad_norm": 0.119140625,
"learning_rate": 0.0001,
"loss": 0.3007,
"step": 75
},
{
"epoch": 0.0128,
"grad_norm": 0.126953125,
"learning_rate": 0.0001,
"loss": 0.2484,
"step": 80
},
{
"epoch": 0.0136,
"grad_norm": 0.2001953125,
"learning_rate": 0.0001,
"loss": 0.3356,
"step": 85
},
{
"epoch": 0.0144,
"grad_norm": 0.1376953125,
"learning_rate": 0.0001,
"loss": 0.2909,
"step": 90
},
{
"epoch": 0.0152,
"grad_norm": 0.228515625,
"learning_rate": 0.0001,
"loss": 0.3195,
"step": 95
},
{
"epoch": 0.016,
"grad_norm": 0.2197265625,
"learning_rate": 0.0001,
"loss": 0.287,
"step": 100
},
{
"epoch": 0.0168,
"grad_norm": 0.1650390625,
"learning_rate": 0.0001,
"loss": 0.1826,
"step": 105
},
{
"epoch": 0.0176,
"grad_norm": 0.22265625,
"learning_rate": 0.0001,
"loss": 0.2706,
"step": 110
},
{
"epoch": 0.0184,
"grad_norm": 0.16015625,
"learning_rate": 0.0001,
"loss": 0.2637,
"step": 115
},
{
"epoch": 0.0192,
"grad_norm": 0.142578125,
"learning_rate": 0.0001,
"loss": 0.3109,
"step": 120
},
{
"epoch": 0.02,
"grad_norm": 0.1640625,
"learning_rate": 0.0001,
"loss": 0.2612,
"step": 125
},
{
"epoch": 0.0208,
"grad_norm": 0.1689453125,
"learning_rate": 0.0001,
"loss": 0.2936,
"step": 130
},
{
"epoch": 0.0216,
"grad_norm": 0.17578125,
"learning_rate": 0.0001,
"loss": 0.2793,
"step": 135
},
{
"epoch": 0.0224,
"grad_norm": 0.1083984375,
"learning_rate": 0.0001,
"loss": 0.3024,
"step": 140
},
{
"epoch": 0.0232,
"grad_norm": 0.0986328125,
"learning_rate": 0.0001,
"loss": 0.2759,
"step": 145
},
{
"epoch": 0.024,
"grad_norm": 0.2109375,
"learning_rate": 0.0001,
"loss": 0.1857,
"step": 150
},
{
"epoch": 0.0248,
"grad_norm": 0.0966796875,
"learning_rate": 0.0001,
"loss": 0.1355,
"step": 155
},
{
"epoch": 0.0256,
"grad_norm": 0.1494140625,
"learning_rate": 0.0001,
"loss": 0.2338,
"step": 160
},
{
"epoch": 0.0264,
"grad_norm": 0.1513671875,
"learning_rate": 0.0001,
"loss": 0.2164,
"step": 165
},
{
"epoch": 0.0272,
"grad_norm": 0.1826171875,
"learning_rate": 0.0001,
"loss": 0.2483,
"step": 170
},
{
"epoch": 0.028,
"grad_norm": 0.1171875,
"learning_rate": 0.0001,
"loss": 0.2607,
"step": 175
},
{
"epoch": 0.0288,
"grad_norm": 0.1591796875,
"learning_rate": 0.0001,
"loss": 0.1975,
"step": 180
},
{
"epoch": 0.0296,
"grad_norm": 0.134765625,
"learning_rate": 0.0001,
"loss": 0.2463,
"step": 185
},
{
"epoch": 0.0304,
"grad_norm": 0.1826171875,
"learning_rate": 0.0001,
"loss": 0.2358,
"step": 190
},
{
"epoch": 0.0312,
"grad_norm": 0.1220703125,
"learning_rate": 0.0001,
"loss": 0.2332,
"step": 195
},
{
"epoch": 0.032,
"grad_norm": 0.146484375,
"learning_rate": 0.0001,
"loss": 0.2009,
"step": 200
},
{
"epoch": 0.0328,
"grad_norm": 0.1796875,
"learning_rate": 0.0001,
"loss": 0.1893,
"step": 205
},
{
"epoch": 0.0336,
"grad_norm": 0.1875,
"learning_rate": 0.0001,
"loss": 0.2456,
"step": 210
},
{
"epoch": 0.0344,
"grad_norm": 0.09765625,
"learning_rate": 0.0001,
"loss": 0.2466,
"step": 215
},
{
"epoch": 0.0352,
"grad_norm": 0.150390625,
"learning_rate": 0.0001,
"loss": 0.2551,
"step": 220
},
{
"epoch": 0.036,
"grad_norm": 0.14453125,
"learning_rate": 0.0001,
"loss": 0.2433,
"step": 225
},
{
"epoch": 0.0368,
"grad_norm": 0.17578125,
"learning_rate": 0.0001,
"loss": 0.2108,
"step": 230
},
{
"epoch": 0.0376,
"grad_norm": 0.140625,
"learning_rate": 0.0001,
"loss": 0.2568,
"step": 235
},
{
"epoch": 0.0384,
"grad_norm": 0.11962890625,
"learning_rate": 0.0001,
"loss": 0.2372,
"step": 240
},
{
"epoch": 0.0392,
"grad_norm": 0.146484375,
"learning_rate": 0.0001,
"loss": 0.2998,
"step": 245
},
{
"epoch": 0.04,
"grad_norm": 0.146484375,
"learning_rate": 0.0001,
"loss": 0.1874,
"step": 250
},
{
"epoch": 0.0408,
"grad_norm": 0.2373046875,
"learning_rate": 0.0001,
"loss": 0.1526,
"step": 255
},
{
"epoch": 0.0416,
"grad_norm": 0.16015625,
"learning_rate": 0.0001,
"loss": 0.2291,
"step": 260
},
{
"epoch": 0.0424,
"grad_norm": 0.1474609375,
"learning_rate": 0.0001,
"loss": 0.228,
"step": 265
},
{
"epoch": 0.0432,
"grad_norm": 0.1328125,
"learning_rate": 0.0001,
"loss": 0.2063,
"step": 270
},
{
"epoch": 0.044,
"grad_norm": 0.138671875,
"learning_rate": 0.0001,
"loss": 0.2627,
"step": 275
},
{
"epoch": 0.0448,
"grad_norm": 0.1689453125,
"learning_rate": 0.0001,
"loss": 0.2837,
"step": 280
},
{
"epoch": 0.0456,
"grad_norm": 0.134765625,
"learning_rate": 0.0001,
"loss": 0.2537,
"step": 285
},
{
"epoch": 0.0464,
"grad_norm": 0.150390625,
"learning_rate": 0.0001,
"loss": 0.2314,
"step": 290
},
{
"epoch": 0.0472,
"grad_norm": 0.1552734375,
"learning_rate": 0.0001,
"loss": 0.2183,
"step": 295
},
{
"epoch": 0.048,
"grad_norm": 0.1953125,
"learning_rate": 0.0001,
"loss": 0.2426,
"step": 300
},
{
"epoch": 0.0488,
"grad_norm": 0.193359375,
"learning_rate": 0.0001,
"loss": 0.1542,
"step": 305
},
{
"epoch": 0.0496,
"grad_norm": 0.22265625,
"learning_rate": 0.0001,
"loss": 0.2559,
"step": 310
},
{
"epoch": 0.0504,
"grad_norm": 0.1318359375,
"learning_rate": 0.0001,
"loss": 0.2596,
"step": 315
},
{
"epoch": 0.0512,
"grad_norm": 0.099609375,
"learning_rate": 0.0001,
"loss": 0.23,
"step": 320
},
{
"epoch": 0.052,
"grad_norm": 0.103515625,
"learning_rate": 0.0001,
"loss": 0.2111,
"step": 325
},
{
"epoch": 0.0528,
"grad_norm": 0.1787109375,
"learning_rate": 0.0001,
"loss": 0.2623,
"step": 330
},
{
"epoch": 0.0536,
"grad_norm": 0.208984375,
"learning_rate": 0.0001,
"loss": 0.2963,
"step": 335
},
{
"epoch": 0.0544,
"grad_norm": 0.1376953125,
"learning_rate": 0.0001,
"loss": 0.2518,
"step": 340
},
{
"epoch": 0.0552,
"grad_norm": 0.1376953125,
"learning_rate": 0.0001,
"loss": 0.2331,
"step": 345
},
{
"epoch": 0.056,
"grad_norm": 0.09423828125,
"learning_rate": 0.0001,
"loss": 0.1953,
"step": 350
},
{
"epoch": 0.0568,
"grad_norm": 0.244140625,
"learning_rate": 0.0001,
"loss": 0.1474,
"step": 355
},
{
"epoch": 0.0576,
"grad_norm": 0.11474609375,
"learning_rate": 0.0001,
"loss": 0.1851,
"step": 360
},
{
"epoch": 0.0584,
"grad_norm": 0.1669921875,
"learning_rate": 0.0001,
"loss": 0.2436,
"step": 365
},
{
"epoch": 0.0592,
"grad_norm": 0.1572265625,
"learning_rate": 0.0001,
"loss": 0.3038,
"step": 370
},
{
"epoch": 0.06,
"grad_norm": 0.1328125,
"learning_rate": 0.0001,
"loss": 0.2231,
"step": 375
},
{
"epoch": 0.0608,
"grad_norm": 0.19921875,
"learning_rate": 0.0001,
"loss": 0.2191,
"step": 380
},
{
"epoch": 0.0616,
"grad_norm": 0.1611328125,
"learning_rate": 0.0001,
"loss": 0.267,
"step": 385
},
{
"epoch": 0.0624,
"grad_norm": 0.11865234375,
"learning_rate": 0.0001,
"loss": 0.1987,
"step": 390
},
{
"epoch": 0.0632,
"grad_norm": 0.1513671875,
"learning_rate": 0.0001,
"loss": 0.2045,
"step": 395
},
{
"epoch": 0.064,
"grad_norm": 0.1162109375,
"learning_rate": 0.0001,
"loss": 0.1616,
"step": 400
},
{
"epoch": 0.0648,
"grad_norm": 0.158203125,
"learning_rate": 0.0001,
"loss": 0.1417,
"step": 405
},
{
"epoch": 0.0656,
"grad_norm": 0.1708984375,
"learning_rate": 0.0001,
"loss": 0.2236,
"step": 410
},
{
"epoch": 0.0664,
"grad_norm": 0.2216796875,
"learning_rate": 0.0001,
"loss": 0.2387,
"step": 415
},
{
"epoch": 0.0672,
"grad_norm": 0.1416015625,
"learning_rate": 0.0001,
"loss": 0.1907,
"step": 420
},
{
"epoch": 0.068,
"grad_norm": 0.1201171875,
"learning_rate": 0.0001,
"loss": 0.2465,
"step": 425
},
{
"epoch": 0.0688,
"grad_norm": 0.1728515625,
"learning_rate": 0.0001,
"loss": 0.2512,
"step": 430
},
{
"epoch": 0.0696,
"grad_norm": 0.11328125,
"learning_rate": 0.0001,
"loss": 0.1749,
"step": 435
},
{
"epoch": 0.0704,
"grad_norm": 0.328125,
"learning_rate": 0.0001,
"loss": 0.2124,
"step": 440
},
{
"epoch": 0.0712,
"grad_norm": 0.1552734375,
"learning_rate": 0.0001,
"loss": 0.2249,
"step": 445
},
{
"epoch": 0.072,
"grad_norm": 0.1728515625,
"learning_rate": 0.0001,
"loss": 0.2183,
"step": 450
},
{
"epoch": 0.0728,
"grad_norm": 0.2099609375,
"learning_rate": 0.0001,
"loss": 0.1436,
"step": 455
},
{
"epoch": 0.0736,
"grad_norm": 0.15625,
"learning_rate": 0.0001,
"loss": 0.2584,
"step": 460
},
{
"epoch": 0.0744,
"grad_norm": 0.1494140625,
"learning_rate": 0.0001,
"loss": 0.2075,
"step": 465
},
{
"epoch": 0.0752,
"grad_norm": 0.150390625,
"learning_rate": 0.0001,
"loss": 0.2687,
"step": 470
},
{
"epoch": 0.076,
"grad_norm": 0.25390625,
"learning_rate": 0.0001,
"loss": 0.2125,
"step": 475
},
{
"epoch": 0.0768,
"grad_norm": 0.1259765625,
"learning_rate": 0.0001,
"loss": 0.1893,
"step": 480
},
{
"epoch": 0.0776,
"grad_norm": 0.158203125,
"learning_rate": 0.0001,
"loss": 0.2272,
"step": 485
},
{
"epoch": 0.0784,
"grad_norm": 0.09130859375,
"learning_rate": 0.0001,
"loss": 0.2172,
"step": 490
},
{
"epoch": 0.0792,
"grad_norm": 0.16796875,
"learning_rate": 0.0001,
"loss": 0.2382,
"step": 495
},
{
"epoch": 0.08,
"grad_norm": 0.126953125,
"learning_rate": 0.0001,
"loss": 0.1732,
"step": 500
},
{
"epoch": 0.0808,
"grad_norm": 0.1396484375,
"learning_rate": 0.0001,
"loss": 0.1203,
"step": 505
},
{
"epoch": 0.0816,
"grad_norm": 0.1259765625,
"learning_rate": 0.0001,
"loss": 0.2118,
"step": 510
},
{
"epoch": 0.0824,
"grad_norm": 0.1357421875,
"learning_rate": 0.0001,
"loss": 0.2096,
"step": 515
},
{
"epoch": 0.0832,
"grad_norm": 0.150390625,
"learning_rate": 0.0001,
"loss": 0.2379,
"step": 520
},
{
"epoch": 0.084,
"grad_norm": 0.1953125,
"learning_rate": 0.0001,
"loss": 0.2167,
"step": 525
},
{
"epoch": 0.0848,
"grad_norm": 0.09912109375,
"learning_rate": 0.0001,
"loss": 0.2328,
"step": 530
},
{
"epoch": 0.0856,
"grad_norm": 0.380859375,
"learning_rate": 0.0001,
"loss": 0.2141,
"step": 535
},
{
"epoch": 0.0864,
"grad_norm": 0.11572265625,
"learning_rate": 0.0001,
"loss": 0.183,
"step": 540
},
{
"epoch": 0.0872,
"grad_norm": 0.150390625,
"learning_rate": 0.0001,
"loss": 0.1895,
"step": 545
},
{
"epoch": 0.088,
"grad_norm": 0.0966796875,
"learning_rate": 0.0001,
"loss": 0.1824,
"step": 550
},
{
"epoch": 0.0888,
"grad_norm": 0.10693359375,
"learning_rate": 0.0001,
"loss": 0.1108,
"step": 555
},
{
"epoch": 0.0896,
"grad_norm": 0.16796875,
"learning_rate": 0.0001,
"loss": 0.2111,
"step": 560
},
{
"epoch": 0.0904,
"grad_norm": 0.10791015625,
"learning_rate": 0.0001,
"loss": 0.2169,
"step": 565
},
{
"epoch": 0.0912,
"grad_norm": 0.11328125,
"learning_rate": 0.0001,
"loss": 0.2518,
"step": 570
},
{
"epoch": 0.092,
"grad_norm": 0.1806640625,
"learning_rate": 0.0001,
"loss": 0.2217,
"step": 575
},
{
"epoch": 0.0928,
"grad_norm": 0.072265625,
"learning_rate": 0.0001,
"loss": 0.2436,
"step": 580
},
{
"epoch": 0.0936,
"grad_norm": 0.11328125,
"learning_rate": 0.0001,
"loss": 0.2692,
"step": 585
},
{
"epoch": 0.0944,
"grad_norm": 0.1083984375,
"learning_rate": 0.0001,
"loss": 0.23,
"step": 590
},
{
"epoch": 0.0952,
"grad_norm": 0.1630859375,
"learning_rate": 0.0001,
"loss": 0.2581,
"step": 595
},
{
"epoch": 0.096,
"grad_norm": 0.11328125,
"learning_rate": 0.0001,
"loss": 0.1432,
"step": 600
},
{
"epoch": 0.0968,
"grad_norm": 0.126953125,
"learning_rate": 0.0001,
"loss": 0.1439,
"step": 605
},
{
"epoch": 0.0976,
"grad_norm": 0.087890625,
"learning_rate": 0.0001,
"loss": 0.1891,
"step": 610
},
{
"epoch": 0.0984,
"grad_norm": 0.10498046875,
"learning_rate": 0.0001,
"loss": 0.2183,
"step": 615
},
{
"epoch": 0.0992,
"grad_norm": 0.14453125,
"learning_rate": 0.0001,
"loss": 0.221,
"step": 620
},
{
"epoch": 0.1,
"grad_norm": 0.1025390625,
"learning_rate": 0.0001,
"loss": 0.2226,
"step": 625
},
{
"epoch": 0.1008,
"grad_norm": 0.150390625,
"learning_rate": 0.0001,
"loss": 0.2429,
"step": 630
},
{
"epoch": 0.1016,
"grad_norm": 0.1337890625,
"learning_rate": 0.0001,
"loss": 0.2422,
"step": 635
},
{
"epoch": 0.1024,
"grad_norm": 0.08837890625,
"learning_rate": 0.0001,
"loss": 0.205,
"step": 640
},
{
"epoch": 0.1032,
"grad_norm": 0.140625,
"learning_rate": 0.0001,
"loss": 0.183,
"step": 645
},
{
"epoch": 0.104,
"grad_norm": 0.1630859375,
"learning_rate": 0.0001,
"loss": 0.1847,
"step": 650
},
{
"epoch": 0.1048,
"grad_norm": 0.162109375,
"learning_rate": 0.0001,
"loss": 0.1508,
"step": 655
},
{
"epoch": 0.1056,
"grad_norm": 0.271484375,
"learning_rate": 0.0001,
"loss": 0.209,
"step": 660
},
{
"epoch": 0.1064,
"grad_norm": 0.1259765625,
"learning_rate": 0.0001,
"loss": 0.1998,
"step": 665
},
{
"epoch": 0.1072,
"grad_norm": 0.1533203125,
"learning_rate": 0.0001,
"loss": 0.2266,
"step": 670
},
{
"epoch": 0.108,
"grad_norm": 0.1826171875,
"learning_rate": 0.0001,
"loss": 0.2164,
"step": 675
},
{
"epoch": 0.1088,
"grad_norm": 0.1298828125,
"learning_rate": 0.0001,
"loss": 0.1764,
"step": 680
},
{
"epoch": 0.1096,
"grad_norm": 0.1767578125,
"learning_rate": 0.0001,
"loss": 0.2615,
"step": 685
},
{
"epoch": 0.1104,
"grad_norm": 0.1279296875,
"learning_rate": 0.0001,
"loss": 0.2375,
"step": 690
},
{
"epoch": 0.1112,
"grad_norm": 0.189453125,
"learning_rate": 0.0001,
"loss": 0.2404,
"step": 695
},
{
"epoch": 0.112,
"grad_norm": 0.134765625,
"learning_rate": 0.0001,
"loss": 0.2704,
"step": 700
},
{
"epoch": 0.1128,
"grad_norm": 0.11669921875,
"learning_rate": 0.0001,
"loss": 0.1269,
"step": 705
},
{
"epoch": 0.1136,
"grad_norm": 0.1357421875,
"learning_rate": 0.0001,
"loss": 0.1691,
"step": 710
},
{
"epoch": 0.1144,
"grad_norm": 0.13671875,
"learning_rate": 0.0001,
"loss": 0.2307,
"step": 715
},
{
"epoch": 0.1152,
"grad_norm": 0.169921875,
"learning_rate": 0.0001,
"loss": 0.2473,
"step": 720
},
{
"epoch": 0.116,
"grad_norm": 0.1318359375,
"learning_rate": 0.0001,
"loss": 0.214,
"step": 725
},
{
"epoch": 0.1168,
"grad_norm": 0.09326171875,
"learning_rate": 0.0001,
"loss": 0.1435,
"step": 730
},
{
"epoch": 0.1176,
"grad_norm": 0.125,
"learning_rate": 0.0001,
"loss": 0.1778,
"step": 735
},
{
"epoch": 0.1184,
"grad_norm": 0.12451171875,
"learning_rate": 0.0001,
"loss": 0.2369,
"step": 740
},
{
"epoch": 0.1192,
"grad_norm": 0.09912109375,
"learning_rate": 0.0001,
"loss": 0.2128,
"step": 745
},
{
"epoch": 0.12,
"grad_norm": 0.1572265625,
"learning_rate": 0.0001,
"loss": 0.2003,
"step": 750
},
{
"epoch": 0.1208,
"grad_norm": 0.154296875,
"learning_rate": 0.0001,
"loss": 0.1169,
"step": 755
},
{
"epoch": 0.1216,
"grad_norm": 0.10888671875,
"learning_rate": 0.0001,
"loss": 0.2335,
"step": 760
},
{
"epoch": 0.1224,
"grad_norm": 0.1318359375,
"learning_rate": 0.0001,
"loss": 0.2036,
"step": 765
},
{
"epoch": 0.1232,
"grad_norm": 0.0810546875,
"learning_rate": 0.0001,
"loss": 0.1927,
"step": 770
},
{
"epoch": 0.124,
"grad_norm": 0.61328125,
"learning_rate": 0.0001,
"loss": 0.2357,
"step": 775
},
{
"epoch": 0.1248,
"grad_norm": 0.11865234375,
"learning_rate": 0.0001,
"loss": 0.2313,
"step": 780
},
{
"epoch": 0.1256,
"grad_norm": 0.203125,
"learning_rate": 0.0001,
"loss": 0.1845,
"step": 785
},
{
"epoch": 0.1264,
"grad_norm": 0.09912109375,
"learning_rate": 0.0001,
"loss": 0.2271,
"step": 790
},
{
"epoch": 0.1272,
"grad_norm": 0.171875,
"learning_rate": 0.0001,
"loss": 0.2022,
"step": 795
},
{
"epoch": 0.128,
"grad_norm": 0.1162109375,
"learning_rate": 0.0001,
"loss": 0.1503,
"step": 800
},
{
"epoch": 0.1288,
"grad_norm": 0.09326171875,
"learning_rate": 0.0001,
"loss": 0.0877,
"step": 805
},
{
"epoch": 0.1296,
"grad_norm": 0.1669921875,
"learning_rate": 0.0001,
"loss": 0.1888,
"step": 810
},
{
"epoch": 0.1304,
"grad_norm": 0.11962890625,
"learning_rate": 0.0001,
"loss": 0.2269,
"step": 815
},
{
"epoch": 0.1312,
"grad_norm": 0.1494140625,
"learning_rate": 0.0001,
"loss": 0.2482,
"step": 820
},
{
"epoch": 0.132,
"grad_norm": 0.1142578125,
"learning_rate": 0.0001,
"loss": 0.1865,
"step": 825
},
{
"epoch": 0.1328,
"grad_norm": 0.11962890625,
"learning_rate": 0.0001,
"loss": 0.1741,
"step": 830
},
{
"epoch": 0.1336,
"grad_norm": 0.2275390625,
"learning_rate": 0.0001,
"loss": 0.2137,
"step": 835
},
{
"epoch": 0.1344,
"grad_norm": 0.146484375,
"learning_rate": 0.0001,
"loss": 0.1883,
"step": 840
},
{
"epoch": 0.1352,
"grad_norm": 0.126953125,
"learning_rate": 0.0001,
"loss": 0.2225,
"step": 845
},
{
"epoch": 0.136,
"grad_norm": 0.1591796875,
"learning_rate": 0.0001,
"loss": 0.2226,
"step": 850
},
{
"epoch": 0.1368,
"grad_norm": 0.10205078125,
"learning_rate": 0.0001,
"loss": 0.1197,
"step": 855
},
{
"epoch": 0.1376,
"grad_norm": 0.1162109375,
"learning_rate": 0.0001,
"loss": 0.2416,
"step": 860
},
{
"epoch": 0.1384,
"grad_norm": 0.1494140625,
"learning_rate": 0.0001,
"loss": 0.2261,
"step": 865
},
{
"epoch": 0.1392,
"grad_norm": 0.1142578125,
"learning_rate": 0.0001,
"loss": 0.1796,
"step": 870
},
{
"epoch": 0.14,
"grad_norm": 0.103515625,
"learning_rate": 0.0001,
"loss": 0.2087,
"step": 875
},
{
"epoch": 0.1408,
"grad_norm": 0.1396484375,
"learning_rate": 0.0001,
"loss": 0.2171,
"step": 880
},
{
"epoch": 0.1416,
"grad_norm": 0.1171875,
"learning_rate": 0.0001,
"loss": 0.1997,
"step": 885
},
{
"epoch": 0.1424,
"grad_norm": 0.11572265625,
"learning_rate": 0.0001,
"loss": 0.2481,
"step": 890
},
{
"epoch": 0.1432,
"grad_norm": 0.12109375,
"learning_rate": 0.0001,
"loss": 0.1914,
"step": 895
},
{
"epoch": 0.144,
"grad_norm": 0.0859375,
"learning_rate": 0.0001,
"loss": 0.2108,
"step": 900
},
{
"epoch": 0.1448,
"grad_norm": 0.09716796875,
"learning_rate": 0.0001,
"loss": 0.1172,
"step": 905
},
{
"epoch": 0.1456,
"grad_norm": 0.18359375,
"learning_rate": 0.0001,
"loss": 0.2263,
"step": 910
},
{
"epoch": 0.1464,
"grad_norm": 0.1025390625,
"learning_rate": 0.0001,
"loss": 0.2087,
"step": 915
},
{
"epoch": 0.1472,
"grad_norm": 0.1767578125,
"learning_rate": 0.0001,
"loss": 0.1891,
"step": 920
},
{
"epoch": 0.148,
"grad_norm": 0.12353515625,
"learning_rate": 0.0001,
"loss": 0.2137,
"step": 925
},
{
"epoch": 0.1488,
"grad_norm": 0.2109375,
"learning_rate": 0.0001,
"loss": 0.2366,
"step": 930
},
{
"epoch": 0.1496,
"grad_norm": 0.10791015625,
"learning_rate": 0.0001,
"loss": 0.1942,
"step": 935
},
{
"epoch": 0.1504,
"grad_norm": 0.11376953125,
"learning_rate": 0.0001,
"loss": 0.2417,
"step": 940
},
{
"epoch": 0.1512,
"grad_norm": 0.1513671875,
"learning_rate": 0.0001,
"loss": 0.1778,
"step": 945
},
{
"epoch": 0.152,
"grad_norm": 0.0634765625,
"learning_rate": 0.0001,
"loss": 0.1067,
"step": 950
},
{
"epoch": 0.1528,
"grad_norm": 0.1220703125,
"learning_rate": 0.0001,
"loss": 0.1211,
"step": 955
},
{
"epoch": 0.1536,
"grad_norm": 0.1240234375,
"learning_rate": 0.0001,
"loss": 0.1733,
"step": 960
},
{
"epoch": 0.1544,
"grad_norm": 0.72265625,
"learning_rate": 0.0001,
"loss": 0.2172,
"step": 965
},
{
"epoch": 0.1552,
"grad_norm": 0.1279296875,
"learning_rate": 0.0001,
"loss": 0.1941,
"step": 970
},
{
"epoch": 0.156,
"grad_norm": 0.13671875,
"learning_rate": 0.0001,
"loss": 0.1975,
"step": 975
},
{
"epoch": 0.1568,
"grad_norm": 0.1494140625,
"learning_rate": 0.0001,
"loss": 0.1821,
"step": 980
},
{
"epoch": 0.1576,
"grad_norm": 0.1279296875,
"learning_rate": 0.0001,
"loss": 0.2076,
"step": 985
},
{
"epoch": 0.1584,
"grad_norm": 0.10986328125,
"learning_rate": 0.0001,
"loss": 0.1921,
"step": 990
},
{
"epoch": 0.1592,
"grad_norm": 0.09814453125,
"learning_rate": 0.0001,
"loss": 0.1943,
"step": 995
},
{
"epoch": 0.16,
"grad_norm": 0.109375,
"learning_rate": 0.0001,
"loss": 0.1589,
"step": 1000
},
{
"epoch": 0.1608,
"grad_norm": 0.1025390625,
"learning_rate": 0.0001,
"loss": 0.1407,
"step": 1005
},
{
"epoch": 0.1616,
"grad_norm": 0.0751953125,
"learning_rate": 0.0001,
"loss": 0.1717,
"step": 1010
},
{
"epoch": 0.1624,
"grad_norm": 0.1806640625,
"learning_rate": 0.0001,
"loss": 0.2239,
"step": 1015
},
{
"epoch": 0.1632,
"grad_norm": 0.1171875,
"learning_rate": 0.0001,
"loss": 0.1973,
"step": 1020
},
{
"epoch": 0.164,
"grad_norm": 0.11572265625,
"learning_rate": 0.0001,
"loss": 0.2205,
"step": 1025
},
{
"epoch": 0.1648,
"grad_norm": 0.1171875,
"learning_rate": 0.0001,
"loss": 0.2101,
"step": 1030
},
{
"epoch": 0.1656,
"grad_norm": 0.251953125,
"learning_rate": 0.0001,
"loss": 0.2401,
"step": 1035
},
{
"epoch": 0.1664,
"grad_norm": 0.130859375,
"learning_rate": 0.0001,
"loss": 0.2134,
"step": 1040
},
{
"epoch": 0.1672,
"grad_norm": 0.162109375,
"learning_rate": 0.0001,
"loss": 0.2182,
"step": 1045
},
{
"epoch": 0.168,
"grad_norm": 0.158203125,
"learning_rate": 0.0001,
"loss": 0.2012,
"step": 1050
},
{
"epoch": 0.1688,
"grad_norm": 0.142578125,
"learning_rate": 0.0001,
"loss": 0.1434,
"step": 1055
},
{
"epoch": 0.1696,
"grad_norm": 0.11767578125,
"learning_rate": 0.0001,
"loss": 0.1887,
"step": 1060
},
{
"epoch": 0.1704,
"grad_norm": 0.1279296875,
"learning_rate": 0.0001,
"loss": 0.1921,
"step": 1065
},
{
"epoch": 0.1712,
"grad_norm": 0.126953125,
"learning_rate": 0.0001,
"loss": 0.2235,
"step": 1070
},
{
"epoch": 0.172,
"grad_norm": 0.1318359375,
"learning_rate": 0.0001,
"loss": 0.2512,
"step": 1075
},
{
"epoch": 0.1728,
"grad_norm": 0.08642578125,
"learning_rate": 0.0001,
"loss": 0.2558,
"step": 1080
},
{
"epoch": 0.1736,
"grad_norm": 0.130859375,
"learning_rate": 0.0001,
"loss": 0.1705,
"step": 1085
},
{
"epoch": 0.1744,
"grad_norm": 0.1748046875,
"learning_rate": 0.0001,
"loss": 0.2395,
"step": 1090
},
{
"epoch": 0.1752,
"grad_norm": 0.119140625,
"learning_rate": 0.0001,
"loss": 0.2307,
"step": 1095
},
{
"epoch": 0.176,
"grad_norm": 0.09326171875,
"learning_rate": 0.0001,
"loss": 0.1668,
"step": 1100
},
{
"epoch": 0.1768,
"grad_norm": 0.10693359375,
"learning_rate": 0.0001,
"loss": 0.1151,
"step": 1105
},
{
"epoch": 0.1776,
"grad_norm": 0.2490234375,
"learning_rate": 0.0001,
"loss": 0.1921,
"step": 1110
},
{
"epoch": 0.1784,
"grad_norm": 0.1376953125,
"learning_rate": 0.0001,
"loss": 0.1684,
"step": 1115
},
{
"epoch": 0.1792,
"grad_norm": 0.12451171875,
"learning_rate": 0.0001,
"loss": 0.2009,
"step": 1120
},
{
"epoch": 0.18,
"grad_norm": 0.0771484375,
"learning_rate": 0.0001,
"loss": 0.1867,
"step": 1125
},
{
"epoch": 0.1808,
"grad_norm": 0.1376953125,
"learning_rate": 0.0001,
"loss": 0.1855,
"step": 1130
},
{
"epoch": 0.1816,
"grad_norm": 0.1142578125,
"learning_rate": 0.0001,
"loss": 0.1649,
"step": 1135
},
{
"epoch": 0.1824,
"grad_norm": 0.1787109375,
"learning_rate": 0.0001,
"loss": 0.2225,
"step": 1140
},
{
"epoch": 0.1832,
"grad_norm": 0.1572265625,
"learning_rate": 0.0001,
"loss": 0.2116,
"step": 1145
},
{
"epoch": 0.184,
"grad_norm": 0.140625,
"learning_rate": 0.0001,
"loss": 0.1438,
"step": 1150
},
{
"epoch": 0.1848,
"grad_norm": 0.2734375,
"learning_rate": 0.0001,
"loss": 0.1191,
"step": 1155
},
{
"epoch": 0.1856,
"grad_norm": 0.1396484375,
"learning_rate": 0.0001,
"loss": 0.17,
"step": 1160
},
{
"epoch": 0.1864,
"grad_norm": 0.1591796875,
"learning_rate": 0.0001,
"loss": 0.1978,
"step": 1165
},
{
"epoch": 0.1872,
"grad_norm": 0.1279296875,
"learning_rate": 0.0001,
"loss": 0.1978,
"step": 1170
},
{
"epoch": 0.188,
"grad_norm": 0.080078125,
"learning_rate": 0.0001,
"loss": 0.2512,
"step": 1175
},
{
"epoch": 0.1888,
"grad_norm": 0.125,
"learning_rate": 0.0001,
"loss": 0.1788,
"step": 1180
},
{
"epoch": 0.1896,
"grad_norm": 0.11279296875,
"learning_rate": 0.0001,
"loss": 0.2334,
"step": 1185
},
{
"epoch": 0.1904,
"grad_norm": 0.091796875,
"learning_rate": 0.0001,
"loss": 0.2125,
"step": 1190
},
{
"epoch": 0.1912,
"grad_norm": 0.154296875,
"learning_rate": 0.0001,
"loss": 0.1803,
"step": 1195
},
{
"epoch": 0.192,
"grad_norm": 0.1416015625,
"learning_rate": 0.0001,
"loss": 0.1754,
"step": 1200
},
{
"epoch": 0.1928,
"grad_norm": 0.1337890625,
"learning_rate": 0.0001,
"loss": 0.1199,
"step": 1205
},
{
"epoch": 0.1936,
"grad_norm": 0.11376953125,
"learning_rate": 0.0001,
"loss": 0.2053,
"step": 1210
},
{
"epoch": 0.1944,
"grad_norm": 0.14453125,
"learning_rate": 0.0001,
"loss": 0.1595,
"step": 1215
},
{
"epoch": 0.1952,
"grad_norm": 0.10693359375,
"learning_rate": 0.0001,
"loss": 0.1964,
"step": 1220
},
{
"epoch": 0.196,
"grad_norm": 0.1201171875,
"learning_rate": 0.0001,
"loss": 0.1937,
"step": 1225
},
{
"epoch": 0.1968,
"grad_norm": 0.158203125,
"learning_rate": 0.0001,
"loss": 0.2097,
"step": 1230
},
{
"epoch": 0.1976,
"grad_norm": 0.138671875,
"learning_rate": 0.0001,
"loss": 0.1948,
"step": 1235
},
{
"epoch": 0.1984,
"grad_norm": 0.078125,
"learning_rate": 0.0001,
"loss": 0.1963,
"step": 1240
},
{
"epoch": 0.1992,
"grad_norm": 0.11962890625,
"learning_rate": 0.0001,
"loss": 0.2814,
"step": 1245
},
{
"epoch": 0.2,
"grad_norm": 0.2021484375,
"learning_rate": 0.0001,
"loss": 0.1523,
"step": 1250
},
{
"epoch": 0.2008,
"grad_norm": 0.125,
"learning_rate": 0.0001,
"loss": 0.0996,
"step": 1255
},
{
"epoch": 0.2016,
"grad_norm": 0.15625,
"learning_rate": 0.0001,
"loss": 0.2334,
"step": 1260
},
{
"epoch": 0.2024,
"grad_norm": 0.12109375,
"learning_rate": 0.0001,
"loss": 0.1847,
"step": 1265
},
{
"epoch": 0.2032,
"grad_norm": 0.173828125,
"learning_rate": 0.0001,
"loss": 0.2321,
"step": 1270
},
{
"epoch": 0.204,
"grad_norm": 0.1015625,
"learning_rate": 0.0001,
"loss": 0.2159,
"step": 1275
},
{
"epoch": 0.2048,
"grad_norm": 0.125,
"learning_rate": 0.0001,
"loss": 0.2117,
"step": 1280
},
{
"epoch": 0.2056,
"grad_norm": 0.16796875,
"learning_rate": 0.0001,
"loss": 0.1675,
"step": 1285
},
{
"epoch": 0.2064,
"grad_norm": 0.2041015625,
"learning_rate": 0.0001,
"loss": 0.2439,
"step": 1290
},
{
"epoch": 0.2072,
"grad_norm": 0.1044921875,
"learning_rate": 0.0001,
"loss": 0.2054,
"step": 1295
},
{
"epoch": 0.208,
"grad_norm": 0.1279296875,
"learning_rate": 0.0001,
"loss": 0.2,
"step": 1300
},
{
"epoch": 0.2088,
"grad_norm": 0.1123046875,
"learning_rate": 0.0001,
"loss": 0.1237,
"step": 1305
},
{
"epoch": 0.2096,
"grad_norm": 0.10888671875,
"learning_rate": 0.0001,
"loss": 0.197,
"step": 1310
},
{
"epoch": 0.2104,
"grad_norm": 0.10400390625,
"learning_rate": 0.0001,
"loss": 0.2242,
"step": 1315
},
{
"epoch": 0.2112,
"grad_norm": 0.1103515625,
"learning_rate": 0.0001,
"loss": 0.1983,
"step": 1320
},
{
"epoch": 0.212,
"grad_norm": 0.0966796875,
"learning_rate": 0.0001,
"loss": 0.1903,
"step": 1325
},
{
"epoch": 0.2128,
"grad_norm": 0.12255859375,
"learning_rate": 0.0001,
"loss": 0.2123,
"step": 1330
},
{
"epoch": 0.2136,
"grad_norm": 0.10888671875,
"learning_rate": 0.0001,
"loss": 0.1954,
"step": 1335
},
{
"epoch": 0.2144,
"grad_norm": 0.1328125,
"learning_rate": 0.0001,
"loss": 0.216,
"step": 1340
},
{
"epoch": 0.2152,
"grad_norm": 0.10888671875,
"learning_rate": 0.0001,
"loss": 0.2259,
"step": 1345
},
{
"epoch": 0.216,
"grad_norm": 0.146484375,
"learning_rate": 0.0001,
"loss": 0.1752,
"step": 1350
},
{
"epoch": 0.2168,
"grad_norm": 0.12158203125,
"learning_rate": 0.0001,
"loss": 0.1258,
"step": 1355
},
{
"epoch": 0.2176,
"grad_norm": 0.09423828125,
"learning_rate": 0.0001,
"loss": 0.1522,
"step": 1360
},
{
"epoch": 0.2184,
"grad_norm": 0.138671875,
"learning_rate": 0.0001,
"loss": 0.2063,
"step": 1365
},
{
"epoch": 0.2192,
"grad_norm": 0.1376953125,
"learning_rate": 0.0001,
"loss": 0.2096,
"step": 1370
},
{
"epoch": 0.22,
"grad_norm": 0.11767578125,
"learning_rate": 0.0001,
"loss": 0.2009,
"step": 1375
},
{
"epoch": 0.2208,
"grad_norm": 0.1689453125,
"learning_rate": 0.0001,
"loss": 0.2258,
"step": 1380
},
{
"epoch": 0.2216,
"grad_norm": 0.09521484375,
"learning_rate": 0.0001,
"loss": 0.1929,
"step": 1385
},
{
"epoch": 0.2224,
"grad_norm": 0.0966796875,
"learning_rate": 0.0001,
"loss": 0.1534,
"step": 1390
},
{
"epoch": 0.2232,
"grad_norm": 0.09423828125,
"learning_rate": 0.0001,
"loss": 0.1837,
"step": 1395
},
{
"epoch": 0.224,
"grad_norm": 0.111328125,
"learning_rate": 0.0001,
"loss": 0.1965,
"step": 1400
},
{
"epoch": 0.2248,
"grad_norm": 0.1376953125,
"learning_rate": 0.0001,
"loss": 0.0743,
"step": 1405
},
{
"epoch": 0.2256,
"grad_norm": 0.11572265625,
"learning_rate": 0.0001,
"loss": 0.1517,
"step": 1410
},
{
"epoch": 0.2264,
"grad_norm": 0.18359375,
"learning_rate": 0.0001,
"loss": 0.2151,
"step": 1415
},
{
"epoch": 0.2272,
"grad_norm": 0.1005859375,
"learning_rate": 0.0001,
"loss": 0.222,
"step": 1420
},
{
"epoch": 0.228,
"grad_norm": 0.10791015625,
"learning_rate": 0.0001,
"loss": 0.2281,
"step": 1425
},
{
"epoch": 0.2288,
"grad_norm": 0.1650390625,
"learning_rate": 0.0001,
"loss": 0.2195,
"step": 1430
},
{
"epoch": 0.2296,
"grad_norm": 0.087890625,
"learning_rate": 0.0001,
"loss": 0.1634,
"step": 1435
},
{
"epoch": 0.2304,
"grad_norm": 0.13671875,
"learning_rate": 0.0001,
"loss": 0.2158,
"step": 1440
},
{
"epoch": 0.2312,
"grad_norm": 0.146484375,
"learning_rate": 0.0001,
"loss": 0.2034,
"step": 1445
},
{
"epoch": 0.232,
"grad_norm": 0.080078125,
"learning_rate": 0.0001,
"loss": 0.1524,
"step": 1450
},
{
"epoch": 0.2328,
"grad_norm": 0.10009765625,
"learning_rate": 0.0001,
"loss": 0.1106,
"step": 1455
},
{
"epoch": 0.2336,
"grad_norm": 0.10107421875,
"learning_rate": 0.0001,
"loss": 0.1648,
"step": 1460
},
{
"epoch": 0.2344,
"grad_norm": 0.123046875,
"learning_rate": 0.0001,
"loss": 0.2355,
"step": 1465
},
{
"epoch": 0.2352,
"grad_norm": 0.078125,
"learning_rate": 0.0001,
"loss": 0.2031,
"step": 1470
},
{
"epoch": 0.236,
"grad_norm": 0.1513671875,
"learning_rate": 0.0001,
"loss": 0.2039,
"step": 1475
},
{
"epoch": 0.2368,
"grad_norm": 0.1298828125,
"learning_rate": 0.0001,
"loss": 0.2008,
"step": 1480
},
{
"epoch": 0.2376,
"grad_norm": 0.126953125,
"learning_rate": 0.0001,
"loss": 0.1866,
"step": 1485
},
{
"epoch": 0.2384,
"grad_norm": 0.1689453125,
"learning_rate": 0.0001,
"loss": 0.1931,
"step": 1490
},
{
"epoch": 0.2392,
"grad_norm": 0.193359375,
"learning_rate": 0.0001,
"loss": 0.2332,
"step": 1495
},
{
"epoch": 0.24,
"grad_norm": 0.1298828125,
"learning_rate": 0.0001,
"loss": 0.1463,
"step": 1500
},
{
"epoch": 0.2408,
"grad_norm": 0.1357421875,
"learning_rate": 0.0001,
"loss": 0.0964,
"step": 1505
},
{
"epoch": 0.2416,
"grad_norm": 0.490234375,
"learning_rate": 0.0001,
"loss": 0.1877,
"step": 1510
},
{
"epoch": 0.2424,
"grad_norm": 0.1904296875,
"learning_rate": 0.0001,
"loss": 0.2122,
"step": 1515
},
{
"epoch": 0.2432,
"grad_norm": 0.18359375,
"learning_rate": 0.0001,
"loss": 0.2305,
"step": 1520
},
{
"epoch": 0.244,
"grad_norm": 0.1259765625,
"learning_rate": 0.0001,
"loss": 0.1769,
"step": 1525
},
{
"epoch": 0.2448,
"grad_norm": 0.2119140625,
"learning_rate": 0.0001,
"loss": 0.1768,
"step": 1530
},
{
"epoch": 0.2456,
"grad_norm": 0.09814453125,
"learning_rate": 0.0001,
"loss": 0.2186,
"step": 1535
},
{
"epoch": 0.2464,
"grad_norm": 0.12890625,
"learning_rate": 0.0001,
"loss": 0.1831,
"step": 1540
},
{
"epoch": 0.2472,
"grad_norm": 0.12158203125,
"learning_rate": 0.0001,
"loss": 0.1926,
"step": 1545
},
{
"epoch": 0.248,
"grad_norm": 0.18359375,
"learning_rate": 0.0001,
"loss": 0.1691,
"step": 1550
},
{
"epoch": 0.2488,
"grad_norm": 0.10546875,
"learning_rate": 0.0001,
"loss": 0.1169,
"step": 1555
},
{
"epoch": 0.2496,
"grad_norm": 0.162109375,
"learning_rate": 0.0001,
"loss": 0.2345,
"step": 1560
},
{
"epoch": 0.2504,
"grad_norm": 0.1162109375,
"learning_rate": 0.0001,
"loss": 0.1324,
"step": 1565
},
{
"epoch": 0.2512,
"grad_norm": 0.0908203125,
"learning_rate": 0.0001,
"loss": 0.1739,
"step": 1570
},
{
"epoch": 0.252,
"grad_norm": 0.1533203125,
"learning_rate": 0.0001,
"loss": 0.2122,
"step": 1575
},
{
"epoch": 0.2528,
"grad_norm": 0.057861328125,
"learning_rate": 0.0001,
"loss": 0.2077,
"step": 1580
},
{
"epoch": 0.2536,
"grad_norm": 0.12890625,
"learning_rate": 0.0001,
"loss": 0.2082,
"step": 1585
},
{
"epoch": 0.2544,
"grad_norm": 0.0986328125,
"learning_rate": 0.0001,
"loss": 0.1675,
"step": 1590
},
{
"epoch": 0.2552,
"grad_norm": 0.1513671875,
"learning_rate": 0.0001,
"loss": 0.1928,
"step": 1595
},
{
"epoch": 0.256,
"grad_norm": 0.150390625,
"learning_rate": 0.0001,
"loss": 0.1549,
"step": 1600
},
{
"epoch": 0.2568,
"grad_norm": 0.08837890625,
"learning_rate": 0.0001,
"loss": 0.1119,
"step": 1605
},
{
"epoch": 0.2576,
"grad_norm": 0.12353515625,
"learning_rate": 0.0001,
"loss": 0.232,
"step": 1610
},
{
"epoch": 0.2584,
"grad_norm": 0.09765625,
"learning_rate": 0.0001,
"loss": 0.1867,
"step": 1615
},
{
"epoch": 0.2592,
"grad_norm": 0.11865234375,
"learning_rate": 0.0001,
"loss": 0.1444,
"step": 1620
},
{
"epoch": 0.26,
"grad_norm": 0.1396484375,
"learning_rate": 0.0001,
"loss": 0.1955,
"step": 1625
},
{
"epoch": 0.2608,
"grad_norm": 0.09619140625,
"learning_rate": 0.0001,
"loss": 0.1911,
"step": 1630
},
{
"epoch": 0.2616,
"grad_norm": 0.1240234375,
"learning_rate": 0.0001,
"loss": 0.2459,
"step": 1635
},
{
"epoch": 0.2624,
"grad_norm": 0.15625,
"learning_rate": 0.0001,
"loss": 0.253,
"step": 1640
},
{
"epoch": 0.2632,
"grad_norm": 0.1640625,
"learning_rate": 0.0001,
"loss": 0.2522,
"step": 1645
},
{
"epoch": 0.264,
"grad_norm": 0.11181640625,
"learning_rate": 0.0001,
"loss": 0.1855,
"step": 1650
},
{
"epoch": 0.2648,
"grad_norm": 0.1416015625,
"learning_rate": 0.0001,
"loss": 0.0944,
"step": 1655
},
{
"epoch": 0.2656,
"grad_norm": 0.087890625,
"learning_rate": 0.0001,
"loss": 0.1537,
"step": 1660
},
{
"epoch": 0.2664,
"grad_norm": 0.1318359375,
"learning_rate": 0.0001,
"loss": 0.224,
"step": 1665
},
{
"epoch": 0.2672,
"grad_norm": 0.1328125,
"learning_rate": 0.0001,
"loss": 0.2053,
"step": 1670
},
{
"epoch": 0.268,
"grad_norm": 0.09228515625,
"learning_rate": 0.0001,
"loss": 0.2087,
"step": 1675
},
{
"epoch": 0.2688,
"grad_norm": 0.10107421875,
"learning_rate": 0.0001,
"loss": 0.2029,
"step": 1680
},
{
"epoch": 0.2696,
"grad_norm": 0.09716796875,
"learning_rate": 0.0001,
"loss": 0.2013,
"step": 1685
},
{
"epoch": 0.2704,
"grad_norm": 0.10107421875,
"learning_rate": 0.0001,
"loss": 0.2099,
"step": 1690
},
{
"epoch": 0.2712,
"grad_norm": 0.0888671875,
"learning_rate": 0.0001,
"loss": 0.2187,
"step": 1695
},
{
"epoch": 0.272,
"grad_norm": 0.05078125,
"learning_rate": 0.0001,
"loss": 0.128,
"step": 1700
},
{
"epoch": 0.2728,
"grad_norm": 0.125,
"learning_rate": 0.0001,
"loss": 0.1487,
"step": 1705
},
{
"epoch": 0.2736,
"grad_norm": 0.1083984375,
"learning_rate": 0.0001,
"loss": 0.1737,
"step": 1710
},
{
"epoch": 0.2744,
"grad_norm": 0.19921875,
"learning_rate": 0.0001,
"loss": 0.169,
"step": 1715
},
{
"epoch": 0.2752,
"grad_norm": 0.134765625,
"learning_rate": 0.0001,
"loss": 0.1563,
"step": 1720
},
{
"epoch": 0.276,
"grad_norm": 0.1455078125,
"learning_rate": 0.0001,
"loss": 0.2095,
"step": 1725
},
{
"epoch": 0.2768,
"grad_norm": 0.10888671875,
"learning_rate": 0.0001,
"loss": 0.1693,
"step": 1730
},
{
"epoch": 0.2776,
"grad_norm": 0.0947265625,
"learning_rate": 0.0001,
"loss": 0.2359,
"step": 1735
},
{
"epoch": 0.2784,
"grad_norm": 0.1298828125,
"learning_rate": 0.0001,
"loss": 0.2217,
"step": 1740
},
{
"epoch": 0.2792,
"grad_norm": 0.1455078125,
"learning_rate": 0.0001,
"loss": 0.218,
"step": 1745
},
{
"epoch": 0.28,
"grad_norm": 0.09912109375,
"learning_rate": 0.0001,
"loss": 0.1281,
"step": 1750
},
{
"epoch": 0.2808,
"grad_norm": 0.130859375,
"learning_rate": 0.0001,
"loss": 0.1161,
"step": 1755
},
{
"epoch": 0.2816,
"grad_norm": 0.0986328125,
"learning_rate": 0.0001,
"loss": 0.1952,
"step": 1760
},
{
"epoch": 0.2824,
"grad_norm": 0.14453125,
"learning_rate": 0.0001,
"loss": 0.1804,
"step": 1765
},
{
"epoch": 0.2832,
"grad_norm": 0.154296875,
"learning_rate": 0.0001,
"loss": 0.1948,
"step": 1770
},
{
"epoch": 0.284,
"grad_norm": 0.279296875,
"learning_rate": 0.0001,
"loss": 0.2308,
"step": 1775
},
{
"epoch": 0.2848,
"grad_norm": 0.1298828125,
"learning_rate": 0.0001,
"loss": 0.1832,
"step": 1780
},
{
"epoch": 0.2856,
"grad_norm": 0.154296875,
"learning_rate": 0.0001,
"loss": 0.216,
"step": 1785
},
{
"epoch": 0.2864,
"grad_norm": 0.13671875,
"learning_rate": 0.0001,
"loss": 0.211,
"step": 1790
},
{
"epoch": 0.2872,
"grad_norm": 0.15234375,
"learning_rate": 0.0001,
"loss": 0.2424,
"step": 1795
},
{
"epoch": 0.288,
"grad_norm": 0.1552734375,
"learning_rate": 0.0001,
"loss": 0.1359,
"step": 1800
},
{
"epoch": 0.2888,
"grad_norm": 0.1162109375,
"learning_rate": 0.0001,
"loss": 0.159,
"step": 1805
},
{
"epoch": 0.2896,
"grad_norm": 0.1416015625,
"learning_rate": 0.0001,
"loss": 0.1834,
"step": 1810
},
{
"epoch": 0.2904,
"grad_norm": 0.123046875,
"learning_rate": 0.0001,
"loss": 0.2176,
"step": 1815
},
{
"epoch": 0.2912,
"grad_norm": 0.1337890625,
"learning_rate": 0.0001,
"loss": 0.2197,
"step": 1820
},
{
"epoch": 0.292,
"grad_norm": 0.1513671875,
"learning_rate": 0.0001,
"loss": 0.2282,
"step": 1825
},
{
"epoch": 0.2928,
"grad_norm": 0.099609375,
"learning_rate": 0.0001,
"loss": 0.2212,
"step": 1830
},
{
"epoch": 0.2936,
"grad_norm": 0.1455078125,
"learning_rate": 0.0001,
"loss": 0.1991,
"step": 1835
},
{
"epoch": 0.2944,
"grad_norm": 0.1103515625,
"learning_rate": 0.0001,
"loss": 0.1688,
"step": 1840
},
{
"epoch": 0.2952,
"grad_norm": 0.1240234375,
"learning_rate": 0.0001,
"loss": 0.1543,
"step": 1845
},
{
"epoch": 0.296,
"grad_norm": 0.076171875,
"learning_rate": 0.0001,
"loss": 0.162,
"step": 1850
},
{
"epoch": 0.2968,
"grad_norm": 0.1337890625,
"learning_rate": 0.0001,
"loss": 0.1246,
"step": 1855
},
{
"epoch": 0.2976,
"grad_norm": 0.07421875,
"learning_rate": 0.0001,
"loss": 0.1698,
"step": 1860
},
{
"epoch": 0.2984,
"grad_norm": 0.125,
"learning_rate": 0.0001,
"loss": 0.2069,
"step": 1865
},
{
"epoch": 0.2992,
"grad_norm": 0.1181640625,
"learning_rate": 0.0001,
"loss": 0.1932,
"step": 1870
},
{
"epoch": 0.3,
"grad_norm": 0.1591796875,
"learning_rate": 0.0001,
"loss": 0.167,
"step": 1875
},
{
"epoch": 0.3,
"step": 1875,
"total_flos": 7.012531902575002e+17,
"train_loss": 0.21302104190190632,
"train_runtime": 46178.7318,
"train_samples_per_second": 0.65,
"train_steps_per_second": 0.041
}
],
"logging_steps": 5,
"max_steps": 1875,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 90,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.012531902575002e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}