{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 420,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 0.0,
"loss": 5.3442,
"step": 2
},
{
"epoch": 0.01,
"learning_rate": 0.0,
"loss": 5.2525,
"step": 4
},
{
"epoch": 0.01,
"learning_rate": 2e-08,
"loss": 5.0768,
"step": 6
},
{
"epoch": 0.02,
"learning_rate": 6e-08,
"loss": 5.1381,
"step": 8
},
{
"epoch": 0.02,
"learning_rate": 8e-08,
"loss": 5.3956,
"step": 10
},
{
"epoch": 0.03,
"learning_rate": 1.2e-07,
"loss": 5.2933,
"step": 12
},
{
"epoch": 0.03,
"learning_rate": 1.6e-07,
"loss": 5.2991,
"step": 14
},
{
"epoch": 0.04,
"learning_rate": 2e-07,
"loss": 5.5463,
"step": 16
},
{
"epoch": 0.04,
"learning_rate": 2.4e-07,
"loss": 5.3438,
"step": 18
},
{
"epoch": 0.05,
"learning_rate": 2.6e-07,
"loss": 5.0126,
"step": 20
},
{
"epoch": 0.05,
"learning_rate": 3e-07,
"loss": 4.8808,
"step": 22
},
{
"epoch": 0.06,
"learning_rate": 3.4000000000000003e-07,
"loss": 4.5046,
"step": 24
},
{
"epoch": 0.06,
"learning_rate": 3.7999999999999996e-07,
"loss": 4.3779,
"step": 26
},
{
"epoch": 0.07,
"learning_rate": 4.1999999999999995e-07,
"loss": 4.2311,
"step": 28
},
{
"epoch": 0.07,
"learning_rate": 4.6e-07,
"loss": 4.0877,
"step": 30
},
{
"epoch": 0.08,
"learning_rate": 5e-07,
"loss": 4.0972,
"step": 32
},
{
"epoch": 0.08,
"learning_rate": 5.4e-07,
"loss": 4.1511,
"step": 34
},
{
"epoch": 0.09,
"learning_rate": 5.8e-07,
"loss": 3.9641,
"step": 36
},
{
"epoch": 0.09,
"learning_rate": 6.2e-07,
"loss": 3.8411,
"step": 38
},
{
"epoch": 0.1,
"learning_rate": 6.6e-07,
"loss": 3.5198,
"step": 40
},
{
"epoch": 0.1,
"learning_rate": 7e-07,
"loss": 3.5286,
"step": 42
},
{
"epoch": 0.1,
"learning_rate": 7.4e-07,
"loss": 3.8059,
"step": 44
},
{
"epoch": 0.11,
"learning_rate": 7.799999999999999e-07,
"loss": 3.374,
"step": 46
},
{
"epoch": 0.11,
"learning_rate": 8.199999999999999e-07,
"loss": 3.3654,
"step": 48
},
{
"epoch": 0.12,
"learning_rate": 8.599999999999999e-07,
"loss": 3.3283,
"step": 50
},
{
"epoch": 0.12,
"learning_rate": 9e-07,
"loss": 3.3164,
"step": 52
},
{
"epoch": 0.13,
"learning_rate": 9.399999999999999e-07,
"loss": 3.13,
"step": 54
},
{
"epoch": 0.13,
"learning_rate": 9.8e-07,
"loss": 2.9915,
"step": 56
},
{
"epoch": 0.14,
"learning_rate": 9.987341772151898e-07,
"loss": 2.7748,
"step": 58
},
{
"epoch": 0.14,
"learning_rate": 9.962025316455696e-07,
"loss": 2.8378,
"step": 60
},
{
"epoch": 0.15,
"learning_rate": 9.936708860759494e-07,
"loss": 2.7933,
"step": 62
},
{
"epoch": 0.15,
"learning_rate": 9.91139240506329e-07,
"loss": 2.7108,
"step": 64
},
{
"epoch": 0.16,
"learning_rate": 9.88607594936709e-07,
"loss": 2.4029,
"step": 66
},
{
"epoch": 0.16,
"learning_rate": 9.860759493670887e-07,
"loss": 2.4199,
"step": 68
},
{
"epoch": 0.17,
"learning_rate": 9.835443037974683e-07,
"loss": 2.3015,
"step": 70
},
{
"epoch": 0.17,
"learning_rate": 9.81012658227848e-07,
"loss": 2.4849,
"step": 72
},
{
"epoch": 0.18,
"learning_rate": 9.784810126582278e-07,
"loss": 2.265,
"step": 74
},
{
"epoch": 0.18,
"learning_rate": 9.759493670886076e-07,
"loss": 2.2422,
"step": 76
},
{
"epoch": 0.19,
"learning_rate": 9.734177215189873e-07,
"loss": 2.188,
"step": 78
},
{
"epoch": 0.19,
"learning_rate": 9.70886075949367e-07,
"loss": 2.1488,
"step": 80
},
{
"epoch": 0.2,
"learning_rate": 9.683544303797469e-07,
"loss": 2.0795,
"step": 82
},
{
"epoch": 0.2,
"learning_rate": 9.658227848101267e-07,
"loss": 1.9767,
"step": 84
},
{
"epoch": 0.2,
"learning_rate": 9.632911392405062e-07,
"loss": 1.9811,
"step": 86
},
{
"epoch": 0.21,
"learning_rate": 9.60759493670886e-07,
"loss": 2.0459,
"step": 88
},
{
"epoch": 0.21,
"learning_rate": 9.582278481012658e-07,
"loss": 1.936,
"step": 90
},
{
"epoch": 0.22,
"learning_rate": 9.556962025316455e-07,
"loss": 1.9373,
"step": 92
},
{
"epoch": 0.22,
"learning_rate": 9.531645569620253e-07,
"loss": 2.0272,
"step": 94
},
{
"epoch": 0.23,
"learning_rate": 9.50632911392405e-07,
"loss": 1.8032,
"step": 96
},
{
"epoch": 0.23,
"learning_rate": 9.481012658227847e-07,
"loss": 1.8799,
"step": 98
},
{
"epoch": 0.24,
"learning_rate": 9.455696202531646e-07,
"loss": 1.9145,
"step": 100
},
{
"epoch": 0.24,
"learning_rate": 9.430379746835443e-07,
"loss": 1.8476,
"step": 102
},
{
"epoch": 0.25,
"learning_rate": 9.40506329113924e-07,
"loss": 1.7027,
"step": 104
},
{
"epoch": 0.25,
"learning_rate": 9.379746835443037e-07,
"loss": 1.6791,
"step": 106
},
{
"epoch": 0.26,
"learning_rate": 9.354430379746835e-07,
"loss": 1.6089,
"step": 108
},
{
"epoch": 0.26,
"learning_rate": 9.329113924050633e-07,
"loss": 1.6315,
"step": 110
},
{
"epoch": 0.27,
"learning_rate": 9.303797468354429e-07,
"loss": 1.6194,
"step": 112
},
{
"epoch": 0.27,
"learning_rate": 9.278481012658227e-07,
"loss": 1.5367,
"step": 114
},
{
"epoch": 0.28,
"learning_rate": 9.253164556962026e-07,
"loss": 1.4617,
"step": 116
},
{
"epoch": 0.28,
"learning_rate": 9.227848101265822e-07,
"loss": 1.6019,
"step": 118
},
{
"epoch": 0.29,
"learning_rate": 9.20253164556962e-07,
"loss": 1.4418,
"step": 120
},
{
"epoch": 0.29,
"learning_rate": 9.177215189873418e-07,
"loss": 1.4306,
"step": 122
},
{
"epoch": 0.3,
"learning_rate": 9.151898734177214e-07,
"loss": 1.3711,
"step": 124
},
{
"epoch": 0.3,
"learning_rate": 9.126582278481012e-07,
"loss": 1.4102,
"step": 126
},
{
"epoch": 0.3,
"learning_rate": 9.10126582278481e-07,
"loss": 1.348,
"step": 128
},
{
"epoch": 0.31,
"learning_rate": 9.075949367088606e-07,
"loss": 1.374,
"step": 130
},
{
"epoch": 0.31,
"learning_rate": 9.050632911392405e-07,
"loss": 1.2833,
"step": 132
},
{
"epoch": 0.32,
"learning_rate": 9.025316455696203e-07,
"loss": 1.2588,
"step": 134
},
{
"epoch": 0.32,
"learning_rate": 9e-07,
"loss": 1.2172,
"step": 136
},
{
"epoch": 0.33,
"learning_rate": 8.974683544303797e-07,
"loss": 1.3046,
"step": 138
},
{
"epoch": 0.33,
"learning_rate": 8.949367088607594e-07,
"loss": 1.0875,
"step": 140
},
{
"epoch": 0.34,
"learning_rate": 8.924050632911392e-07,
"loss": 1.2346,
"step": 142
},
{
"epoch": 0.34,
"learning_rate": 8.898734177215189e-07,
"loss": 1.0975,
"step": 144
},
{
"epoch": 0.35,
"learning_rate": 8.873417721518986e-07,
"loss": 1.2068,
"step": 146
},
{
"epoch": 0.35,
"learning_rate": 8.848101265822785e-07,
"loss": 1.1378,
"step": 148
},
{
"epoch": 0.36,
"learning_rate": 8.822784810126583e-07,
"loss": 1.0359,
"step": 150
},
{
"epoch": 0.36,
"learning_rate": 8.797468354430379e-07,
"loss": 1.2252,
"step": 152
},
{
"epoch": 0.37,
"learning_rate": 8.772151898734177e-07,
"loss": 1.062,
"step": 154
},
{
"epoch": 0.37,
"learning_rate": 8.746835443037975e-07,
"loss": 0.9852,
"step": 156
},
{
"epoch": 0.38,
"learning_rate": 8.721518987341771e-07,
"loss": 1.0557,
"step": 158
},
{
"epoch": 0.38,
"learning_rate": 8.696202531645569e-07,
"loss": 0.9546,
"step": 160
},
{
"epoch": 0.39,
"learning_rate": 8.670886075949367e-07,
"loss": 0.9371,
"step": 162
},
{
"epoch": 0.39,
"learning_rate": 8.645569620253164e-07,
"loss": 0.9128,
"step": 164
},
{
"epoch": 0.4,
"learning_rate": 8.620253164556962e-07,
"loss": 0.9508,
"step": 166
},
{
"epoch": 0.4,
"learning_rate": 8.59493670886076e-07,
"loss": 1.0656,
"step": 168
},
{
"epoch": 0.4,
"learning_rate": 8.569620253164556e-07,
"loss": 0.8574,
"step": 170
},
{
"epoch": 0.41,
"learning_rate": 8.544303797468354e-07,
"loss": 0.7957,
"step": 172
},
{
"epoch": 0.41,
"learning_rate": 8.518987341772152e-07,
"loss": 0.8379,
"step": 174
},
{
"epoch": 0.42,
"learning_rate": 8.493670886075949e-07,
"loss": 0.7316,
"step": 176
},
{
"epoch": 0.42,
"learning_rate": 8.468354430379746e-07,
"loss": 0.8871,
"step": 178
},
{
"epoch": 0.43,
"learning_rate": 8.443037974683543e-07,
"loss": 0.7857,
"step": 180
},
{
"epoch": 0.43,
"learning_rate": 8.417721518987342e-07,
"loss": 0.6546,
"step": 182
},
{
"epoch": 0.44,
"learning_rate": 8.392405063291139e-07,
"loss": 0.7563,
"step": 184
},
{
"epoch": 0.44,
"learning_rate": 8.367088607594936e-07,
"loss": 0.6105,
"step": 186
},
{
"epoch": 0.45,
"learning_rate": 8.341772151898734e-07,
"loss": 0.6173,
"step": 188
},
{
"epoch": 0.45,
"learning_rate": 8.316455696202531e-07,
"loss": 0.536,
"step": 190
},
{
"epoch": 0.46,
"learning_rate": 8.291139240506328e-07,
"loss": 0.4445,
"step": 192
},
{
"epoch": 0.46,
"learning_rate": 8.265822784810126e-07,
"loss": 0.377,
"step": 194
},
{
"epoch": 0.47,
"learning_rate": 8.240506329113924e-07,
"loss": 0.4066,
"step": 196
},
{
"epoch": 0.47,
"learning_rate": 8.215189873417721e-07,
"loss": 0.3306,
"step": 198
},
{
"epoch": 0.48,
"learning_rate": 8.189873417721519e-07,
"loss": 0.3437,
"step": 200
},
{
"epoch": 0.48,
"learning_rate": 8.164556962025317e-07,
"loss": 0.3812,
"step": 202
},
{
"epoch": 0.49,
"learning_rate": 8.139240506329113e-07,
"loss": 0.3661,
"step": 204
},
{
"epoch": 0.49,
"learning_rate": 8.113924050632911e-07,
"loss": 0.3579,
"step": 206
},
{
"epoch": 0.5,
"learning_rate": 8.088607594936709e-07,
"loss": 0.3228,
"step": 208
},
{
"epoch": 0.5,
"learning_rate": 8.063291139240505e-07,
"loss": 0.3723,
"step": 210
},
{
"epoch": 0.5,
"learning_rate": 8.037974683544303e-07,
"loss": 0.3276,
"step": 212
},
{
"epoch": 0.51,
"learning_rate": 8.012658227848102e-07,
"loss": 0.3386,
"step": 214
},
{
"epoch": 0.51,
"learning_rate": 7.987341772151898e-07,
"loss": 0.3415,
"step": 216
},
{
"epoch": 0.52,
"learning_rate": 7.962025316455696e-07,
"loss": 0.3055,
"step": 218
},
{
"epoch": 0.52,
"learning_rate": 7.936708860759494e-07,
"loss": 0.2819,
"step": 220
},
{
"epoch": 0.53,
"learning_rate": 7.911392405063291e-07,
"loss": 0.3376,
"step": 222
},
{
"epoch": 0.53,
"learning_rate": 7.886075949367088e-07,
"loss": 0.306,
"step": 224
},
{
"epoch": 0.54,
"learning_rate": 7.860759493670885e-07,
"loss": 0.2942,
"step": 226
},
{
"epoch": 0.54,
"learning_rate": 7.835443037974683e-07,
"loss": 0.3164,
"step": 228
},
{
"epoch": 0.55,
"learning_rate": 7.810126582278481e-07,
"loss": 0.2918,
"step": 230
},
{
"epoch": 0.55,
"learning_rate": 7.784810126582278e-07,
"loss": 0.3092,
"step": 232
},
{
"epoch": 0.56,
"learning_rate": 7.759493670886076e-07,
"loss": 0.4132,
"step": 234
},
{
"epoch": 0.56,
"learning_rate": 7.734177215189873e-07,
"loss": 0.3428,
"step": 236
},
{
"epoch": 0.57,
"learning_rate": 7.70886075949367e-07,
"loss": 0.2002,
"step": 238
},
{
"epoch": 0.57,
"learning_rate": 7.683544303797468e-07,
"loss": 0.333,
"step": 240
},
{
"epoch": 0.58,
"learning_rate": 7.658227848101266e-07,
"loss": 0.3346,
"step": 242
},
{
"epoch": 0.58,
"learning_rate": 7.632911392405062e-07,
"loss": 0.282,
"step": 244
},
{
"epoch": 0.59,
"learning_rate": 7.60759493670886e-07,
"loss": 0.1892,
"step": 246
},
{
"epoch": 0.59,
"learning_rate": 7.582278481012659e-07,
"loss": 0.2727,
"step": 248
},
{
"epoch": 0.6,
"learning_rate": 7.556962025316455e-07,
"loss": 0.3528,
"step": 250
},
{
"epoch": 0.6,
"learning_rate": 7.531645569620253e-07,
"loss": 0.3449,
"step": 252
},
{
"epoch": 0.6,
"learning_rate": 7.506329113924051e-07,
"loss": 0.2044,
"step": 254
},
{
"epoch": 0.61,
"learning_rate": 7.481012658227847e-07,
"loss": 0.2418,
"step": 256
},
{
"epoch": 0.61,
"learning_rate": 7.455696202531645e-07,
"loss": 0.2712,
"step": 258
},
{
"epoch": 0.62,
"learning_rate": 7.430379746835443e-07,
"loss": 0.2555,
"step": 260
},
{
"epoch": 0.62,
"learning_rate": 7.40506329113924e-07,
"loss": 0.231,
"step": 262
},
{
"epoch": 0.63,
"learning_rate": 7.379746835443038e-07,
"loss": 0.2705,
"step": 264
},
{
"epoch": 0.63,
"learning_rate": 7.354430379746836e-07,
"loss": 0.4189,
"step": 266
},
{
"epoch": 0.64,
"learning_rate": 7.329113924050633e-07,
"loss": 0.2864,
"step": 268
},
{
"epoch": 0.64,
"learning_rate": 7.30379746835443e-07,
"loss": 0.278,
"step": 270
},
{
"epoch": 0.65,
"learning_rate": 7.278481012658227e-07,
"loss": 0.308,
"step": 272
},
{
"epoch": 0.65,
"learning_rate": 7.253164556962025e-07,
"loss": 0.3659,
"step": 274
},
{
"epoch": 0.66,
"learning_rate": 7.227848101265822e-07,
"loss": 0.2763,
"step": 276
},
{
"epoch": 0.66,
"learning_rate": 7.202531645569619e-07,
"loss": 0.3028,
"step": 278
},
{
"epoch": 0.67,
"learning_rate": 7.177215189873418e-07,
"loss": 0.2959,
"step": 280
},
{
"epoch": 0.67,
"learning_rate": 7.151898734177216e-07,
"loss": 0.2135,
"step": 282
},
{
"epoch": 0.68,
"learning_rate": 7.126582278481012e-07,
"loss": 0.2805,
"step": 284
},
{
"epoch": 0.68,
"learning_rate": 7.10126582278481e-07,
"loss": 0.2925,
"step": 286
},
{
"epoch": 0.69,
"learning_rate": 7.075949367088608e-07,
"loss": 0.2914,
"step": 288
},
{
"epoch": 0.69,
"learning_rate": 7.050632911392404e-07,
"loss": 0.2366,
"step": 290
},
{
"epoch": 0.7,
"learning_rate": 7.025316455696202e-07,
"loss": 0.2246,
"step": 292
},
{
"epoch": 0.7,
"learning_rate": 7e-07,
"loss": 0.3242,
"step": 294
},
{
"epoch": 0.7,
"learning_rate": 6.974683544303797e-07,
"loss": 0.3683,
"step": 296
},
{
"epoch": 0.71,
"learning_rate": 6.949367088607595e-07,
"loss": 0.2697,
"step": 298
},
{
"epoch": 0.71,
"learning_rate": 6.924050632911393e-07,
"loss": 0.2314,
"step": 300
},
{
"epoch": 0.72,
"learning_rate": 6.898734177215189e-07,
"loss": 0.268,
"step": 302
},
{
"epoch": 0.72,
"learning_rate": 6.873417721518987e-07,
"loss": 0.3017,
"step": 304
},
{
"epoch": 0.73,
"learning_rate": 6.848101265822784e-07,
"loss": 0.2147,
"step": 306
},
{
"epoch": 0.73,
"learning_rate": 6.822784810126582e-07,
"loss": 0.2574,
"step": 308
},
{
"epoch": 0.74,
"learning_rate": 6.797468354430379e-07,
"loss": 0.2828,
"step": 310
},
{
"epoch": 0.74,
"learning_rate": 6.772151898734176e-07,
"loss": 0.2967,
"step": 312
},
{
"epoch": 0.75,
"learning_rate": 6.746835443037975e-07,
"loss": 0.2444,
"step": 314
},
{
"epoch": 0.75,
"learning_rate": 6.721518987341772e-07,
"loss": 0.2925,
"step": 316
},
{
"epoch": 0.76,
"learning_rate": 6.696202531645569e-07,
"loss": 0.243,
"step": 318
},
{
"epoch": 0.76,
"learning_rate": 6.670886075949367e-07,
"loss": 0.2559,
"step": 320
},
{
"epoch": 0.77,
"learning_rate": 6.645569620253164e-07,
"loss": 0.2143,
"step": 322
},
{
"epoch": 0.77,
"learning_rate": 6.620253164556961e-07,
"loss": 0.2637,
"step": 324
},
{
"epoch": 0.78,
"learning_rate": 6.594936708860759e-07,
"loss": 0.2977,
"step": 326
},
{
"epoch": 0.78,
"learning_rate": 6.569620253164557e-07,
"loss": 0.3284,
"step": 328
},
{
"epoch": 0.79,
"learning_rate": 6.544303797468354e-07,
"loss": 0.2531,
"step": 330
},
{
"epoch": 0.79,
"learning_rate": 6.518987341772152e-07,
"loss": 0.2793,
"step": 332
},
{
"epoch": 0.8,
"learning_rate": 6.49367088607595e-07,
"loss": 0.3183,
"step": 334
},
{
"epoch": 0.8,
"learning_rate": 6.468354430379746e-07,
"loss": 0.3329,
"step": 336
},
{
"epoch": 0.8,
"learning_rate": 6.443037974683544e-07,
"loss": 0.2545,
"step": 338
},
{
"epoch": 0.81,
"learning_rate": 6.417721518987342e-07,
"loss": 0.286,
"step": 340
},
{
"epoch": 0.81,
"learning_rate": 6.392405063291138e-07,
"loss": 0.3286,
"step": 342
},
{
"epoch": 0.82,
"learning_rate": 6.367088607594936e-07,
"loss": 0.2061,
"step": 344
},
{
"epoch": 0.82,
"learning_rate": 6.341772151898735e-07,
"loss": 0.1816,
"step": 346
},
{
"epoch": 0.83,
"learning_rate": 6.316455696202532e-07,
"loss": 0.262,
"step": 348
},
{
"epoch": 0.83,
"learning_rate": 6.291139240506329e-07,
"loss": 0.3591,
"step": 350
},
{
"epoch": 0.84,
"learning_rate": 6.265822784810126e-07,
"loss": 0.2565,
"step": 352
},
{
"epoch": 0.84,
"learning_rate": 6.240506329113924e-07,
"loss": 0.2609,
"step": 354
},
{
"epoch": 0.85,
"learning_rate": 6.215189873417721e-07,
"loss": 0.2728,
"step": 356
},
{
"epoch": 0.85,
"learning_rate": 6.189873417721518e-07,
"loss": 0.3175,
"step": 358
},
{
"epoch": 0.86,
"learning_rate": 6.164556962025316e-07,
"loss": 0.393,
"step": 360
},
{
"epoch": 0.86,
"learning_rate": 6.139240506329114e-07,
"loss": 0.234,
"step": 362
},
{
"epoch": 0.87,
"learning_rate": 6.113924050632911e-07,
"loss": 0.2264,
"step": 364
},
{
"epoch": 0.87,
"learning_rate": 6.088607594936709e-07,
"loss": 0.3062,
"step": 366
},
{
"epoch": 0.88,
"learning_rate": 6.063291139240507e-07,
"loss": 0.184,
"step": 368
},
{
"epoch": 0.88,
"learning_rate": 6.037974683544303e-07,
"loss": 0.2396,
"step": 370
},
{
"epoch": 0.89,
"learning_rate": 6.012658227848101e-07,
"loss": 0.2631,
"step": 372
},
{
"epoch": 0.89,
"learning_rate": 5.987341772151899e-07,
"loss": 0.2776,
"step": 374
},
{
"epoch": 0.9,
"learning_rate": 5.962025316455695e-07,
"loss": 0.2634,
"step": 376
},
{
"epoch": 0.9,
"learning_rate": 5.936708860759494e-07,
"loss": 0.1984,
"step": 378
},
{
"epoch": 0.9,
"learning_rate": 5.911392405063292e-07,
"loss": 0.2973,
"step": 380
},
{
"epoch": 0.91,
"learning_rate": 5.886075949367088e-07,
"loss": 0.3261,
"step": 382
},
{
"epoch": 0.91,
"learning_rate": 5.860759493670886e-07,
"loss": 0.1961,
"step": 384
},
{
"epoch": 0.92,
"learning_rate": 5.835443037974684e-07,
"loss": 0.3002,
"step": 386
},
{
"epoch": 0.92,
"learning_rate": 5.81012658227848e-07,
"loss": 0.2281,
"step": 388
},
{
"epoch": 0.93,
"learning_rate": 5.784810126582278e-07,
"loss": 0.1912,
"step": 390
},
{
"epoch": 0.93,
"learning_rate": 5.759493670886075e-07,
"loss": 0.2726,
"step": 392
},
{
"epoch": 0.94,
"learning_rate": 5.734177215189873e-07,
"loss": 0.2855,
"step": 394
},
{
"epoch": 0.94,
"learning_rate": 5.708860759493671e-07,
"loss": 0.2241,
"step": 396
},
{
"epoch": 0.95,
"learning_rate": 5.683544303797468e-07,
"loss": 0.3273,
"step": 398
},
{
"epoch": 0.95,
"learning_rate": 5.658227848101266e-07,
"loss": 0.209,
"step": 400
},
{
"epoch": 0.96,
"learning_rate": 5.632911392405063e-07,
"loss": 0.2155,
"step": 402
},
{
"epoch": 0.96,
"learning_rate": 5.60759493670886e-07,
"loss": 0.2218,
"step": 404
},
{
"epoch": 0.97,
"learning_rate": 5.582278481012658e-07,
"loss": 0.3097,
"step": 406
},
{
"epoch": 0.97,
"learning_rate": 5.556962025316455e-07,
"loss": 0.1995,
"step": 408
},
{
"epoch": 0.98,
"learning_rate": 5.531645569620252e-07,
"loss": 0.2408,
"step": 410
},
{
"epoch": 0.98,
"learning_rate": 5.506329113924051e-07,
"loss": 0.2443,
"step": 412
},
{
"epoch": 0.99,
"learning_rate": 5.481012658227849e-07,
"loss": 0.2719,
"step": 414
},
{
"epoch": 0.99,
"learning_rate": 5.455696202531645e-07,
"loss": 0.1995,
"step": 416
},
{
"epoch": 1.0,
"learning_rate": 5.430379746835443e-07,
"loss": 0.2849,
"step": 418
},
{
"epoch": 1.0,
"learning_rate": 5.405063291139241e-07,
"loss": 0.272,
"step": 420
}
],
"logging_steps": 2,
"max_steps": 840,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 200,
"total_flos": 1.371080632762368e+19,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}