{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.09312721177127957,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 1e-08,
"loss": 24.0683,
"step": 5
},
{
"epoch": 0.0,
"learning_rate": 3.5e-08,
"loss": 17.9067,
"step": 10
},
{
"epoch": 0.0,
"learning_rate": 6.000000000000001e-08,
"loss": 16.9004,
"step": 15
},
{
"epoch": 0.0,
"learning_rate": 8.500000000000001e-08,
"loss": 16.8411,
"step": 20
},
{
"epoch": 0.0,
"learning_rate": 1.1e-07,
"loss": 15.0697,
"step": 25
},
{
"epoch": 0.01,
"learning_rate": 1.35e-07,
"loss": 15.2061,
"step": 30
},
{
"epoch": 0.01,
"learning_rate": 1.6e-07,
"loss": 14.8504,
"step": 35
},
{
"epoch": 0.01,
"learning_rate": 1.85e-07,
"loss": 14.6816,
"step": 40
},
{
"epoch": 0.01,
"learning_rate": 2.1000000000000003e-07,
"loss": 14.9665,
"step": 45
},
{
"epoch": 0.01,
"learning_rate": 2.3500000000000003e-07,
"loss": 14.3427,
"step": 50
},
{
"epoch": 0.01,
"learning_rate": 2.6e-07,
"loss": 18.4574,
"step": 55
},
{
"epoch": 0.01,
"learning_rate": 2.8e-07,
"loss": 18.379,
"step": 60
},
{
"epoch": 0.01,
"learning_rate": 3.0500000000000004e-07,
"loss": 17.7823,
"step": 65
},
{
"epoch": 0.01,
"learning_rate": 3.3e-07,
"loss": 15.2998,
"step": 70
},
{
"epoch": 0.01,
"learning_rate": 3.55e-07,
"loss": 15.2838,
"step": 75
},
{
"epoch": 0.01,
"learning_rate": 3.8e-07,
"loss": 14.8496,
"step": 80
},
{
"epoch": 0.02,
"learning_rate": 4.0500000000000004e-07,
"loss": 14.3004,
"step": 85
},
{
"epoch": 0.02,
"learning_rate": 4.3e-07,
"loss": 14.2005,
"step": 90
},
{
"epoch": 0.02,
"learning_rate": 4.5500000000000004e-07,
"loss": 14.5054,
"step": 95
},
{
"epoch": 0.02,
"learning_rate": 4.800000000000001e-07,
"loss": 14.3888,
"step": 100
},
{
"epoch": 0.02,
"learning_rate": 5.05e-07,
"loss": 17.3825,
"step": 105
},
{
"epoch": 0.02,
"learning_rate": 5.3e-07,
"loss": 17.0812,
"step": 110
},
{
"epoch": 0.02,
"learning_rate": 5.550000000000001e-07,
"loss": 17.0701,
"step": 115
},
{
"epoch": 0.02,
"learning_rate": 5.800000000000001e-07,
"loss": 14.9025,
"step": 120
},
{
"epoch": 0.02,
"learning_rate": 6.05e-07,
"loss": 14.4225,
"step": 125
},
{
"epoch": 0.02,
"learning_rate": 6.3e-07,
"loss": 13.9425,
"step": 130
},
{
"epoch": 0.03,
"learning_rate": 6.550000000000001e-07,
"loss": 13.8037,
"step": 135
},
{
"epoch": 0.03,
"learning_rate": 6.800000000000001e-07,
"loss": 13.4372,
"step": 140
},
{
"epoch": 0.03,
"learning_rate": 7.05e-07,
"loss": 13.6807,
"step": 145
},
{
"epoch": 0.03,
"learning_rate": 7.3e-07,
"loss": 13.742,
"step": 150
},
{
"epoch": 0.03,
"learning_rate": 7.550000000000001e-07,
"loss": 15.1545,
"step": 155
},
{
"epoch": 0.03,
"learning_rate": 7.8e-07,
"loss": 16.1792,
"step": 160
},
{
"epoch": 0.03,
"learning_rate": 8.000000000000001e-07,
"loss": 14.6018,
"step": 165
},
{
"epoch": 0.03,
"learning_rate": 8.250000000000001e-07,
"loss": 12.6032,
"step": 170
},
{
"epoch": 0.03,
"learning_rate": 8.500000000000001e-07,
"loss": 13.0217,
"step": 175
},
{
"epoch": 0.03,
"learning_rate": 8.75e-07,
"loss": 12.2336,
"step": 180
},
{
"epoch": 0.03,
"learning_rate": 9.000000000000001e-07,
"loss": 11.8643,
"step": 185
},
{
"epoch": 0.04,
"learning_rate": 9.25e-07,
"loss": 11.8572,
"step": 190
},
{
"epoch": 0.04,
"learning_rate": 9.500000000000001e-07,
"loss": 10.8976,
"step": 195
},
{
"epoch": 0.04,
"learning_rate": 9.750000000000002e-07,
"loss": 12.2617,
"step": 200
},
{
"epoch": 0.04,
"learning_rate": 1.0000000000000002e-06,
"loss": 13.7753,
"step": 205
},
{
"epoch": 0.04,
"learning_rate": 1.025e-06,
"loss": 11.6137,
"step": 210
},
{
"epoch": 0.04,
"learning_rate": 1.0500000000000001e-06,
"loss": 10.6402,
"step": 215
},
{
"epoch": 0.04,
"learning_rate": 1.075e-06,
"loss": 12.9591,
"step": 220
},
{
"epoch": 0.04,
"learning_rate": 1.1e-06,
"loss": 9.8613,
"step": 225
},
{
"epoch": 0.04,
"learning_rate": 1.125e-06,
"loss": 10.487,
"step": 230
},
{
"epoch": 0.04,
"learning_rate": 1.1500000000000002e-06,
"loss": 9.3773,
"step": 235
},
{
"epoch": 0.04,
"learning_rate": 1.175e-06,
"loss": 9.5665,
"step": 240
},
{
"epoch": 0.05,
"learning_rate": 1.2000000000000002e-06,
"loss": 8.2941,
"step": 245
},
{
"epoch": 0.05,
"learning_rate": 1.2250000000000001e-06,
"loss": 8.5563,
"step": 250
},
{
"epoch": 0.05,
"learning_rate": 1.25e-06,
"loss": 10.4941,
"step": 255
},
{
"epoch": 0.05,
"learning_rate": 1.275e-06,
"loss": 8.4986,
"step": 260
},
{
"epoch": 0.05,
"learning_rate": 1.3e-06,
"loss": 10.2295,
"step": 265
},
{
"epoch": 0.05,
"learning_rate": 1.3250000000000002e-06,
"loss": 8.7026,
"step": 270
},
{
"epoch": 0.05,
"learning_rate": 1.3500000000000002e-06,
"loss": 8.6009,
"step": 275
},
{
"epoch": 0.05,
"learning_rate": 1.3750000000000002e-06,
"loss": 7.7613,
"step": 280
},
{
"epoch": 0.05,
"learning_rate": 1.4000000000000001e-06,
"loss": 7.8609,
"step": 285
},
{
"epoch": 0.05,
"learning_rate": 1.425e-06,
"loss": 7.0097,
"step": 290
},
{
"epoch": 0.05,
"learning_rate": 1.45e-06,
"loss": 5.5692,
"step": 295
},
{
"epoch": 0.06,
"learning_rate": 1.475e-06,
"loss": 5.6402,
"step": 300
},
{
"epoch": 0.06,
"learning_rate": 1.5e-06,
"loss": 9.0815,
"step": 305
},
{
"epoch": 0.06,
"learning_rate": 1.525e-06,
"loss": 8.0803,
"step": 310
},
{
"epoch": 0.06,
"learning_rate": 1.5500000000000002e-06,
"loss": 7.6229,
"step": 315
},
{
"epoch": 0.06,
"learning_rate": 1.5750000000000002e-06,
"loss": 5.5354,
"step": 320
},
{
"epoch": 0.06,
"learning_rate": 1.6000000000000001e-06,
"loss": 8.1564,
"step": 325
},
{
"epoch": 0.06,
"learning_rate": 1.6250000000000001e-06,
"loss": 7.0378,
"step": 330
},
{
"epoch": 0.06,
"learning_rate": 1.6500000000000003e-06,
"loss": 5.3662,
"step": 335
},
{
"epoch": 0.06,
"learning_rate": 1.6750000000000003e-06,
"loss": 7.6436,
"step": 340
},
{
"epoch": 0.06,
"learning_rate": 1.7000000000000002e-06,
"loss": 5.3403,
"step": 345
},
{
"epoch": 0.07,
"learning_rate": 1.725e-06,
"loss": 8.1018,
"step": 350
},
{
"epoch": 0.07,
"learning_rate": 1.745e-06,
"loss": 7.5364,
"step": 355
},
{
"epoch": 0.07,
"learning_rate": 1.77e-06,
"loss": 5.2764,
"step": 360
},
{
"epoch": 0.07,
"learning_rate": 1.7950000000000002e-06,
"loss": 6.4889,
"step": 365
},
{
"epoch": 0.07,
"learning_rate": 1.8200000000000002e-06,
"loss": 5.1625,
"step": 370
},
{
"epoch": 0.07,
"learning_rate": 1.8450000000000001e-06,
"loss": 7.4417,
"step": 375
},
{
"epoch": 0.07,
"learning_rate": 1.87e-06,
"loss": 4.8924,
"step": 380
},
{
"epoch": 0.07,
"learning_rate": 1.895e-06,
"loss": 9.9698,
"step": 385
},
{
"epoch": 0.07,
"learning_rate": 1.9200000000000003e-06,
"loss": 7.4896,
"step": 390
},
{
"epoch": 0.07,
"learning_rate": 1.945e-06,
"loss": 6.7582,
"step": 395
},
{
"epoch": 0.07,
"learning_rate": 1.97e-06,
"loss": 6.0409,
"step": 400
},
{
"epoch": 0.08,
"learning_rate": 1.9950000000000004e-06,
"loss": 7.4142,
"step": 405
},
{
"epoch": 0.08,
"learning_rate": 2.02e-06,
"loss": 7.9007,
"step": 410
},
{
"epoch": 0.08,
"learning_rate": 2.045e-06,
"loss": 5.15,
"step": 415
},
{
"epoch": 0.08,
"learning_rate": 2.07e-06,
"loss": 5.6287,
"step": 420
},
{
"epoch": 0.08,
"learning_rate": 2.0950000000000003e-06,
"loss": 4.7277,
"step": 425
},
{
"epoch": 0.08,
"learning_rate": 2.12e-06,
"loss": 4.9955,
"step": 430
},
{
"epoch": 0.08,
"learning_rate": 2.1450000000000002e-06,
"loss": 6.4111,
"step": 435
},
{
"epoch": 0.08,
"learning_rate": 2.17e-06,
"loss": 5.7243,
"step": 440
},
{
"epoch": 0.08,
"learning_rate": 2.195e-06,
"loss": 5.3146,
"step": 445
},
{
"epoch": 0.08,
"learning_rate": 2.2200000000000003e-06,
"loss": 4.8272,
"step": 450
},
{
"epoch": 0.08,
"learning_rate": 2.245e-06,
"loss": 6.3851,
"step": 455
},
{
"epoch": 0.09,
"learning_rate": 2.2700000000000003e-06,
"loss": 5.0858,
"step": 460
},
{
"epoch": 0.09,
"learning_rate": 2.2950000000000005e-06,
"loss": 13.9773,
"step": 465
},
{
"epoch": 0.09,
"learning_rate": 2.3200000000000002e-06,
"loss": 5.1323,
"step": 470
},
{
"epoch": 0.09,
"learning_rate": 2.345e-06,
"loss": 4.5519,
"step": 475
},
{
"epoch": 0.09,
"learning_rate": 2.37e-06,
"loss": 4.9828,
"step": 480
},
{
"epoch": 0.09,
"learning_rate": 2.395e-06,
"loss": 10.9601,
"step": 485
},
{
"epoch": 0.09,
"learning_rate": 2.42e-06,
"loss": 4.715,
"step": 490
},
{
"epoch": 0.09,
"learning_rate": 2.4450000000000003e-06,
"loss": 6.1112,
"step": 495
},
{
"epoch": 0.09,
"learning_rate": 2.47e-06,
"loss": 4.8677,
"step": 500
},
{
"epoch": 0.09,
"eval_loss": 6.384158611297607,
"eval_runtime": 97.6612,
"eval_samples_per_second": 12.226,
"eval_steps_per_second": 1.536,
"eval_wer": 1.0,
"step": 500
}
],
"max_steps": 161070,
"num_train_epochs": 30,
"total_flos": 4.640032532404224e+16,
"trial_name": null,
"trial_params": null
}