{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.18625442354255914,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 1e-08,
"loss": 24.0683,
"step": 5
},
{
"epoch": 0.0,
"learning_rate": 3.5e-08,
"loss": 17.9067,
"step": 10
},
{
"epoch": 0.0,
"learning_rate": 6.000000000000001e-08,
"loss": 16.9004,
"step": 15
},
{
"epoch": 0.0,
"learning_rate": 8.500000000000001e-08,
"loss": 16.8411,
"step": 20
},
{
"epoch": 0.0,
"learning_rate": 1.1e-07,
"loss": 15.0697,
"step": 25
},
{
"epoch": 0.01,
"learning_rate": 1.35e-07,
"loss": 15.2061,
"step": 30
},
{
"epoch": 0.01,
"learning_rate": 1.6e-07,
"loss": 14.8504,
"step": 35
},
{
"epoch": 0.01,
"learning_rate": 1.85e-07,
"loss": 14.6816,
"step": 40
},
{
"epoch": 0.01,
"learning_rate": 2.1000000000000003e-07,
"loss": 14.9665,
"step": 45
},
{
"epoch": 0.01,
"learning_rate": 2.3500000000000003e-07,
"loss": 14.3427,
"step": 50
},
{
"epoch": 0.01,
"learning_rate": 2.6e-07,
"loss": 18.4574,
"step": 55
},
{
"epoch": 0.01,
"learning_rate": 2.8e-07,
"loss": 18.379,
"step": 60
},
{
"epoch": 0.01,
"learning_rate": 3.0500000000000004e-07,
"loss": 17.7823,
"step": 65
},
{
"epoch": 0.01,
"learning_rate": 3.3e-07,
"loss": 15.2998,
"step": 70
},
{
"epoch": 0.01,
"learning_rate": 3.55e-07,
"loss": 15.2838,
"step": 75
},
{
"epoch": 0.01,
"learning_rate": 3.8e-07,
"loss": 14.8496,
"step": 80
},
{
"epoch": 0.02,
"learning_rate": 4.0500000000000004e-07,
"loss": 14.3004,
"step": 85
},
{
"epoch": 0.02,
"learning_rate": 4.3e-07,
"loss": 14.2005,
"step": 90
},
{
"epoch": 0.02,
"learning_rate": 4.5500000000000004e-07,
"loss": 14.5054,
"step": 95
},
{
"epoch": 0.02,
"learning_rate": 4.800000000000001e-07,
"loss": 14.3888,
"step": 100
},
{
"epoch": 0.02,
"learning_rate": 5.05e-07,
"loss": 17.3825,
"step": 105
},
{
"epoch": 0.02,
"learning_rate": 5.3e-07,
"loss": 17.0812,
"step": 110
},
{
"epoch": 0.02,
"learning_rate": 5.550000000000001e-07,
"loss": 17.0701,
"step": 115
},
{
"epoch": 0.02,
"learning_rate": 5.800000000000001e-07,
"loss": 14.9025,
"step": 120
},
{
"epoch": 0.02,
"learning_rate": 6.05e-07,
"loss": 14.4225,
"step": 125
},
{
"epoch": 0.02,
"learning_rate": 6.3e-07,
"loss": 13.9425,
"step": 130
},
{
"epoch": 0.03,
"learning_rate": 6.550000000000001e-07,
"loss": 13.8037,
"step": 135
},
{
"epoch": 0.03,
"learning_rate": 6.800000000000001e-07,
"loss": 13.4372,
"step": 140
},
{
"epoch": 0.03,
"learning_rate": 7.05e-07,
"loss": 13.6807,
"step": 145
},
{
"epoch": 0.03,
"learning_rate": 7.3e-07,
"loss": 13.742,
"step": 150
},
{
"epoch": 0.03,
"learning_rate": 7.550000000000001e-07,
"loss": 15.1545,
"step": 155
},
{
"epoch": 0.03,
"learning_rate": 7.8e-07,
"loss": 16.1792,
"step": 160
},
{
"epoch": 0.03,
"learning_rate": 8.000000000000001e-07,
"loss": 14.6018,
"step": 165
},
{
"epoch": 0.03,
"learning_rate": 8.250000000000001e-07,
"loss": 12.6032,
"step": 170
},
{
"epoch": 0.03,
"learning_rate": 8.500000000000001e-07,
"loss": 13.0217,
"step": 175
},
{
"epoch": 0.03,
"learning_rate": 8.75e-07,
"loss": 12.2336,
"step": 180
},
{
"epoch": 0.03,
"learning_rate": 9.000000000000001e-07,
"loss": 11.8643,
"step": 185
},
{
"epoch": 0.04,
"learning_rate": 9.25e-07,
"loss": 11.8572,
"step": 190
},
{
"epoch": 0.04,
"learning_rate": 9.500000000000001e-07,
"loss": 10.8976,
"step": 195
},
{
"epoch": 0.04,
"learning_rate": 9.750000000000002e-07,
"loss": 12.2617,
"step": 200
},
{
"epoch": 0.04,
"learning_rate": 1.0000000000000002e-06,
"loss": 13.7753,
"step": 205
},
{
"epoch": 0.04,
"learning_rate": 1.025e-06,
"loss": 11.6137,
"step": 210
},
{
"epoch": 0.04,
"learning_rate": 1.0500000000000001e-06,
"loss": 10.6402,
"step": 215
},
{
"epoch": 0.04,
"learning_rate": 1.075e-06,
"loss": 12.9591,
"step": 220
},
{
"epoch": 0.04,
"learning_rate": 1.1e-06,
"loss": 9.8613,
"step": 225
},
{
"epoch": 0.04,
"learning_rate": 1.125e-06,
"loss": 10.487,
"step": 230
},
{
"epoch": 0.04,
"learning_rate": 1.1500000000000002e-06,
"loss": 9.3773,
"step": 235
},
{
"epoch": 0.04,
"learning_rate": 1.175e-06,
"loss": 9.5665,
"step": 240
},
{
"epoch": 0.05,
"learning_rate": 1.2000000000000002e-06,
"loss": 8.2941,
"step": 245
},
{
"epoch": 0.05,
"learning_rate": 1.2250000000000001e-06,
"loss": 8.5563,
"step": 250
},
{
"epoch": 0.05,
"learning_rate": 1.25e-06,
"loss": 10.4941,
"step": 255
},
{
"epoch": 0.05,
"learning_rate": 1.275e-06,
"loss": 8.4986,
"step": 260
},
{
"epoch": 0.05,
"learning_rate": 1.3e-06,
"loss": 10.2295,
"step": 265
},
{
"epoch": 0.05,
"learning_rate": 1.3250000000000002e-06,
"loss": 8.7026,
"step": 270
},
{
"epoch": 0.05,
"learning_rate": 1.3500000000000002e-06,
"loss": 8.6009,
"step": 275
},
{
"epoch": 0.05,
"learning_rate": 1.3750000000000002e-06,
"loss": 7.7613,
"step": 280
},
{
"epoch": 0.05,
"learning_rate": 1.4000000000000001e-06,
"loss": 7.8609,
"step": 285
},
{
"epoch": 0.05,
"learning_rate": 1.425e-06,
"loss": 7.0097,
"step": 290
},
{
"epoch": 0.05,
"learning_rate": 1.45e-06,
"loss": 5.5692,
"step": 295
},
{
"epoch": 0.06,
"learning_rate": 1.475e-06,
"loss": 5.6402,
"step": 300
},
{
"epoch": 0.06,
"learning_rate": 1.5e-06,
"loss": 9.0815,
"step": 305
},
{
"epoch": 0.06,
"learning_rate": 1.525e-06,
"loss": 8.0803,
"step": 310
},
{
"epoch": 0.06,
"learning_rate": 1.5500000000000002e-06,
"loss": 7.6229,
"step": 315
},
{
"epoch": 0.06,
"learning_rate": 1.5750000000000002e-06,
"loss": 5.5354,
"step": 320
},
{
"epoch": 0.06,
"learning_rate": 1.6000000000000001e-06,
"loss": 8.1564,
"step": 325
},
{
"epoch": 0.06,
"learning_rate": 1.6250000000000001e-06,
"loss": 7.0378,
"step": 330
},
{
"epoch": 0.06,
"learning_rate": 1.6500000000000003e-06,
"loss": 5.3662,
"step": 335
},
{
"epoch": 0.06,
"learning_rate": 1.6750000000000003e-06,
"loss": 7.6436,
"step": 340
},
{
"epoch": 0.06,
"learning_rate": 1.7000000000000002e-06,
"loss": 5.3403,
"step": 345
},
{
"epoch": 0.07,
"learning_rate": 1.725e-06,
"loss": 8.1018,
"step": 350
},
{
"epoch": 0.07,
"learning_rate": 1.745e-06,
"loss": 7.5364,
"step": 355
},
{
"epoch": 0.07,
"learning_rate": 1.77e-06,
"loss": 5.2764,
"step": 360
},
{
"epoch": 0.07,
"learning_rate": 1.7950000000000002e-06,
"loss": 6.4889,
"step": 365
},
{
"epoch": 0.07,
"learning_rate": 1.8200000000000002e-06,
"loss": 5.1625,
"step": 370
},
{
"epoch": 0.07,
"learning_rate": 1.8450000000000001e-06,
"loss": 7.4417,
"step": 375
},
{
"epoch": 0.07,
"learning_rate": 1.87e-06,
"loss": 4.8924,
"step": 380
},
{
"epoch": 0.07,
"learning_rate": 1.895e-06,
"loss": 9.9698,
"step": 385
},
{
"epoch": 0.07,
"learning_rate": 1.9200000000000003e-06,
"loss": 7.4896,
"step": 390
},
{
"epoch": 0.07,
"learning_rate": 1.945e-06,
"loss": 6.7582,
"step": 395
},
{
"epoch": 0.07,
"learning_rate": 1.97e-06,
"loss": 6.0409,
"step": 400
},
{
"epoch": 0.08,
"learning_rate": 1.9950000000000004e-06,
"loss": 7.4142,
"step": 405
},
{
"epoch": 0.08,
"learning_rate": 2.02e-06,
"loss": 7.9007,
"step": 410
},
{
"epoch": 0.08,
"learning_rate": 2.045e-06,
"loss": 5.15,
"step": 415
},
{
"epoch": 0.08,
"learning_rate": 2.07e-06,
"loss": 5.6287,
"step": 420
},
{
"epoch": 0.08,
"learning_rate": 2.0950000000000003e-06,
"loss": 4.7277,
"step": 425
},
{
"epoch": 0.08,
"learning_rate": 2.12e-06,
"loss": 4.9955,
"step": 430
},
{
"epoch": 0.08,
"learning_rate": 2.1450000000000002e-06,
"loss": 6.4111,
"step": 435
},
{
"epoch": 0.08,
"learning_rate": 2.17e-06,
"loss": 5.7243,
"step": 440
},
{
"epoch": 0.08,
"learning_rate": 2.195e-06,
"loss": 5.3146,
"step": 445
},
{
"epoch": 0.08,
"learning_rate": 2.2200000000000003e-06,
"loss": 4.8272,
"step": 450
},
{
"epoch": 0.08,
"learning_rate": 2.245e-06,
"loss": 6.3851,
"step": 455
},
{
"epoch": 0.09,
"learning_rate": 2.2700000000000003e-06,
"loss": 5.0858,
"step": 460
},
{
"epoch": 0.09,
"learning_rate": 2.2950000000000005e-06,
"loss": 13.9773,
"step": 465
},
{
"epoch": 0.09,
"learning_rate": 2.3200000000000002e-06,
"loss": 5.1323,
"step": 470
},
{
"epoch": 0.09,
"learning_rate": 2.345e-06,
"loss": 4.5519,
"step": 475
},
{
"epoch": 0.09,
"learning_rate": 2.37e-06,
"loss": 4.9828,
"step": 480
},
{
"epoch": 0.09,
"learning_rate": 2.395e-06,
"loss": 10.9601,
"step": 485
},
{
"epoch": 0.09,
"learning_rate": 2.42e-06,
"loss": 4.715,
"step": 490
},
{
"epoch": 0.09,
"learning_rate": 2.4450000000000003e-06,
"loss": 6.1112,
"step": 495
},
{
"epoch": 0.09,
"learning_rate": 2.47e-06,
"loss": 4.8677,
"step": 500
},
{
"epoch": 0.09,
"eval_loss": 6.384158611297607,
"eval_runtime": 97.6612,
"eval_samples_per_second": 12.226,
"eval_steps_per_second": 1.536,
"eval_wer": 1.0,
"step": 500
},
{
"epoch": 0.09,
"learning_rate": 2.4950000000000003e-06,
"loss": 5.6367,
"step": 505
},
{
"epoch": 0.09,
"learning_rate": 2.52e-06,
"loss": 4.798,
"step": 510
},
{
"epoch": 0.1,
"learning_rate": 2.545e-06,
"loss": 4.7321,
"step": 515
},
{
"epoch": 0.1,
"learning_rate": 2.5700000000000004e-06,
"loss": 9.0061,
"step": 520
},
{
"epoch": 0.1,
"learning_rate": 2.595e-06,
"loss": 4.5565,
"step": 525
},
{
"epoch": 0.1,
"learning_rate": 2.6200000000000003e-06,
"loss": 4.9119,
"step": 530
},
{
"epoch": 0.1,
"learning_rate": 2.6450000000000005e-06,
"loss": 4.6625,
"step": 535
},
{
"epoch": 0.1,
"learning_rate": 2.6700000000000003e-06,
"loss": 4.4122,
"step": 540
},
{
"epoch": 0.1,
"learning_rate": 2.6950000000000005e-06,
"loss": 6.515,
"step": 545
},
{
"epoch": 0.1,
"learning_rate": 2.7200000000000002e-06,
"loss": 6.2983,
"step": 550
},
{
"epoch": 0.1,
"learning_rate": 2.7450000000000004e-06,
"loss": 7.8164,
"step": 555
},
{
"epoch": 0.1,
"learning_rate": 2.7700000000000006e-06,
"loss": 5.5597,
"step": 560
},
{
"epoch": 0.11,
"learning_rate": 2.7950000000000003e-06,
"loss": 4.1535,
"step": 565
},
{
"epoch": 0.11,
"learning_rate": 2.82e-06,
"loss": 5.9433,
"step": 570
},
{
"epoch": 0.11,
"learning_rate": 2.845e-06,
"loss": 4.8775,
"step": 575
},
{
"epoch": 0.11,
"learning_rate": 2.87e-06,
"loss": 6.3474,
"step": 580
},
{
"epoch": 0.11,
"learning_rate": 2.8950000000000002e-06,
"loss": 8.6641,
"step": 585
},
{
"epoch": 0.11,
"learning_rate": 2.92e-06,
"loss": 6.0309,
"step": 590
},
{
"epoch": 0.11,
"learning_rate": 2.945e-06,
"loss": 4.1472,
"step": 595
},
{
"epoch": 0.11,
"learning_rate": 2.97e-06,
"loss": 4.5871,
"step": 600
},
{
"epoch": 0.11,
"learning_rate": 2.995e-06,
"loss": 5.0069,
"step": 605
},
{
"epoch": 0.11,
"learning_rate": 3.0200000000000003e-06,
"loss": 4.694,
"step": 610
},
{
"epoch": 0.11,
"learning_rate": 3.045e-06,
"loss": 4.502,
"step": 615
},
{
"epoch": 0.12,
"learning_rate": 3.0700000000000003e-06,
"loss": 7.2696,
"step": 620
},
{
"epoch": 0.12,
"learning_rate": 3.0950000000000004e-06,
"loss": 4.5793,
"step": 625
},
{
"epoch": 0.12,
"learning_rate": 3.12e-06,
"loss": 4.1937,
"step": 630
},
{
"epoch": 0.12,
"learning_rate": 3.1450000000000004e-06,
"loss": 6.3539,
"step": 635
},
{
"epoch": 0.12,
"learning_rate": 3.17e-06,
"loss": 4.1621,
"step": 640
},
{
"epoch": 0.12,
"learning_rate": 3.1950000000000003e-06,
"loss": 4.4824,
"step": 645
},
{
"epoch": 0.12,
"learning_rate": 3.2200000000000005e-06,
"loss": 4.4693,
"step": 650
},
{
"epoch": 0.12,
"learning_rate": 3.2450000000000003e-06,
"loss": 5.6584,
"step": 655
},
{
"epoch": 0.12,
"learning_rate": 3.2700000000000005e-06,
"loss": 4.813,
"step": 660
},
{
"epoch": 0.12,
"learning_rate": 3.2950000000000002e-06,
"loss": 6.1425,
"step": 665
},
{
"epoch": 0.12,
"learning_rate": 3.3200000000000004e-06,
"loss": 7.5331,
"step": 670
},
{
"epoch": 0.13,
"learning_rate": 3.3450000000000006e-06,
"loss": 9.772,
"step": 675
},
{
"epoch": 0.13,
"learning_rate": 3.3700000000000003e-06,
"loss": 4.1922,
"step": 680
},
{
"epoch": 0.13,
"learning_rate": 3.3950000000000005e-06,
"loss": 3.8468,
"step": 685
},
{
"epoch": 0.13,
"learning_rate": 3.4200000000000007e-06,
"loss": 3.5808,
"step": 690
},
{
"epoch": 0.13,
"learning_rate": 3.445e-06,
"loss": 5.5091,
"step": 695
},
{
"epoch": 0.13,
"learning_rate": 3.4700000000000002e-06,
"loss": 3.768,
"step": 700
},
{
"epoch": 0.13,
"learning_rate": 3.495e-06,
"loss": 4.9197,
"step": 705
},
{
"epoch": 0.13,
"learning_rate": 3.52e-06,
"loss": 4.1167,
"step": 710
},
{
"epoch": 0.13,
"learning_rate": 3.545e-06,
"loss": 7.1805,
"step": 715
},
{
"epoch": 0.13,
"learning_rate": 3.57e-06,
"loss": 7.7838,
"step": 720
},
{
"epoch": 0.14,
"learning_rate": 3.5950000000000003e-06,
"loss": 6.4851,
"step": 725
},
{
"epoch": 0.14,
"learning_rate": 3.62e-06,
"loss": 4.2357,
"step": 730
},
{
"epoch": 0.14,
"learning_rate": 3.6450000000000003e-06,
"loss": 5.7078,
"step": 735
},
{
"epoch": 0.14,
"learning_rate": 3.6700000000000004e-06,
"loss": 5.5984,
"step": 740
},
{
"epoch": 0.14,
"learning_rate": 3.695e-06,
"loss": 5.8425,
"step": 745
},
{
"epoch": 0.14,
"learning_rate": 3.7200000000000004e-06,
"loss": 3.9582,
"step": 750
},
{
"epoch": 0.14,
"learning_rate": 3.745e-06,
"loss": 6.1284,
"step": 755
},
{
"epoch": 0.14,
"learning_rate": 3.7700000000000003e-06,
"loss": 6.4174,
"step": 760
},
{
"epoch": 0.14,
"learning_rate": 3.7950000000000005e-06,
"loss": 4.0111,
"step": 765
},
{
"epoch": 0.14,
"learning_rate": 3.820000000000001e-06,
"loss": 6.0067,
"step": 770
},
{
"epoch": 0.14,
"learning_rate": 3.8450000000000005e-06,
"loss": 5.5284,
"step": 775
},
{
"epoch": 0.15,
"learning_rate": 3.87e-06,
"loss": 5.7256,
"step": 780
},
{
"epoch": 0.15,
"learning_rate": 3.895000000000001e-06,
"loss": 5.4687,
"step": 785
},
{
"epoch": 0.15,
"learning_rate": 3.920000000000001e-06,
"loss": 5.5711,
"step": 790
},
{
"epoch": 0.15,
"learning_rate": 3.945e-06,
"loss": 3.9495,
"step": 795
},
{
"epoch": 0.15,
"learning_rate": 3.97e-06,
"loss": 7.5433,
"step": 800
},
{
"epoch": 0.15,
"learning_rate": 3.995000000000001e-06,
"loss": 6.6596,
"step": 805
},
{
"epoch": 0.15,
"learning_rate": 4.0200000000000005e-06,
"loss": 3.8816,
"step": 810
},
{
"epoch": 0.15,
"learning_rate": 4.045e-06,
"loss": 6.0984,
"step": 815
},
{
"epoch": 0.15,
"learning_rate": 4.07e-06,
"loss": 3.6398,
"step": 820
},
{
"epoch": 0.15,
"learning_rate": 4.095e-06,
"loss": 6.2519,
"step": 825
},
{
"epoch": 0.15,
"learning_rate": 4.12e-06,
"loss": 5.4348,
"step": 830
},
{
"epoch": 0.16,
"learning_rate": 4.145e-06,
"loss": 5.7349,
"step": 835
},
{
"epoch": 0.16,
"learning_rate": 4.17e-06,
"loss": 5.6213,
"step": 840
},
{
"epoch": 0.16,
"learning_rate": 4.1950000000000005e-06,
"loss": 3.7084,
"step": 845
},
{
"epoch": 0.16,
"learning_rate": 4.22e-06,
"loss": 7.2329,
"step": 850
},
{
"epoch": 0.16,
"learning_rate": 4.245e-06,
"loss": 5.7584,
"step": 855
},
{
"epoch": 0.16,
"learning_rate": 4.270000000000001e-06,
"loss": 6.9726,
"step": 860
},
{
"epoch": 0.16,
"learning_rate": 4.295e-06,
"loss": 3.7044,
"step": 865
},
{
"epoch": 0.16,
"learning_rate": 4.32e-06,
"loss": 4.0139,
"step": 870
},
{
"epoch": 0.16,
"learning_rate": 4.345000000000001e-06,
"loss": 6.19,
"step": 875
},
{
"epoch": 0.16,
"learning_rate": 4.3700000000000005e-06,
"loss": 6.5308,
"step": 880
},
{
"epoch": 0.16,
"learning_rate": 4.395e-06,
"loss": 5.7261,
"step": 885
},
{
"epoch": 0.17,
"learning_rate": 4.42e-06,
"loss": 3.8616,
"step": 890
},
{
"epoch": 0.17,
"learning_rate": 4.445000000000001e-06,
"loss": 3.4939,
"step": 895
},
{
"epoch": 0.17,
"learning_rate": 4.47e-06,
"loss": 3.5178,
"step": 900
},
{
"epoch": 0.17,
"learning_rate": 4.495e-06,
"loss": 3.6419,
"step": 905
},
{
"epoch": 0.17,
"learning_rate": 4.520000000000001e-06,
"loss": 8.8566,
"step": 910
},
{
"epoch": 0.17,
"learning_rate": 4.5450000000000005e-06,
"loss": 3.7769,
"step": 915
},
{
"epoch": 0.17,
"learning_rate": 4.57e-06,
"loss": 3.6874,
"step": 920
},
{
"epoch": 0.17,
"learning_rate": 4.595000000000001e-06,
"loss": 5.7525,
"step": 925
},
{
"epoch": 0.17,
"learning_rate": 4.620000000000001e-06,
"loss": 5.7784,
"step": 930
},
{
"epoch": 0.17,
"learning_rate": 4.645e-06,
"loss": 5.0083,
"step": 935
},
{
"epoch": 0.18,
"learning_rate": 4.670000000000001e-06,
"loss": 3.4095,
"step": 940
},
{
"epoch": 0.18,
"learning_rate": 4.695e-06,
"loss": 3.212,
"step": 945
},
{
"epoch": 0.18,
"learning_rate": 4.7200000000000005e-06,
"loss": 5.4597,
"step": 950
},
{
"epoch": 0.18,
"learning_rate": 4.745e-06,
"loss": 3.7717,
"step": 955
},
{
"epoch": 0.18,
"learning_rate": 4.77e-06,
"loss": 3.3286,
"step": 960
},
{
"epoch": 0.18,
"learning_rate": 4.795e-06,
"loss": 3.3198,
"step": 965
},
{
"epoch": 0.18,
"learning_rate": 4.8200000000000004e-06,
"loss": 3.6952,
"step": 970
},
{
"epoch": 0.18,
"learning_rate": 4.845e-06,
"loss": 3.3483,
"step": 975
},
{
"epoch": 0.18,
"learning_rate": 4.87e-06,
"loss": 4.8449,
"step": 980
},
{
"epoch": 0.18,
"learning_rate": 4.8950000000000006e-06,
"loss": 3.5239,
"step": 985
},
{
"epoch": 0.18,
"learning_rate": 4.92e-06,
"loss": 5.7172,
"step": 990
},
{
"epoch": 0.19,
"learning_rate": 4.945e-06,
"loss": 3.9009,
"step": 995
},
{
"epoch": 0.19,
"learning_rate": 4.970000000000001e-06,
"loss": 3.293,
"step": 1000
},
{
"epoch": 0.19,
"eval_loss": 5.126453876495361,
"eval_runtime": 138.3538,
"eval_samples_per_second": 8.63,
"eval_steps_per_second": 1.084,
"eval_wer": 1.0,
"step": 1000
}
],
"max_steps": 161070,
"num_train_epochs": 30,
"total_flos": 9.309229789594176e+16,
"trial_name": null,
"trial_params": null
}