Alex Spangher committed on
Commit dc209d3 · 1 Parent(s): b763a7c
Files changed (5)
  1. optimizer.pt +1 -1
  2. pytorch_model.bin +1 -1
  3. rng_state.pth +1 -1
  4. scheduler.pt +1 -1
  5. trainer_state.json +687 -3
optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:144ed410111f9523612c1a45c493d18a7136d4ca1713414d972f0065d6ec1ac6
+ oid sha256:de12922da6650e9a750157f2d56f18c6a6231eabfc8c0aa2da93e17480f30ee1
  size 731622405
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:de0b6b5faffa55240211976b9433e16a6dc4e544eeccd593dd3fb6fb04b04ebb
+ oid sha256:f1941dd9c551eff6459b42989c78eb6514b639b2d02202840012315447e88233
  size 509949681
rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5563f93f9ac0c9fe5c2a7ab9a2f433fc846204115b90bd053536d8ee6159877a
+ oid sha256:dc33da413769d37bb1e11d9c4626efbaf418efd2bff1d3b6b3ade7067311d219
  size 14575
scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0f2b1807c166dbf7692014ada86b86f2a1f3dd204193db9b7aad110046f6d997
+ oid sha256:ec7e47f7f5b0d7b1a4f2877ddbda6ecbad6a56a50f484f6701523f4b9da5906a
  size 627
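
The four checkpoint files above are stored through Git LFS, so the commit only rewrites their pointer files: the `oid sha256:` line changes while `size` stays the same. Purely as an illustration (not part of this commit), a pointer file with that `version` / `oid` / `size` layout could be parsed and checked against a downloaded blob like the sketch below; the paths and function names are hypothetical.

```python
import hashlib
from pathlib import Path


def parse_lfs_pointer(path):
    """Parse a Git LFS pointer file into its version, oid, and size fields."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields


def matches_pointer(pointer_path, blob_path):
    """Check a blob against the sha256 digest and byte size recorded in its pointer."""
    fields = parse_lfs_pointer(pointer_path)
    algo, _, expected = fields["oid"].partition(":")
    assert algo == "sha256"
    data = Path(blob_path).read_bytes()
    return hashlib.sha256(data).hexdigest() == expected and len(data) == int(fields["size"])


# Hypothetical usage: verify the updated optimizer state against its pointer file.
# print(matches_pointer("optimizer.pt.pointer", "optimizer.pt"))
```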
trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 2.0,
+ "epoch": 3.0,
  "eval_steps": 3000,
- "global_step": 89562,
+ "global_step": 134343,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -1371,13 +1371,697 @@
  "learning_rate": 3.0013845157544496e-05,
  "loss": 1.1827,
  "step": 89500
+ },
+ {
+ "epoch": 2.01,
+ "learning_rate": 2.9902190661217926e-05,
+ "loss": 0.9657,
+ "step": 90000
+ },
+ {
+ "epoch": 2.01,
+ "eval_e": 0.6182643221781055,
+ "eval_f1": 0.5581679626998242,
+ "eval_loss": 2.8641064167022705,
+ "eval_runtime": 287.6124,
+ "eval_samples_per_second": 12.26,
+ "eval_steps_per_second": 12.26,
+ "step": 90000
+ },
+ {
+ "epoch": 2.02,
+ "learning_rate": 2.9790536164891362e-05,
+ "loss": 0.8826,
+ "step": 90500
+ },
+ {
+ "epoch": 2.03,
+ "learning_rate": 2.9678881668564795e-05,
+ "loss": 0.9352,
+ "step": 91000
+ },
+ {
+ "epoch": 2.04,
+ "learning_rate": 2.9567227172238225e-05,
+ "loss": 0.9967,
+ "step": 91500
+ },
+ {
+ "epoch": 2.05,
+ "learning_rate": 2.9455572675911658e-05,
+ "loss": 0.9126,
+ "step": 92000
+ },
+ {
+ "epoch": 2.07,
+ "learning_rate": 2.9343918179585094e-05,
+ "loss": 0.9895,
+ "step": 92500
+ },
+ {
+ "epoch": 2.08,
+ "learning_rate": 2.9232263683258527e-05,
+ "loss": 0.9691,
+ "step": 93000
+ },
+ {
+ "epoch": 2.08,
+ "eval_e": 0.6154282473057289,
+ "eval_f1": 0.5556482680765841,
+ "eval_loss": 2.8704285621643066,
+ "eval_runtime": 287.9206,
+ "eval_samples_per_second": 12.246,
+ "eval_steps_per_second": 12.246,
+ "step": 93000
+ },
+ {
+ "epoch": 2.09,
+ "learning_rate": 2.9120609186931957e-05,
+ "loss": 0.8332,
+ "step": 93500
+ },
+ {
+ "epoch": 2.1,
+ "learning_rate": 2.9008954690605393e-05,
+ "loss": 1.0427,
+ "step": 94000
+ },
+ {
+ "epoch": 2.11,
+ "learning_rate": 2.8897300194278826e-05,
+ "loss": 1.0371,
+ "step": 94500
+ },
+ {
+ "epoch": 2.12,
+ "learning_rate": 2.8785645697952256e-05,
+ "loss": 0.8845,
+ "step": 95000
+ },
+ {
+ "epoch": 2.13,
+ "learning_rate": 2.867399120162569e-05,
+ "loss": 0.8747,
+ "step": 95500
+ },
+ {
+ "epoch": 2.14,
+ "learning_rate": 2.8562336705299125e-05,
+ "loss": 0.97,
+ "step": 96000
+ },
+ {
+ "epoch": 2.14,
+ "eval_e": 0.6134429948950653,
+ "eval_f1": 0.5550596343239774,
+ "eval_loss": 2.79943585395813,
+ "eval_runtime": 282.8048,
+ "eval_samples_per_second": 12.468,
+ "eval_steps_per_second": 12.468,
+ "step": 96000
+ },
+ {
+ "epoch": 2.15,
+ "learning_rate": 2.8450682208972558e-05,
+ "loss": 1.0791,
+ "step": 96500
+ },
+ {
+ "epoch": 2.17,
+ "learning_rate": 2.8339027712645988e-05,
+ "loss": 1.008,
+ "step": 97000
+ },
+ {
+ "epoch": 2.18,
+ "learning_rate": 2.8227373216319424e-05,
+ "loss": 0.8047,
+ "step": 97500
+ },
+ {
+ "epoch": 2.19,
+ "learning_rate": 2.8115718719992857e-05,
+ "loss": 0.8778,
+ "step": 98000
+ },
+ {
+ "epoch": 2.2,
+ "learning_rate": 2.8004064223666287e-05,
+ "loss": 0.8398,
+ "step": 98500
+ },
+ {
+ "epoch": 2.21,
+ "learning_rate": 2.789240972733972e-05,
+ "loss": 1.0125,
+ "step": 99000
+ },
+ {
+ "epoch": 2.21,
+ "eval_e": 0.6176971072036301,
+ "eval_f1": 0.5549828088169888,
+ "eval_loss": 2.8314826488494873,
+ "eval_runtime": 283.0891,
+ "eval_samples_per_second": 12.455,
+ "eval_steps_per_second": 12.455,
+ "step": 99000
+ },
+ {
+ "epoch": 2.22,
+ "learning_rate": 2.7780755231013156e-05,
+ "loss": 0.7736,
+ "step": 99500
+ },
+ {
+ "epoch": 2.23,
+ "learning_rate": 2.7669100734686586e-05,
+ "loss": 0.8345,
+ "step": 100000
+ },
+ {
+ "epoch": 2.24,
+ "learning_rate": 2.755744623836002e-05,
+ "loss": 0.9044,
+ "step": 100500
+ },
+ {
+ "epoch": 2.26,
+ "learning_rate": 2.7445791742033455e-05,
+ "loss": 0.9082,
+ "step": 101000
+ },
+ {
+ "epoch": 2.27,
+ "learning_rate": 2.7334137245706888e-05,
+ "loss": 0.8307,
+ "step": 101500
+ },
+ {
+ "epoch": 2.28,
+ "learning_rate": 2.7222482749380318e-05,
+ "loss": 1.0004,
+ "step": 102000
+ },
+ {
+ "epoch": 2.28,
+ "eval_e": 0.6165626772546795,
+ "eval_f1": 0.5601784958413148,
+ "eval_loss": 2.6772687435150146,
+ "eval_runtime": 280.257,
+ "eval_samples_per_second": 12.581,
+ "eval_steps_per_second": 12.581,
+ "step": 102000
+ },
+ {
+ "epoch": 2.29,
+ "learning_rate": 2.711082825305375e-05,
+ "loss": 0.948,
+ "step": 102500
+ },
+ {
+ "epoch": 2.3,
+ "learning_rate": 2.6999173756727187e-05,
+ "loss": 0.8935,
+ "step": 103000
+ },
+ {
+ "epoch": 2.31,
+ "learning_rate": 2.6887519260400617e-05,
+ "loss": 0.8814,
+ "step": 103500
+ },
+ {
+ "epoch": 2.32,
+ "learning_rate": 2.677586476407405e-05,
+ "loss": 0.9694,
+ "step": 104000
+ },
+ {
+ "epoch": 2.33,
+ "learning_rate": 2.6664210267747486e-05,
+ "loss": 0.8705,
+ "step": 104500
+ },
+ {
+ "epoch": 2.34,
+ "learning_rate": 2.655255577142092e-05,
+ "loss": 1.0145,
+ "step": 105000
+ },
+ {
+ "epoch": 2.34,
+ "eval_e": 0.5998298355076574,
+ "eval_f1": 0.5481616751996528,
+ "eval_loss": 2.488840341567993,
+ "eval_runtime": 279.9287,
+ "eval_samples_per_second": 12.596,
+ "eval_steps_per_second": 12.596,
+ "step": 105000
+ },
+ {
+ "epoch": 2.36,
+ "learning_rate": 2.644090127509435e-05,
+ "loss": 0.9333,
+ "step": 105500
+ },
+ {
+ "epoch": 2.37,
+ "learning_rate": 2.632924677876778e-05,
+ "loss": 1.1367,
+ "step": 106000
+ },
+ {
+ "epoch": 2.38,
+ "learning_rate": 2.6217592282441218e-05,
+ "loss": 0.9492,
+ "step": 106500
+ },
+ {
+ "epoch": 2.39,
+ "learning_rate": 2.6105937786114647e-05,
+ "loss": 1.0456,
+ "step": 107000
+ },
+ {
+ "epoch": 2.4,
+ "learning_rate": 2.599428328978808e-05,
+ "loss": 0.8105,
+ "step": 107500
+ },
+ {
+ "epoch": 2.41,
+ "learning_rate": 2.5882628793461517e-05,
+ "loss": 1.0848,
+ "step": 108000
+ },
+ {
+ "epoch": 2.41,
+ "eval_e": 0.6089052750992626,
+ "eval_f1": 0.5539872559411191,
+ "eval_loss": 2.320396661758423,
+ "eval_runtime": 280.0515,
+ "eval_samples_per_second": 12.591,
+ "eval_steps_per_second": 12.591,
+ "step": 108000
+ },
+ {
+ "epoch": 2.42,
+ "learning_rate": 2.5770974297134946e-05,
+ "loss": 0.9052,
+ "step": 108500
+ },
+ {
+ "epoch": 2.43,
+ "learning_rate": 2.565931980080838e-05,
+ "loss": 0.9562,
+ "step": 109000
+ },
+ {
+ "epoch": 2.45,
+ "learning_rate": 2.554766530448181e-05,
+ "loss": 0.9221,
+ "step": 109500
+ },
+ {
+ "epoch": 2.46,
+ "learning_rate": 2.543601080815525e-05,
+ "loss": 0.9997,
+ "step": 110000
+ },
+ {
+ "epoch": 2.47,
+ "learning_rate": 2.532435631182868e-05,
+ "loss": 0.9698,
+ "step": 110500
+ },
+ {
+ "epoch": 2.48,
+ "learning_rate": 2.521270181550211e-05,
+ "loss": 0.9949,
+ "step": 111000
+ },
+ {
+ "epoch": 2.48,
+ "eval_e": 0.6168462847419172,
+ "eval_f1": 0.556568464310343,
+ "eval_loss": 2.4436562061309814,
+ "eval_runtime": 283.2598,
+ "eval_samples_per_second": 12.448,
+ "eval_steps_per_second": 12.448,
+ "step": 111000
+ },
+ {
+ "epoch": 2.49,
+ "learning_rate": 2.5101047319175548e-05,
+ "loss": 0.9329,
+ "step": 111500
+ },
+ {
+ "epoch": 2.5,
+ "learning_rate": 2.4989392822848977e-05,
+ "loss": 0.9796,
+ "step": 112000
+ },
+ {
+ "epoch": 2.51,
+ "learning_rate": 2.487773832652241e-05,
+ "loss": 0.7865,
+ "step": 112500
+ },
+ {
+ "epoch": 2.52,
+ "learning_rate": 2.4766083830195843e-05,
+ "loss": 0.9462,
+ "step": 113000
+ },
+ {
+ "epoch": 2.53,
+ "learning_rate": 2.4654429333869276e-05,
+ "loss": 0.9154,
+ "step": 113500
+ },
+ {
+ "epoch": 2.55,
+ "learning_rate": 2.454277483754271e-05,
+ "loss": 0.9923,
+ "step": 114000
+ },
+ {
+ "epoch": 2.55,
+ "eval_e": 0.6145774248440159,
+ "eval_f1": 0.5539626796675543,
+ "eval_loss": 2.7537660598754883,
+ "eval_runtime": 286.2619,
+ "eval_samples_per_second": 12.317,
+ "eval_steps_per_second": 12.317,
+ "step": 114000
+ },
+ {
+ "epoch": 2.56,
+ "learning_rate": 2.4431120341216142e-05,
+ "loss": 0.8292,
+ "step": 114500
+ },
+ {
+ "epoch": 2.57,
+ "learning_rate": 2.4319465844889575e-05,
+ "loss": 0.8699,
+ "step": 115000
+ },
+ {
+ "epoch": 2.58,
+ "learning_rate": 2.4207811348563008e-05,
+ "loss": 0.9969,
+ "step": 115500
+ },
+ {
+ "epoch": 2.59,
+ "learning_rate": 2.409615685223644e-05,
+ "loss": 0.6963,
+ "step": 116000
+ },
+ {
+ "epoch": 2.6,
+ "learning_rate": 2.3984502355909874e-05,
+ "loss": 0.9601,
+ "step": 116500
+ },
+ {
+ "epoch": 2.61,
+ "learning_rate": 2.3872847859583307e-05,
+ "loss": 1.0571,
+ "step": 117000
+ },
+ {
+ "epoch": 2.61,
+ "eval_e": 0.6216676120249575,
+ "eval_f1": 0.5614300304448882,
+ "eval_loss": 2.4599826335906982,
+ "eval_runtime": 284.3149,
+ "eval_samples_per_second": 12.402,
+ "eval_steps_per_second": 12.402,
+ "step": 117000
+ },
+ {
+ "epoch": 2.62,
+ "learning_rate": 2.376119336325674e-05,
+ "loss": 0.9524,
+ "step": 117500
+ },
+ {
+ "epoch": 2.64,
+ "learning_rate": 2.3649538866930173e-05,
+ "loss": 1.1019,
+ "step": 118000
+ },
+ {
+ "epoch": 2.65,
+ "learning_rate": 2.3537884370603606e-05,
+ "loss": 0.8693,
+ "step": 118500
+ },
+ {
+ "epoch": 2.66,
+ "learning_rate": 2.342622987427704e-05,
+ "loss": 0.8238,
+ "step": 119000
+ },
+ {
+ "epoch": 2.67,
+ "learning_rate": 2.3314575377950472e-05,
+ "loss": 1.0947,
+ "step": 119500
+ },
+ {
+ "epoch": 2.68,
+ "learning_rate": 2.3202920881623905e-05,
+ "loss": 0.887,
+ "step": 120000
+ },
+ {
+ "epoch": 2.68,
+ "eval_e": 0.6091888825865003,
+ "eval_f1": 0.5543750338048291,
+ "eval_loss": 2.853802442550659,
+ "eval_runtime": 280.8484,
+ "eval_samples_per_second": 12.555,
+ "eval_steps_per_second": 12.555,
+ "step": 120000
+ },
+ {
+ "epoch": 2.69,
+ "learning_rate": 2.3091266385297335e-05,
+ "loss": 0.8148,
+ "step": 120500
+ },
+ {
+ "epoch": 2.7,
+ "learning_rate": 2.297961188897077e-05,
+ "loss": 0.8113,
+ "step": 121000
+ },
+ {
+ "epoch": 2.71,
+ "learning_rate": 2.2867957392644204e-05,
+ "loss": 1.0991,
+ "step": 121500
+ },
+ {
+ "epoch": 2.72,
+ "learning_rate": 2.2756302896317637e-05,
+ "loss": 0.7902,
+ "step": 122000
+ },
+ {
+ "epoch": 2.74,
+ "learning_rate": 2.264464839999107e-05,
+ "loss": 0.9871,
+ "step": 122500
+ },
+ {
+ "epoch": 2.75,
+ "learning_rate": 2.25329939036645e-05,
+ "loss": 0.861,
+ "step": 123000
+ },
+ {
+ "epoch": 2.75,
+ "eval_e": 0.6074872376630743,
+ "eval_f1": 0.551903563769941,
+ "eval_loss": 2.6797034740448,
+ "eval_runtime": 288.8697,
+ "eval_samples_per_second": 12.206,
+ "eval_steps_per_second": 12.206,
+ "step": 123000
+ },
+ {
+ "epoch": 2.76,
+ "learning_rate": 2.2421339407337936e-05,
+ "loss": 0.8356,
+ "step": 123500
+ },
+ {
+ "epoch": 2.77,
+ "learning_rate": 2.2309684911011366e-05,
+ "loss": 0.9589,
+ "step": 124000
+ },
+ {
+ "epoch": 2.78,
+ "learning_rate": 2.2198030414684802e-05,
+ "loss": 1.1244,
+ "step": 124500
+ },
+ {
+ "epoch": 2.79,
+ "learning_rate": 2.2086375918358235e-05,
+ "loss": 1.0342,
+ "step": 125000
+ },
+ {
+ "epoch": 2.8,
+ "learning_rate": 2.1974721422031664e-05,
+ "loss": 0.995,
+ "step": 125500
+ },
+ {
+ "epoch": 2.81,
+ "learning_rate": 2.18630669257051e-05,
+ "loss": 1.2528,
+ "step": 126000
+ },
+ {
+ "epoch": 2.81,
+ "eval_e": 0.6213840045377198,
+ "eval_f1": 0.5634895725291656,
+ "eval_loss": 2.4178013801574707,
+ "eval_runtime": 286.5357,
+ "eval_samples_per_second": 12.306,
+ "eval_steps_per_second": 12.306,
+ "step": 126000
+ },
+ {
+ "epoch": 2.82,
+ "learning_rate": 2.175141242937853e-05,
+ "loss": 0.9028,
+ "step": 126500
+ },
+ {
+ "epoch": 2.84,
+ "learning_rate": 2.1639757933051967e-05,
+ "loss": 1.0706,
+ "step": 127000
+ },
+ {
+ "epoch": 2.85,
+ "learning_rate": 2.1528103436725396e-05,
+ "loss": 0.8691,
+ "step": 127500
+ },
+ {
+ "epoch": 2.86,
+ "learning_rate": 2.141644894039883e-05,
+ "loss": 0.8903,
+ "step": 128000
+ },
+ {
+ "epoch": 2.87,
+ "learning_rate": 2.1304794444072262e-05,
+ "loss": 0.7932,
+ "step": 128500
+ },
+ {
+ "epoch": 2.88,
+ "learning_rate": 2.1193139947745695e-05,
+ "loss": 0.8327,
+ "step": 129000
+ },
+ {
+ "epoch": 2.88,
+ "eval_e": 0.6193987521270562,
+ "eval_f1": 0.561697084299324,
+ "eval_loss": 2.573134660720825,
+ "eval_runtime": 286.0117,
+ "eval_samples_per_second": 12.328,
+ "eval_steps_per_second": 12.328,
+ "step": 129000
+ },
+ {
+ "epoch": 2.89,
+ "learning_rate": 2.1081485451419132e-05,
+ "loss": 1.0172,
+ "step": 129500
+ },
+ {
+ "epoch": 2.9,
+ "learning_rate": 2.096983095509256e-05,
+ "loss": 0.9395,
+ "step": 130000
+ },
+ {
+ "epoch": 2.91,
+ "learning_rate": 2.0858176458765998e-05,
+ "loss": 0.9068,
+ "step": 130500
+ },
+ {
+ "epoch": 2.93,
+ "learning_rate": 2.0746521962439427e-05,
+ "loss": 1.0099,
+ "step": 131000
+ },
+ {
+ "epoch": 2.94,
+ "learning_rate": 2.063486746611286e-05,
+ "loss": 0.9126,
+ "step": 131500
+ },
+ {
+ "epoch": 2.95,
+ "learning_rate": 2.0523212969786293e-05,
+ "loss": 1.0005,
+ "step": 132000
+ },
+ {
+ "epoch": 2.95,
+ "eval_e": 0.6114577424844015,
+ "eval_f1": 0.5556109691935686,
+ "eval_loss": 2.36938214302063,
+ "eval_runtime": 284.4774,
+ "eval_samples_per_second": 12.395,
+ "eval_steps_per_second": 12.395,
+ "step": 132000
+ },
+ {
+ "epoch": 2.96,
+ "learning_rate": 2.0411558473459726e-05,
+ "loss": 0.9687,
+ "step": 132500
+ },
+ {
+ "epoch": 2.97,
+ "learning_rate": 2.0299903977133163e-05,
+ "loss": 0.9334,
+ "step": 133000
+ },
+ {
+ "epoch": 2.98,
+ "learning_rate": 2.0188249480806592e-05,
+ "loss": 0.8237,
+ "step": 133500
+ },
+ {
+ "epoch": 2.99,
+ "learning_rate": 2.0076594984480025e-05,
+ "loss": 0.9511,
+ "step": 134000
  }
  ],
  "logging_steps": 500,
  "max_steps": 223905,
  "num_train_epochs": 5,
  "save_steps": 500,
- "total_flos": 5.924771436255374e+16,
+ "total_flos": 8.88715715438306e+16,
  "trial_name": null,
  "trial_params": null
  }
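
The `trainer_state.json` change simply extends `log_history` with the step 90000–134000 entries and bumps `epoch`, `global_step`, and `total_flos`. As an illustrative sketch only (the file path is a placeholder; field names follow the entries shown above), such a state file could be summarized like this:

```python
import json

# Placeholder path; point this at the checkpoint's trainer_state.json.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training-loss entries carry "loss"; evaluation entries carry "eval_f1".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_f1" in e]

print(f"epoch {state['epoch']}, global_step {state['global_step']}")
if train_logs:
    last = train_logs[-1]
    print(f"last training loss {last['loss']} at step {last['step']}")
if eval_logs:
    best = max(eval_logs, key=lambda e: e["eval_f1"])
    print(f"best eval_f1 {best['eval_f1']:.4f} at step {best['step']}")
```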