farmery committed
Commit de1dd0f · verified · 1 Parent(s): 22877cb

Training in progress, step 100, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:af03696eb7ebf496a2f3071895b47c1b493fe6a3b2efa91a15b26781f6f4db0f
+ oid sha256:bb2e812288d309133aae30a4e2ab39ce4311efe5165e9d48d0dfef65d381d95c
  size 159967880
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fc1d7d5be86b7f5014ecf5f0b4221f25d696fed448ab1ffc75974caea9b4aca3
+ oid sha256:227f4b5077da6653384f19dcc642331f62c758409736889932db9fb4421f63d4
  size 320194002
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e68167c8d678477ffebb09a051524db7dc0a47c6ca5fdd9731dea2d4cf533e2d
+ oid sha256:0211afb67fcb6f9ca60d62990174660fae3a84e1870561d0f6869ce76b4d3984
  size 14960
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d334bd1b13cd7760eff044b538d4dfa8741682e468a17c5d3030193ddce227de
+ oid sha256:f222c17478fb114ecaf51e861347c79cb7813e276551b6f859323a82ab647ad8
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:52e108a8669fed0b2412fe70b3d74e1fb045fa29dbdcfc8bdea1ed862bcde354
+ oid sha256:09c8e018d20cd6d9a718036ab145aeb574c16648c33b4debe4b130f9c0651622
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:92e8f1adbd80f39ab0112d83af61b731937afc180931e7ce4fe124b11969a609
+ oid sha256:fe940586a81b5a518b2ed6f22a0bf0191b6f987cf05456fafe7ff74873ed1c6d
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f23e2214bcafb439ebc7528dcc283ef6218d509a276c0baff0743503ecbe3d92
+ oid sha256:49d60a69e2379be2053e816cbaff31e6c931b5922dd86c71c9eaf473299cbf62
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.20408163265306123,
+ "epoch": 0.272108843537415,
  "eval_steps": 9,
- "global_step": 75,
+ "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -604,6 +604,205 @@
  "learning_rate": 1.7860619515673033e-05,
  "loss": 0.7707,
  "step": 75
+ },
+ {
+ "epoch": 0.20680272108843537,
+ "grad_norm": 0.1923210769891739,
+ "learning_rate": 1.6543469682057106e-05,
+ "loss": 0.7343,
+ "step": 76
+ },
+ {
+ "epoch": 0.20952380952380953,
+ "grad_norm": 0.15001152455806732,
+ "learning_rate": 1.526708147705013e-05,
+ "loss": 0.6671,
+ "step": 77
+ },
+ {
+ "epoch": 0.21224489795918366,
+ "grad_norm": 0.18915919959545135,
+ "learning_rate": 1.4033009983067452e-05,
+ "loss": 0.7056,
+ "step": 78
+ },
+ {
+ "epoch": 0.21496598639455783,
+ "grad_norm": 0.19046439230442047,
+ "learning_rate": 1.2842758726130283e-05,
+ "loss": 0.83,
+ "step": 79
+ },
+ {
+ "epoch": 0.21768707482993196,
+ "grad_norm": 0.17637431621551514,
+ "learning_rate": 1.1697777844051105e-05,
+ "loss": 0.7493,
+ "step": 80
+ },
+ {
+ "epoch": 0.22040816326530613,
+ "grad_norm": 0.153495192527771,
+ "learning_rate": 1.0599462319663905e-05,
+ "loss": 0.7012,
+ "step": 81
+ },
+ {
+ "epoch": 0.22040816326530613,
+ "eval_loss": 0.7618221640586853,
+ "eval_runtime": 32.195,
+ "eval_samples_per_second": 38.422,
+ "eval_steps_per_second": 1.211,
+ "step": 81
+ },
+ {
+ "epoch": 0.22312925170068026,
+ "grad_norm": 0.16720904409885406,
+ "learning_rate": 9.549150281252633e-06,
+ "loss": 0.7049,
+ "step": 82
+ },
+ {
+ "epoch": 0.22585034013605443,
+ "grad_norm": 0.16498345136642456,
+ "learning_rate": 8.548121372247918e-06,
+ "loss": 0.6583,
+ "step": 83
+ },
+ {
+ "epoch": 0.22857142857142856,
+ "grad_norm": 0.17059318721294403,
+ "learning_rate": 7.597595192178702e-06,
+ "loss": 0.812,
+ "step": 84
+ },
+ {
+ "epoch": 0.23129251700680273,
+ "grad_norm": 0.14581593871116638,
+ "learning_rate": 6.698729810778065e-06,
+ "loss": 0.7116,
+ "step": 85
+ },
+ {
+ "epoch": 0.23401360544217686,
+ "grad_norm": 0.18653877079486847,
+ "learning_rate": 5.852620357053651e-06,
+ "loss": 0.9057,
+ "step": 86
+ },
+ {
+ "epoch": 0.23673469387755103,
+ "grad_norm": 0.17196914553642273,
+ "learning_rate": 5.060297685041659e-06,
+ "loss": 0.7439,
+ "step": 87
+ },
+ {
+ "epoch": 0.23945578231292516,
+ "grad_norm": 0.17426681518554688,
+ "learning_rate": 4.322727117869951e-06,
+ "loss": 0.8396,
+ "step": 88
+ },
+ {
+ "epoch": 0.24217687074829933,
+ "grad_norm": 0.18171295523643494,
+ "learning_rate": 3.6408072716606346e-06,
+ "loss": 0.7002,
+ "step": 89
+ },
+ {
+ "epoch": 0.24489795918367346,
+ "grad_norm": 0.17335541546344757,
+ "learning_rate": 3.0153689607045845e-06,
+ "loss": 0.7719,
+ "step": 90
+ },
+ {
+ "epoch": 0.24489795918367346,
+ "eval_loss": 0.7605471014976501,
+ "eval_runtime": 32.1868,
+ "eval_samples_per_second": 38.432,
+ "eval_steps_per_second": 1.212,
+ "step": 90
+ },
+ {
+ "epoch": 0.24761904761904763,
+ "grad_norm": 0.1640811711549759,
+ "learning_rate": 2.4471741852423237e-06,
+ "loss": 0.7349,
+ "step": 91
+ },
+ {
+ "epoch": 0.2503401360544218,
+ "grad_norm": 0.20482207834720612,
+ "learning_rate": 1.9369152030840556e-06,
+ "loss": 0.7466,
+ "step": 92
+ },
+ {
+ "epoch": 0.2530612244897959,
+ "grad_norm": 0.16231241822242737,
+ "learning_rate": 1.4852136862001764e-06,
+ "loss": 0.5886,
+ "step": 93
+ },
+ {
+ "epoch": 0.25578231292517006,
+ "grad_norm": 0.16183051466941833,
+ "learning_rate": 1.0926199633097157e-06,
+ "loss": 0.7642,
+ "step": 94
+ },
+ {
+ "epoch": 0.2585034013605442,
+ "grad_norm": 0.1630152016878128,
+ "learning_rate": 7.596123493895991e-07,
+ "loss": 0.6387,
+ "step": 95
+ },
+ {
+ "epoch": 0.2612244897959184,
+ "grad_norm": 0.1637643575668335,
+ "learning_rate": 4.865965629214819e-07,
+ "loss": 0.6792,
+ "step": 96
+ },
+ {
+ "epoch": 0.2639455782312925,
+ "grad_norm": 0.1643003672361374,
+ "learning_rate": 2.7390523158633554e-07,
+ "loss": 0.7333,
+ "step": 97
+ },
+ {
+ "epoch": 0.26666666666666666,
+ "grad_norm": 0.1853807270526886,
+ "learning_rate": 1.2179748700879012e-07,
+ "loss": 0.7241,
+ "step": 98
+ },
+ {
+ "epoch": 0.2693877551020408,
+ "grad_norm": 0.16440825164318085,
+ "learning_rate": 3.04586490452119e-08,
+ "loss": 0.6817,
+ "step": 99
+ },
+ {
+ "epoch": 0.2693877551020408,
+ "eval_loss": 0.7603068947792053,
+ "eval_runtime": 32.195,
+ "eval_samples_per_second": 38.422,
+ "eval_steps_per_second": 1.211,
+ "step": 99
+ },
+ {
+ "epoch": 0.272108843537415,
+ "grad_norm": 0.19654084742069244,
+ "learning_rate": 0.0,
+ "loss": 0.8268,
+ "step": 100
  }
  ],
  "logging_steps": 1,
@@ -618,12 +817,12 @@
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
- "should_training_stop": false
+ "should_training_stop": true
  },
  "attributes": {}
  }
  },
- "total_flos": 2.5782671256426906e+17,
+ "total_flos": 3.4409949730072166e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null