Commit 2a4adcc (verified) · committed by AmberYifan · 1 parent: ffa30fe

Training in progress, epoch 2, checkpoint
last-checkpoint/global_step626/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a348a22c4e7292a63c81cfc0fcc1b8ac3dae85423a483a0702496957d13b6b95
+size 13476836524
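
The three lines above are a Git LFS pointer: the repository itself stores only the version/oid/size triplet, while the ~13 GB optimizer shard lives in LFS storage. A minimal sketch (not part of this commit; paths are illustrative placeholders) for parsing such a pointer and checking a separately downloaded blob against it:

# Minimal sketch (not part of this commit): parse a Git LFS pointer file like the
# one added above and verify a downloaded blob against its recorded oid and size.
import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path):
    # A pointer file is three "key value" lines: version, oid, size.
    fields = dict(line.split(" ", 1) for line in Path(pointer_path).read_text().splitlines() if line)
    return {"sha256": fields["oid"].split(":", 1)[1], "size": int(fields["size"])}

def verify_blob(blob_path, pointer):
    # The blob must match both the recorded byte size and the sha256 digest.
    data = Path(blob_path).read_bytes()
    return len(data) == pointer["size"] and hashlib.sha256(data).hexdigest() == pointer["sha256"]

# Example (hypothetical local paths):
# ptr = parse_lfs_pointer("bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt.pointer")
# assert verify_blob("bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt", ptr)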
last-checkpoint/global_step626/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1d1fa4db8c10c30bd860a7eaef1f71d3c5747deb313934742d2744935d062557
+size 13476836524
last-checkpoint/global_step626/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b51bda3e9193367ef28889dd6408835151474efb556046c8f933c6ca91a5431
+size 13476836524
last-checkpoint/global_step626/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6beeba792c0b122547d06c33b304f468ea691c48a6de2d30ffd2663bad9f14c4
+size 13476836524
last-checkpoint/global_step626/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0f04249e1b8fe1c2cdc217cf32f7eea80a19cb28668041a7568bda12381df332
+size 150693
last-checkpoint/global_step626/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2a8e7204e992a56a415d70b4b33a5427de747e33a8410a8d2e57b2d3760f41a3
+size 150693
last-checkpoint/global_step626/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b82bc0de7fa407ebf2e63d8203a63e23579c6341814abb2367c0d8f0fb95b0c2
+size 150693
last-checkpoint/global_step626/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f568cb4b0ff22d0f18efb7a633cdedc227f3a80be6b0c1fcc6c09a4985216c2c
+size 150693
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step313
+global_step626
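
The files added above are the DeepSpeed ZeRO shards for the new tag global_step626 (per-rank bf16 optimizer states plus small per-rank model-state metadata), and last-checkpoint/latest now points at that tag. A minimal sketch (not part of this commit) of consolidating those shards into a single fp32 state dict with DeepSpeed's converter, assuming the checkpoint directory has been downloaded locally as last-checkpoint/:

# Minimal sketch (assumes ./last-checkpoint was downloaded): DeepSpeed reads the
# "latest" tag file to pick global_step626 and merges the per-rank ZeRO shards
# into one full-precision state dict.
import torch
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

state_dict = get_fp32_state_dict_from_zero_checkpoint("last-checkpoint")  # tag resolved from "latest"
torch.save(state_dict, "pytorch_model_fp32.bin")                          # consolidated fp32 weights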
last-checkpoint/model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f5904e952701479e892c2d17a4d0b611f4ed302daf4b6a1aba2eaf266f860079
+oid sha256:3bba1df728f0a13c32473530dfdf8f2b9e407ef34cc781c3f369d508860f0ac6
 size 4938985352
last-checkpoint/model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8a2daac32f9ba6f35b798552797ed4b27987e5b51730c584e2939993e47216dd
+oid sha256:11b000d5ce7e244db5581aa54cbceba6b8330bae9c6128c79cb7cc8b33fe3425
 size 4947390880
last-checkpoint/model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3c2d3fdd1381913365ea0afbde72a0f74775bce063d67ec06dcf7ecb72d472be
+oid sha256:8848153f15166b9cad0472ba12cd8877ab2aef7f2e79e9af3a0a62a3742c4156
 size 3590488816
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f1ce770862b76ceecb3453a5d0b9ef65311eab0f6a60f52997726a89d718de08
+oid sha256:2307c03867cef25b5028feb9a23f80e784b9af9a615de13ddca560a6a90fb593
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2bcae6573442578b752be3e988d4ccb38056b45c31b5c02eb579ec6cebcfa62e
+oid sha256:50218cfaecdd818354e567b7167c13899e3b42297e7d8f58bd7e732cfa547800
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:398ff45072fee2975e88b9e078f915103d75d94bc08753303cf855b915973623
+oid sha256:9814a66b49861f5495b06dae3be12ddf7185b88e2cae1fb808ca9efd99d5807f
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7f4b210bc832d4c648e3bee8c21dca26e5a8b365d6ec90c638062005a052e57b
+oid sha256:a7be93040a65e0a29975f6c70b94418e1fdf88423a50c58aa572141d3c92fbfc
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6b9aad3a748e15719e20120f0a760f22686338e176ee3f1c011b6dab2f3ab19b
+oid sha256:f7725b44c69725321786f1e58dccd7ea4d3ae5794ea47bd49c0d4a139aec266d
 size 1064
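
The refreshed rng_state_*.pth files, scheduler.pt, and the trainer_state.json hunk below are what transformers.Trainer reloads when resuming from this checkpoint (e.g. trainer.train(resume_from_checkpoint="last-checkpoint")), so the next epoch continues with the same RNG streams, LR-schedule position, and step counter. A small sketch (not part of this commit) for inspecting one of the per-rank RNG snapshots locally:

# Minimal sketch (assumes the file was downloaded locally): peek at the rank-0
# RNG snapshot. Trainer typically stores python/numpy/cpu/cuda generator states,
# but the exact keys are an assumption here.
import torch

rng = torch.load("last-checkpoint/rng_state_0.pth", map_location="cpu", weights_only=False)
print(sorted(rng.keys()))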
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 1.0,
+  "epoch": 2.0,
   "eval_steps": 500,
-  "global_step": 313,
+  "global_step": 626,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -503,6 +503,487 @@
       "eval_samples_per_second": 16.39,
       "eval_steps_per_second": 0.574,
       "step": 313
+    },
+    {
+      "epoch": 1.0223642172523961,
+      "grad_norm": 15.216502023078874,
+      "learning_rate": 3.662721893491124e-07,
+      "logits/chosen": -1.578125,
+      "logits/rejected": -1.59375,
+      "logps/chosen": -230.0,
+      "logps/rejected": -262.0,
+      "loss": 0.4383,
+      "rewards/accuracies": 0.8500000238418579,
+      "rewards/chosen": 0.66015625,
+      "rewards/margins": 1.0546875,
+      "rewards/rejected": -0.392578125,
+      "step": 320
+    },
+    {
+      "epoch": 1.0543130990415335,
+      "grad_norm": 16.390892234984523,
+      "learning_rate": 3.603550295857988e-07,
+      "logits/chosen": -1.4921875,
+      "logits/rejected": -1.59375,
+      "logps/chosen": -232.0,
+      "logps/rejected": -276.0,
+      "loss": 0.3885,
+      "rewards/accuracies": 0.9375,
+      "rewards/chosen": 0.6328125,
+      "rewards/margins": 1.2578125,
+      "rewards/rejected": -0.625,
+      "step": 330
+    },
+    {
+      "epoch": 1.0862619808306708,
+      "grad_norm": 14.29774575781621,
+      "learning_rate": 3.544378698224852e-07,
+      "logits/chosen": -1.5390625,
+      "logits/rejected": -1.5546875,
+      "logps/chosen": -228.0,
+      "logps/rejected": -268.0,
+      "loss": 0.3978,
+      "rewards/accuracies": 0.875,
+      "rewards/chosen": 0.63671875,
+      "rewards/margins": 1.2265625,
+      "rewards/rejected": -0.58984375,
+      "step": 340
+    },
+    {
+      "epoch": 1.1182108626198084,
+      "grad_norm": 16.80859709996563,
+      "learning_rate": 3.485207100591716e-07,
+      "logits/chosen": -1.5390625,
+      "logits/rejected": -1.5078125,
+      "logps/chosen": -236.0,
+      "logps/rejected": -268.0,
+      "loss": 0.3711,
+      "rewards/accuracies": 0.8374999761581421,
+      "rewards/chosen": 0.50390625,
+      "rewards/margins": 1.0859375,
+      "rewards/rejected": -0.58203125,
+      "step": 350
+    },
+    {
+      "epoch": 1.1501597444089458,
+      "grad_norm": 17.099448590902114,
+      "learning_rate": 3.42603550295858e-07,
+      "logits/chosen": -1.53125,
+      "logits/rejected": -1.5625,
+      "logps/chosen": -228.0,
+      "logps/rejected": -262.0,
+      "loss": 0.3884,
+      "rewards/accuracies": 0.800000011920929,
+      "rewards/chosen": 0.515625,
+      "rewards/margins": 1.0546875,
+      "rewards/rejected": -0.54296875,
+      "step": 360
+    },
+    {
+      "epoch": 1.182108626198083,
+      "grad_norm": 15.964788390337974,
+      "learning_rate": 3.366863905325444e-07,
+      "logits/chosen": -1.5625,
+      "logits/rejected": -1.5625,
+      "logps/chosen": -234.0,
+      "logps/rejected": -266.0,
+      "loss": 0.3964,
+      "rewards/accuracies": 0.824999988079071,
+      "rewards/chosen": 0.5546875,
+      "rewards/margins": 0.97265625,
+      "rewards/rejected": -0.416015625,
+      "step": 370
+    },
+    {
+      "epoch": 1.2140575079872205,
+      "grad_norm": 17.10451165088142,
+      "learning_rate": 3.3076923076923075e-07,
+      "logits/chosen": -1.5546875,
+      "logits/rejected": -1.5703125,
+      "logps/chosen": -232.0,
+      "logps/rejected": -260.0,
+      "loss": 0.3918,
+      "rewards/accuracies": 0.762499988079071,
+      "rewards/chosen": 0.51953125,
+      "rewards/margins": 1.015625,
+      "rewards/rejected": -0.494140625,
+      "step": 380
+    },
+    {
+      "epoch": 1.2460063897763578,
+      "grad_norm": 16.51862951602937,
+      "learning_rate": 3.2485207100591716e-07,
+      "logits/chosen": -1.546875,
+      "logits/rejected": -1.6328125,
+      "logps/chosen": -228.0,
+      "logps/rejected": -276.0,
+      "loss": 0.3695,
+      "rewards/accuracies": 0.8999999761581421,
+      "rewards/chosen": 0.8125,
+      "rewards/margins": 1.6953125,
+      "rewards/rejected": -0.88671875,
+      "step": 390
+    },
+    {
+      "epoch": 1.2779552715654952,
+      "grad_norm": 19.593966068797595,
+      "learning_rate": 3.189349112426035e-07,
+      "logits/chosen": -1.5859375,
+      "logits/rejected": -1.625,
+      "logps/chosen": -235.0,
+      "logps/rejected": -276.0,
+      "loss": 0.4045,
+      "rewards/accuracies": 0.800000011920929,
+      "rewards/chosen": 0.5234375,
+      "rewards/margins": 1.125,
+      "rewards/rejected": -0.59765625,
+      "step": 400
+    },
+    {
+      "epoch": 1.3099041533546325,
+      "grad_norm": 19.821206837758886,
+      "learning_rate": 3.130177514792899e-07,
+      "logits/chosen": -1.546875,
+      "logits/rejected": -1.6015625,
+      "logps/chosen": -239.0,
+      "logps/rejected": -266.0,
+      "loss": 0.4309,
+      "rewards/accuracies": 0.8500000238418579,
+      "rewards/chosen": 0.427734375,
+      "rewards/margins": 1.1640625,
+      "rewards/rejected": -0.734375,
+      "step": 410
+    },
+    {
+      "epoch": 1.34185303514377,
+      "grad_norm": 17.457629857186152,
+      "learning_rate": 3.071005917159763e-07,
+      "logits/chosen": -1.625,
+      "logits/rejected": -1.609375,
+      "logps/chosen": -243.0,
+      "logps/rejected": -270.0,
+      "loss": 0.3913,
+      "rewards/accuracies": 0.8125,
+      "rewards/chosen": 0.4609375,
+      "rewards/margins": 1.1796875,
+      "rewards/rejected": -0.72265625,
+      "step": 420
+    },
+    {
+      "epoch": 1.3738019169329074,
+      "grad_norm": 16.963757931748397,
+      "learning_rate": 3.011834319526627e-07,
+      "logits/chosen": -1.578125,
+      "logits/rejected": -1.609375,
+      "logps/chosen": -235.0,
+      "logps/rejected": -270.0,
+      "loss": 0.3808,
+      "rewards/accuracies": 0.8374999761581421,
+      "rewards/chosen": 0.41796875,
+      "rewards/margins": 1.1953125,
+      "rewards/rejected": -0.78125,
+      "step": 430
+    },
+    {
+      "epoch": 1.4057507987220448,
+      "grad_norm": 16.933512859408392,
+      "learning_rate": 2.952662721893491e-07,
+      "logits/chosen": -1.546875,
+      "logits/rejected": -1.578125,
+      "logps/chosen": -238.0,
+      "logps/rejected": -262.0,
+      "loss": 0.4261,
+      "rewards/accuracies": 0.8125,
+      "rewards/chosen": 0.40234375,
+      "rewards/margins": 1.0,
+      "rewards/rejected": -0.6015625,
+      "step": 440
+    },
+    {
+      "epoch": 1.4376996805111821,
+      "grad_norm": 17.582118451476234,
+      "learning_rate": 2.893491124260355e-07,
+      "logits/chosen": -1.625,
+      "logits/rejected": -1.6015625,
+      "logps/chosen": -235.0,
+      "logps/rejected": -268.0,
+      "loss": 0.3921,
+      "rewards/accuracies": 0.800000011920929,
+      "rewards/chosen": 0.435546875,
+      "rewards/margins": 1.0546875,
+      "rewards/rejected": -0.62109375,
+      "step": 450
+    },
+    {
+      "epoch": 1.4696485623003195,
+      "grad_norm": 14.599835012516651,
+      "learning_rate": 2.834319526627219e-07,
+      "logits/chosen": -1.5546875,
+      "logits/rejected": -1.5859375,
+      "logps/chosen": -234.0,
+      "logps/rejected": -266.0,
+      "loss": 0.357,
+      "rewards/accuracies": 0.8125,
+      "rewards/chosen": 0.353515625,
+      "rewards/margins": 1.1796875,
+      "rewards/rejected": -0.828125,
+      "step": 460
+    },
+    {
+      "epoch": 1.5015974440894568,
+      "grad_norm": 17.01992762798607,
+      "learning_rate": 2.7751479289940824e-07,
+      "logits/chosen": -1.5625,
+      "logits/rejected": -1.625,
+      "logps/chosen": -238.0,
+      "logps/rejected": -272.0,
+      "loss": 0.3465,
+      "rewards/accuracies": 0.875,
+      "rewards/chosen": 0.232421875,
+      "rewards/margins": 1.1640625,
+      "rewards/rejected": -0.93359375,
+      "step": 470
+    },
+    {
+      "epoch": 1.5335463258785942,
+      "grad_norm": 16.926918987794753,
+      "learning_rate": 2.715976331360947e-07,
+      "logits/chosen": -1.625,
+      "logits/rejected": -1.578125,
+      "logps/chosen": -245.0,
+      "logps/rejected": -266.0,
+      "loss": 0.3938,
+      "rewards/accuracies": 0.762499988079071,
+      "rewards/chosen": 0.2197265625,
+      "rewards/margins": 1.0078125,
+      "rewards/rejected": -0.78515625,
+      "step": 480
+    },
+    {
+      "epoch": 1.5654952076677318,
+      "grad_norm": 19.019339456655477,
+      "learning_rate": 2.6568047337278106e-07,
+      "logits/chosen": -1.6328125,
+      "logits/rejected": -1.625,
+      "logps/chosen": -232.0,
+      "logps/rejected": -272.0,
+      "loss": 0.3621,
+      "rewards/accuracies": 0.887499988079071,
+      "rewards/chosen": 0.369140625,
+      "rewards/margins": 1.3125,
+      "rewards/rejected": -0.9453125,
+      "step": 490
+    },
+    {
+      "epoch": 1.5974440894568689,
+      "grad_norm": 19.071902381545915,
+      "learning_rate": 2.5976331360946746e-07,
+      "logits/chosen": -1.5859375,
+      "logits/rejected": -1.6015625,
+      "logps/chosen": -231.0,
+      "logps/rejected": -266.0,
+      "loss": 0.4204,
+      "rewards/accuracies": 0.8125,
+      "rewards/chosen": 0.314453125,
+      "rewards/margins": 1.1796875,
+      "rewards/rejected": -0.8671875,
+      "step": 500
+    },
+    {
+      "epoch": 1.6293929712460065,
+      "grad_norm": 15.876124711628384,
+      "learning_rate": 2.538461538461538e-07,
+      "logits/chosen": -1.6171875,
+      "logits/rejected": -1.609375,
+      "logps/chosen": -236.0,
+      "logps/rejected": -262.0,
+      "loss": 0.3832,
+      "rewards/accuracies": 0.8125,
+      "rewards/chosen": 0.365234375,
+      "rewards/margins": 1.2109375,
+      "rewards/rejected": -0.84375,
+      "step": 510
+    },
+    {
+      "epoch": 1.6613418530351438,
+      "grad_norm": 14.900626138023746,
+      "learning_rate": 2.479289940828402e-07,
+      "logits/chosen": -1.578125,
+      "logits/rejected": -1.5625,
+      "logps/chosen": -232.0,
+      "logps/rejected": -268.0,
+      "loss": 0.3459,
+      "rewards/accuracies": 0.8500000238418579,
+      "rewards/chosen": 0.458984375,
+      "rewards/margins": 1.46875,
+      "rewards/rejected": -1.015625,
+      "step": 520
+    },
+    {
+      "epoch": 1.6932907348242812,
+      "grad_norm": 19.494063656117618,
+      "learning_rate": 2.420118343195266e-07,
+      "logits/chosen": -1.6015625,
+      "logits/rejected": -1.625,
+      "logps/chosen": -230.0,
+      "logps/rejected": -278.0,
+      "loss": 0.3752,
+      "rewards/accuracies": 0.7875000238418579,
+      "rewards/chosen": 0.3671875,
+      "rewards/margins": 1.328125,
+      "rewards/rejected": -0.96484375,
+      "step": 530
+    },
+    {
+      "epoch": 1.7252396166134185,
+      "grad_norm": 15.766044347009432,
+      "learning_rate": 2.36094674556213e-07,
+      "logits/chosen": -1.59375,
+      "logits/rejected": -1.6171875,
+      "logps/chosen": -231.0,
+      "logps/rejected": -270.0,
+      "loss": 0.3902,
+      "rewards/accuracies": 0.8500000238418579,
+      "rewards/chosen": 0.392578125,
+      "rewards/margins": 1.3359375,
+      "rewards/rejected": -0.9375,
+      "step": 540
+    },
+    {
+      "epoch": 1.7571884984025559,
+      "grad_norm": 17.890212932549957,
+      "learning_rate": 2.301775147928994e-07,
+      "logits/chosen": -1.59375,
+      "logits/rejected": -1.6328125,
+      "logps/chosen": -241.0,
+      "logps/rejected": -256.0,
+      "loss": 0.3833,
+      "rewards/accuracies": 0.800000011920929,
+      "rewards/chosen": 0.384765625,
+      "rewards/margins": 0.9921875,
+      "rewards/rejected": -0.609375,
+      "step": 550
+    },
+    {
+      "epoch": 1.7891373801916934,
+      "grad_norm": 17.977480288742196,
+      "learning_rate": 2.242603550295858e-07,
+      "logits/chosen": -1.578125,
+      "logits/rejected": -1.625,
+      "logps/chosen": -239.0,
+      "logps/rejected": -270.0,
+      "loss": 0.3717,
+      "rewards/accuracies": 0.8125,
+      "rewards/chosen": 0.259765625,
+      "rewards/margins": 1.078125,
+      "rewards/rejected": -0.8203125,
+      "step": 560
+    },
+    {
+      "epoch": 1.8210862619808306,
+      "grad_norm": 17.31063553493517,
+      "learning_rate": 2.1834319526627217e-07,
+      "logits/chosen": -1.6015625,
+      "logits/rejected": -1.59375,
+      "logps/chosen": -233.0,
+      "logps/rejected": -280.0,
+      "loss": 0.3695,
+      "rewards/accuracies": 0.9125000238418579,
+      "rewards/chosen": 0.283203125,
+      "rewards/margins": 1.28125,
+      "rewards/rejected": -0.99609375,
+      "step": 570
+    },
+    {
+      "epoch": 1.8530351437699681,
+      "grad_norm": 21.27064447874725,
+      "learning_rate": 2.1242603550295858e-07,
+      "logits/chosen": -1.578125,
+      "logits/rejected": -1.6015625,
+      "logps/chosen": -242.0,
+      "logps/rejected": -276.0,
+      "loss": 0.3861,
+      "rewards/accuracies": 0.7875000238418579,
+      "rewards/chosen": 0.234375,
+      "rewards/margins": 1.0546875,
+      "rewards/rejected": -0.8203125,
+      "step": 580
+    },
+    {
+      "epoch": 1.8849840255591053,
+      "grad_norm": 19.92306872555079,
+      "learning_rate": 2.0650887573964496e-07,
+      "logits/chosen": -1.5859375,
+      "logits/rejected": -1.5859375,
+      "logps/chosen": -235.0,
+      "logps/rejected": -264.0,
+      "loss": 0.3873,
+      "rewards/accuracies": 0.7875000238418579,
+      "rewards/chosen": 0.37890625,
+      "rewards/margins": 1.1015625,
+      "rewards/rejected": -0.72265625,
+      "step": 590
+    },
+    {
+      "epoch": 1.9169329073482428,
+      "grad_norm": 22.677693548477258,
+      "learning_rate": 2.0059171597633133e-07,
+      "logits/chosen": -1.6171875,
+      "logits/rejected": -1.6171875,
+      "logps/chosen": -240.0,
+      "logps/rejected": -258.0,
+      "loss": 0.4016,
+      "rewards/accuracies": 0.800000011920929,
+      "rewards/chosen": 0.2314453125,
+      "rewards/margins": 0.984375,
+      "rewards/rejected": -0.75390625,
+      "step": 600
+    },
+    {
+      "epoch": 1.9488817891373802,
+      "grad_norm": 21.51355955572424,
+      "learning_rate": 1.9467455621301774e-07,
+      "logits/chosen": -1.578125,
+      "logits/rejected": -1.671875,
+      "logps/chosen": -234.0,
+      "logps/rejected": -266.0,
+      "loss": 0.375,
+      "rewards/accuracies": 0.800000011920929,
+      "rewards/chosen": 0.25390625,
+      "rewards/margins": 1.0859375,
+      "rewards/rejected": -0.83203125,
+      "step": 610
+    },
+    {
+      "epoch": 1.9808306709265175,
+      "grad_norm": 14.74606499354998,
+      "learning_rate": 1.8875739644970412e-07,
+      "logits/chosen": -1.578125,
+      "logits/rejected": -1.59375,
+      "logps/chosen": -243.0,
+      "logps/rejected": -268.0,
+      "loss": 0.3809,
+      "rewards/accuracies": 0.8125,
+      "rewards/chosen": 0.03759765625,
+      "rewards/margins": 1.1328125,
+      "rewards/rejected": -1.09375,
+      "step": 620
+    },
+    {
+      "epoch": 2.0,
+      "eval_logits/chosen": -1.6171875,
+      "eval_logits/rejected": -1.59375,
+      "eval_logps/chosen": -244.0,
+      "eval_logps/rejected": -260.0,
+      "eval_loss": 0.5935156345367432,
+      "eval_rewards/accuracies": 0.6428571343421936,
+      "eval_rewards/chosen": -0.0556640625,
+      "eval_rewards/margins": 0.63671875,
+      "eval_rewards/rejected": -0.69140625,
+      "eval_runtime": 12.2896,
+      "eval_samples_per_second": 16.274,
+      "eval_steps_per_second": 0.57,
+      "step": 626
     }
   ],
   "logging_steps": 10,