ToastyPigeon committed
Commit 58f2da2 · verified · 1 parent: 85a7f26

Training in progress, step 156, checkpoint

Files changed (28)
  1. last-checkpoint/adapter_model.safetensors +1 -1
  2. last-checkpoint/global_step156/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
  3. last-checkpoint/global_step156/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt +3 -0
  4. last-checkpoint/global_step156/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt +3 -0
  5. last-checkpoint/global_step156/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt +3 -0
  6. last-checkpoint/global_step156/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt +3 -0
  7. last-checkpoint/global_step156/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt +3 -0
  8. last-checkpoint/global_step156/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt +3 -0
  9. last-checkpoint/global_step156/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt +3 -0
  10. last-checkpoint/global_step156/zero_pp_rank_0_mp_rank_00_model_states.pt +3 -0
  11. last-checkpoint/global_step156/zero_pp_rank_1_mp_rank_00_model_states.pt +3 -0
  12. last-checkpoint/global_step156/zero_pp_rank_2_mp_rank_00_model_states.pt +3 -0
  13. last-checkpoint/global_step156/zero_pp_rank_3_mp_rank_00_model_states.pt +3 -0
  14. last-checkpoint/global_step156/zero_pp_rank_4_mp_rank_00_model_states.pt +3 -0
  15. last-checkpoint/global_step156/zero_pp_rank_5_mp_rank_00_model_states.pt +3 -0
  16. last-checkpoint/global_step156/zero_pp_rank_6_mp_rank_00_model_states.pt +3 -0
  17. last-checkpoint/global_step156/zero_pp_rank_7_mp_rank_00_model_states.pt +3 -0
  18. last-checkpoint/latest +1 -1
  19. last-checkpoint/rng_state_0.pth +1 -1
  20. last-checkpoint/rng_state_1.pth +1 -1
  21. last-checkpoint/rng_state_2.pth +1 -1
  22. last-checkpoint/rng_state_3.pth +1 -1
  23. last-checkpoint/rng_state_4.pth +1 -1
  24. last-checkpoint/rng_state_5.pth +1 -1
  25. last-checkpoint/rng_state_6.pth +1 -1
  26. last-checkpoint/rng_state_7.pth +1 -1
  27. last-checkpoint/scheduler.pt +1 -1
  28. last-checkpoint/trainer_state.json +284 -3
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8f881f7af17e8e692fe8e56b0bb3efdf3d1b82897cd46e96e09ffe05b97b0277
+ oid sha256:075fad7910ad5434f099ab9697f7f34d98149f8051c6b928bc1fcda4d81a34cf
  size 550593856
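
Each binary file in this commit is stored as a Git LFS pointer: the repository keeps only the `version`, `oid sha256:` and `size` lines shown above, while the tensor data itself lives in LFS storage. A minimal sketch (hypothetical helper names, standard library only) for checking that a locally downloaded file matches its pointer:

import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_text: str) -> dict:
    """Parse the key/value lines of a Git LFS pointer file."""
    fields = {}
    for line in pointer_text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields  # e.g. {"version": ..., "oid": "sha256:<hex>", "size": "550593856"}

def verify_against_pointer(local_file: Path, pointer_text: str) -> bool:
    """Return True if the local file's size and SHA-256 match the pointer."""
    fields = parse_lfs_pointer(pointer_text)
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])
    h = hashlib.sha256()
    size = 0
    with local_file.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
            size += len(chunk)
    return size == expected_size and h.hexdigest() == expected_oid
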
last-checkpoint/global_step156/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:548a32937f39dcb1efd25459a1c324dc13150384367f773d2dd8448c92667292
+ size 243590464
last-checkpoint/global_step156/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:933a00a04cf11d99eeabc52feb9b90fe01dd428bf5e1c284aea7a2d9d4b5d75c
+ size 243590464
last-checkpoint/global_step156/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:de149628d16885054f37021368e6dc7250b24a86d676fca4c7b73c052bc2a4ee
+ size 243590464
last-checkpoint/global_step156/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:16c2a1903e9427ca8cfc0c202e8aa9f0e5165e722912c3d6957fd3379ca85ba8
+ size 243590464
last-checkpoint/global_step156/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3da911f1de45691c9ecb869622806d3dde56d75e3d47818f3033435c1bd25e28
+ size 243590464
last-checkpoint/global_step156/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ff36ec5a25eee06afd325568cd992f9a94c2c521b70320d9e272376429ac2663
+ size 243590464
last-checkpoint/global_step156/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a7c9560be272f0a903f912fef8dfcbbd8ffb32a2467848964432c04fd6982526
+ size 243590464
last-checkpoint/global_step156/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ef76f92739b8fdf88dd6d6d990885b8e2c7713385d5a8b4f4be3b03f98a6178
+ size 243590464
last-checkpoint/global_step156/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:826faad08e897171bea38c0247ede4f75e080d152d7b926bcc2aae1f38dc241d
+ size 211435686
last-checkpoint/global_step156/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf376209455a315841c1ccc3f778555f40052195e31400dd3a4308d98bfdb581
+ size 211435686
last-checkpoint/global_step156/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c34502a6ce26d4b43c5bdf522735779070cde32a1c44a278e866c690ba32dc24
+ size 211435686
last-checkpoint/global_step156/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f178c64dcb2c4a64925325012066e8e57e05dad95b5c1a9c5c4b3a3de678c7c3
+ size 211435686
last-checkpoint/global_step156/zero_pp_rank_4_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2e898235613f32df36167407e9fcdd27b2e954facaa00f5b5374a5ef019d0126
+ size 211435686
last-checkpoint/global_step156/zero_pp_rank_5_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:605de77a7d38dc8ad8ad07b662756b8d65a0c49c8d2071b9519b8375c19700f6
+ size 211435686
last-checkpoint/global_step156/zero_pp_rank_6_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:45bdcb91d93dc78080afd1d40dacac300abdc5046169b948fd228f2d50d8668b
+ size 211435686
last-checkpoint/global_step156/zero_pp_rank_7_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:01a71440de4b435aa80d6384b5767ae57296096e58d5939720ff88e2dbf3e97f
+ size 211435686
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step117
+ global_step156
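
The `latest` file is a one-line tag that points DeepSpeed at the newest shard directory, now `global_step156`, which holds the eight `bf16_zero_pp_rank_*` optimizer-state and `zero_pp_rank_*` model-state partitions added above. A minimal sketch of how that tag might be resolved and the partitions consolidated, assuming DeepSpeed's `zero_to_fp32` helper is available in the installed version:

from pathlib import Path

# Resolve the checkpoint tag the same way DeepSpeed does: read the plain-text
# `latest` file that this commit updated.
checkpoint_root = Path("last-checkpoint")
tag = (checkpoint_root / "latest").read_text().strip()   # -> "global_step156"
shard_dir = checkpoint_root / tag                         # the 8 ZeRO rank files above

# Optional consolidation into a single fp32 state dict; the helper name and
# signature are an assumption about the installed DeepSpeed version.
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
state_dict = get_fp32_state_dict_from_zero_checkpoint(str(checkpoint_root), tag=tag)
print(f"consolidated {len(state_dict)} tensors from {shard_dir}")
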
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:aac43c4e4aa943b1080df7e7983872659b6a36d1bde144f8865b626eeee7f434
+ oid sha256:71b89abcc0ce5c407fb9d4a92dbfe3b45e6c73df2570fd239cc5b08cb8a1fa73
  size 15920
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f7c947232f2bdf46ca9ace1c9ebaf1e33912f79599e506f7f7c8ff48a11c7b15
+ oid sha256:7fed8f9051879059e4d4a6c0268a1c07bd3d17a63ff9695dbecba7f759b48265
  size 15920
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8cfa2ab32c5e9f8e4c49775769b133f4238f016c4e48e8b12b51160d4afbf783
+ oid sha256:f5cbe3d657365bc422108101740b07139f39ec40e3d2fa73b70f4ab763f46ca0
  size 15920
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c73c8b459dde07f85f2cffa5de5fb0bf136e4a80afc50d2475bc570d14a65b2e
+ oid sha256:dafe72f7606d70daf9651f02eb33505031fd74c64c42caa3b416885081a3a279
  size 15920
last-checkpoint/rng_state_4.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d9533672e6284f7cd4c7c4acb5ad621a319e75dbe8a3e90685943824a71eea36
+ oid sha256:3c62d508b5b72fe3ae01dd0978107829aac79e266e3887fdb5b71e9ab552a058
  size 15920
last-checkpoint/rng_state_5.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7a9be2d2ec0fbf037c14fb0f5f0467f7c960e90414356feec416f8eeb1684671
+ oid sha256:2af92289a5626dcb7fa831a68cf4de5926abfbbf164dcbd6c3968dc0685c46d8
  size 15920
last-checkpoint/rng_state_6.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c6a4539a42d09819ea8a238c4dc3e947e079d2a1390a7babff9377b3718e2a09
+ oid sha256:3d97e7ac49adbd28762ed99d933cc4505e066155bf934c98c26916b8462bde41
  size 15920
last-checkpoint/rng_state_7.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e097a64585a03993feb0a88a81b38258f3325c6b6c17bdedb7335173ed053ac4
+ oid sha256:3ca057b2b9856c6b9c98bc92f3b0e1d16858df77c847fd5298f787624eabf383
  size 15920
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:924db454eec3725afade5f0aae6c3ada154b140ee15da5b50e9bc51987b43065
+ oid sha256:aa19ded294b26c77ff9357b30aeec2441cdd5bbf5a173650ff5ac1680ce0a913
  size 1064
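
The trainer_state.json diff below extends log_history from step 117 to step 156 (epoch from 0.3046875 to 0.40625) and records a new evaluation at step 156. A minimal sketch for inspecting those entries from the checkpoint; only keys visible in the diff are assumed:

import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(state["global_step"], state["epoch"])  # 156, 0.40625 in this checkpoint

# Train entries carry "loss"; eval entries (every eval_steps=39 steps) carry "eval_loss".
for entry in state["log_history"][-5:]:
    if "eval_loss" in entry:
        print(f'step {entry["step"]}: eval_loss={entry["eval_loss"]}')
    elif "loss" in entry:
        print(f'step {entry["step"]}: loss={entry["loss"]} lr={entry["learning_rate"]:.2e}')
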
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.3046875,
+ "epoch": 0.40625,
  "eval_steps": 39,
- "global_step": 117,
+ "global_step": 156,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -858,6 +858,287 @@
  "eval_samples_per_second": 1.223,
  "eval_steps_per_second": 0.153,
  "step": 117
+ },
+ {
+ "epoch": 0.3072916666666667,
+ "grad_norm": 0.14857253164474957,
+ "learning_rate": 8.484051962083579e-05,
+ "loss": 2.3431,
+ "step": 118
+ },
+ {
+ "epoch": 0.3098958333333333,
+ "grad_norm": 0.14191988741229736,
+ "learning_rate": 8.454870245003141e-05,
+ "loss": 2.3861,
+ "step": 119
+ },
+ {
+ "epoch": 0.3125,
+ "grad_norm": 0.1516024307266096,
+ "learning_rate": 8.425468421509349e-05,
+ "loss": 2.411,
+ "step": 120
+ },
+ {
+ "epoch": 0.3151041666666667,
+ "grad_norm": 0.15002982823803926,
+ "learning_rate": 8.395848681725416e-05,
+ "loss": 2.49,
+ "step": 121
+ },
+ {
+ "epoch": 0.3177083333333333,
+ "grad_norm": 0.14904411125984457,
+ "learning_rate": 8.366013232007002e-05,
+ "loss": 2.2998,
+ "step": 122
+ },
+ {
+ "epoch": 0.3203125,
+ "grad_norm": 0.15513959182528284,
+ "learning_rate": 8.335964294777862e-05,
+ "loss": 2.4748,
+ "step": 123
+ },
+ {
+ "epoch": 0.3229166666666667,
+ "grad_norm": 0.16028597088352084,
+ "learning_rate": 8.305704108364301e-05,
+ "loss": 2.5415,
+ "step": 124
+ },
+ {
+ "epoch": 0.3255208333333333,
+ "grad_norm": 0.1382845005964039,
+ "learning_rate": 8.275234926828446e-05,
+ "loss": 2.3863,
+ "step": 125
+ },
+ {
+ "epoch": 0.328125,
+ "grad_norm": 0.13146327268638525,
+ "learning_rate": 8.244559019800328e-05,
+ "loss": 2.2718,
+ "step": 126
+ },
+ {
+ "epoch": 0.3307291666666667,
+ "grad_norm": 0.14918540441469405,
+ "learning_rate": 8.213678672308841e-05,
+ "loss": 2.29,
+ "step": 127
+ },
+ {
+ "epoch": 0.3333333333333333,
+ "grad_norm": 0.14572964949235084,
+ "learning_rate": 8.182596184611514e-05,
+ "loss": 2.3865,
+ "step": 128
+ },
+ {
+ "epoch": 0.3359375,
+ "grad_norm": 0.1467709610422986,
+ "learning_rate": 8.151313872023172e-05,
+ "loss": 2.3566,
+ "step": 129
+ },
+ {
+ "epoch": 0.3385416666666667,
+ "grad_norm": 0.1696428030171741,
+ "learning_rate": 8.119834064743469e-05,
+ "loss": 2.4145,
+ "step": 130
+ },
+ {
+ "epoch": 0.3411458333333333,
+ "grad_norm": 0.1567212274267596,
+ "learning_rate": 8.088159107683314e-05,
+ "loss": 2.3996,
+ "step": 131
+ },
+ {
+ "epoch": 0.34375,
+ "grad_norm": 0.16059171688564705,
+ "learning_rate": 8.056291360290201e-05,
+ "loss": 2.4796,
+ "step": 132
+ },
+ {
+ "epoch": 0.3463541666666667,
+ "grad_norm": 0.17528470844982455,
+ "learning_rate": 8.024233196372453e-05,
+ "loss": 2.3711,
+ "step": 133
+ },
+ {
+ "epoch": 0.3489583333333333,
+ "grad_norm": 0.15733889602776618,
+ "learning_rate": 7.9919870039224e-05,
+ "loss": 2.2677,
+ "step": 134
+ },
+ {
+ "epoch": 0.3515625,
+ "grad_norm": 0.14873870088112393,
+ "learning_rate": 7.959555184938495e-05,
+ "loss": 2.4515,
+ "step": 135
+ },
+ {
+ "epoch": 0.3541666666666667,
+ "grad_norm": 0.15417965735327782,
+ "learning_rate": 7.926940155246397e-05,
+ "loss": 2.4285,
+ "step": 136
+ },
+ {
+ "epoch": 0.3567708333333333,
+ "grad_norm": 0.1431617281765595,
+ "learning_rate": 7.894144344319014e-05,
+ "loss": 2.2096,
+ "step": 137
+ },
+ {
+ "epoch": 0.359375,
+ "grad_norm": 0.1582812354896834,
+ "learning_rate": 7.861170195095537e-05,
+ "loss": 2.5397,
+ "step": 138
+ },
+ {
+ "epoch": 0.3619791666666667,
+ "grad_norm": 0.17372786439002758,
+ "learning_rate": 7.828020163799455e-05,
+ "loss": 2.4293,
+ "step": 139
+ },
+ {
+ "epoch": 0.3645833333333333,
+ "grad_norm": 0.12985922414383083,
+ "learning_rate": 7.794696719755612e-05,
+ "loss": 2.3064,
+ "step": 140
+ },
+ {
+ "epoch": 0.3671875,
+ "grad_norm": 0.14444453543114258,
+ "learning_rate": 7.761202345206249e-05,
+ "loss": 2.4924,
+ "step": 141
+ },
+ {
+ "epoch": 0.3697916666666667,
+ "grad_norm": 0.14361378320872797,
+ "learning_rate": 7.727539535126118e-05,
+ "loss": 2.485,
+ "step": 142
+ },
+ {
+ "epoch": 0.3723958333333333,
+ "grad_norm": 0.14506765397834698,
+ "learning_rate": 7.69371079703662e-05,
+ "loss": 2.4081,
+ "step": 143
+ },
+ {
+ "epoch": 0.375,
+ "grad_norm": 0.15082242594669004,
+ "learning_rate": 7.65971865081904e-05,
+ "loss": 2.4042,
+ "step": 144
+ },
+ {
+ "epoch": 0.3776041666666667,
+ "grad_norm": 0.14811116555183387,
+ "learning_rate": 7.625565628526818e-05,
+ "loss": 2.5335,
+ "step": 145
+ },
+ {
+ "epoch": 0.3802083333333333,
+ "grad_norm": 0.14726265807121236,
+ "learning_rate": 7.591254274196959e-05,
+ "loss": 2.2424,
+ "step": 146
+ },
+ {
+ "epoch": 0.3828125,
+ "grad_norm": 0.1582839530592079,
+ "learning_rate": 7.556787143660521e-05,
+ "loss": 2.3499,
+ "step": 147
+ },
+ {
+ "epoch": 0.3854166666666667,
+ "grad_norm": 0.15670436913091046,
+ "learning_rate": 7.522166804352226e-05,
+ "loss": 2.4295,
+ "step": 148
+ },
+ {
+ "epoch": 0.3880208333333333,
+ "grad_norm": 0.14730612216140554,
+ "learning_rate": 7.487395835119231e-05,
+ "loss": 2.3101,
+ "step": 149
+ },
+ {
+ "epoch": 0.390625,
+ "grad_norm": 0.15120580103738476,
+ "learning_rate": 7.452476826029011e-05,
+ "loss": 2.4888,
+ "step": 150
+ },
+ {
+ "epoch": 0.3932291666666667,
+ "grad_norm": 0.15081791485599214,
+ "learning_rate": 7.417412378176446e-05,
+ "loss": 2.3946,
+ "step": 151
+ },
+ {
+ "epoch": 0.3958333333333333,
+ "grad_norm": 0.13244742107918075,
+ "learning_rate": 7.382205103490043e-05,
+ "loss": 2.2704,
+ "step": 152
+ },
+ {
+ "epoch": 0.3984375,
+ "grad_norm": 0.14571632653572605,
+ "learning_rate": 7.346857624537407e-05,
+ "loss": 2.4644,
+ "step": 153
+ },
+ {
+ "epoch": 0.4010416666666667,
+ "grad_norm": 0.15252862923031207,
+ "learning_rate": 7.311372574329854e-05,
+ "loss": 2.554,
+ "step": 154
+ },
+ {
+ "epoch": 0.4036458333333333,
+ "grad_norm": 0.14927980854491554,
+ "learning_rate": 7.275752596126308e-05,
+ "loss": 2.3804,
+ "step": 155
+ },
+ {
+ "epoch": 0.40625,
+ "grad_norm": 0.1406590744757482,
+ "learning_rate": 7.240000343236385e-05,
+ "loss": 2.2504,
+ "step": 156
+ },
+ {
+ "epoch": 0.40625,
+ "eval_loss": 2.408146619796753,
+ "eval_runtime": 65.4856,
+ "eval_samples_per_second": 1.222,
+ "eval_steps_per_second": 0.153,
+ "step": 156
  }
  ],
  "logging_steps": 1,
@@ -877,7 +1158,7 @@
  "attributes": {}
  }
  },
- "total_flos": 38677656895488.0,
+ "total_flos": 51570209193984.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null