joelniklaus committed
Commit 74ce419
1 Parent(s): a408936

Training in progress, step 850000

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:49fd76924702de3e659b86f97421c9807b70e5ff8cc52aaf1b212918e8bd502c
+ oid sha256:3b773563900b2c81dad80f7a8f89256cd4cb7da819c85a112e1f43b4f4c78b53
 size 885325017
last-checkpoint/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:467769c5f591bfe9b33045522d84e361eefd928fd5cf859d5018a4e11e294466
+ oid sha256:aa8c588445c933ccf19ab09e882c29ac9e70c3ffd69d14173d2c426535bbfe5c
 size 442675755
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:64c612be07a67a82095288dd6622b00b1f9b3d317970cbe202fcdcc7cfb804a4
+ oid sha256:ca454c0bb0e9ba35f7134e31d117445ec3697919c26bd9eb945514b70078e43b
 size 13611
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:64c612be07a67a82095288dd6622b00b1f9b3d317970cbe202fcdcc7cfb804a4
+ oid sha256:ca454c0bb0e9ba35f7134e31d117445ec3697919c26bd9eb945514b70078e43b
 size 13611
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:64c612be07a67a82095288dd6622b00b1f9b3d317970cbe202fcdcc7cfb804a4
+ oid sha256:ca454c0bb0e9ba35f7134e31d117445ec3697919c26bd9eb945514b70078e43b
 size 13611
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:64c612be07a67a82095288dd6622b00b1f9b3d317970cbe202fcdcc7cfb804a4
+ oid sha256:ca454c0bb0e9ba35f7134e31d117445ec3697919c26bd9eb945514b70078e43b
 size 13611
last-checkpoint/rng_state_4.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:64c612be07a67a82095288dd6622b00b1f9b3d317970cbe202fcdcc7cfb804a4
+ oid sha256:ca454c0bb0e9ba35f7134e31d117445ec3697919c26bd9eb945514b70078e43b
 size 13611
last-checkpoint/rng_state_5.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:64c612be07a67a82095288dd6622b00b1f9b3d317970cbe202fcdcc7cfb804a4
+ oid sha256:ca454c0bb0e9ba35f7134e31d117445ec3697919c26bd9eb945514b70078e43b
 size 13611
last-checkpoint/rng_state_6.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:64c612be07a67a82095288dd6622b00b1f9b3d317970cbe202fcdcc7cfb804a4
+ oid sha256:ca454c0bb0e9ba35f7134e31d117445ec3697919c26bd9eb945514b70078e43b
 size 13611
last-checkpoint/rng_state_7.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:64c612be07a67a82095288dd6622b00b1f9b3d317970cbe202fcdcc7cfb804a4
+ oid sha256:ca454c0bb0e9ba35f7134e31d117445ec3697919c26bd9eb945514b70078e43b
 size 13611
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:50e51b9224ded3ddffee57f26ec45414409de0232579ddafb7f3e083076fa4c5
+ oid sha256:adedebe0cc7e07de957a9e2967d6e9c3934a9fdca3245f46a29d125e5e36192e
 size 623
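
Each entry above is a Git LFS pointer file rather than the checkpoint binary itself: the pointer records the spec version, the blob's `oid sha256:` digest, and its `size` in bytes, which is why only the hash line changes when a checkpoint of identical byte length is overwritten. Below is a minimal sketch of how such a pointer could be checked against a locally downloaded blob; the helper names and paths are illustrative and not part of this repository.

```python
# Sketch: verify a downloaded blob against a git-lfs spec v1 pointer file.
# Paths and function names are illustrative, not taken from this repo.
import hashlib


def read_pointer(pointer_path):
    """Parse the key/value lines of an LFS pointer (version, oid, size)."""
    fields = {}
    with open(pointer_path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields


def verify_blob(pointer_path, blob_path):
    """Return True if blob_path matches the pointer's size and sha256 oid."""
    fields = read_pointer(pointer_path)
    expected_oid = fields["oid"].split(":", 1)[1]   # strip the "sha256:" prefix
    expected_size = int(fields["size"])

    digest = hashlib.sha256()
    actual_size = 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            actual_size += len(chunk)
    return actual_size == expected_size and digest.hexdigest() == expected_oid


# Hypothetical usage:
# verify_blob("optimizer.pt.pointer", "last-checkpoint/optimizer.pt")
```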
last-checkpoint/trainer_state.json CHANGED
@@ -1,8 +1,8 @@
 {
 "best_metric": null,
 "best_model_checkpoint": null,
- "epoch": 1.332836,
- "global_step": 800000,
+ "epoch": 1.382836,
+ "global_step": 850000,
 "is_hyper_param_search": false,
 "is_local_process_zero": true,
 "is_world_process_zero": true,
@@ -4934,11 +4934,319 @@
 "eval_samples_per_second": 283.443,
 "eval_steps_per_second": 2.268,
 "step": 800000
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 1.0441632244932237e-05,
+ "loss": 0.7962,
+ "step": 801000
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 1.0340722563656107e-05,
+ "loss": 0.7808,
+ "step": 802000
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 1.0240246589884044e-05,
+ "loss": 0.6938,
+ "step": 803000
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 1.0140205422405214e-05,
+ "loss": 0.7039,
+ "step": 804000
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 1.0040600155253765e-05,
+ "loss": 0.6873,
+ "step": 805000
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 9.941431877696955e-06,
+ "loss": 0.7068,
+ "step": 806000
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 9.842701674223187e-06,
+ "loss": 0.7202,
+ "step": 807000
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 9.744410624530148e-06,
+ "loss": 0.7426,
+ "step": 808000
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 9.646559803512994e-06,
+ "loss": 0.8101,
+ "step": 809000
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 9.549150281252633e-06,
+ "loss": 0.8947,
+ "step": 810000
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 9.452183123004e-06,
+ "loss": 0.8125,
+ "step": 811000
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 9.355659389184396e-06,
+ "loss": 0.761,
+ "step": 812000
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 9.259580135361929e-06,
+ "loss": 0.7573,
+ "step": 813000
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 9.163946412243896e-06,
+ "loss": 0.6991,
+ "step": 814000
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 9.068759265665384e-06,
+ "loss": 0.6794,
+ "step": 815000
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 8.974019736577777e-06,
+ "loss": 0.6997,
+ "step": 816000
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 8.879728861037384e-06,
+ "loss": 0.7045,
+ "step": 817000
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 8.785887670194138e-06,
+ "loss": 0.6992,
+ "step": 818000
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 8.692497190280224e-06,
+ "loss": 0.7273,
+ "step": 819000
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 8.599558442598998e-06,
+ "loss": 0.7421,
+ "step": 820000
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 8.507072443513702e-06,
+ "loss": 0.7771,
+ "step": 821000
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 8.415040204436426e-06,
+ "loss": 0.8458,
+ "step": 822000
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 8.323462731816961e-06,
+ "loss": 0.8485,
+ "step": 823000
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 8.232341027131885e-06,
+ "loss": 0.851,
+ "step": 824000
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 8.141676086873572e-06,
+ "loss": 0.7009,
+ "step": 825000
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 8.051468902539272e-06,
+ "loss": 0.7546,
+ "step": 826000
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 7.96172046062032e-06,
+ "loss": 0.7046,
+ "step": 827000
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 7.872431742591268e-06,
+ "loss": 0.6935,
+ "step": 828000
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 7.783603724899257e-06,
+ "loss": 0.6985,
+ "step": 829000
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 7.695237378953223e-06,
+ "loss": 0.7322,
+ "step": 830000
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 7.607333671113409e-06,
+ "loss": 0.7486,
+ "step": 831000
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 7.519893562680663e-06,
+ "loss": 0.7589,
+ "step": 832000
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 7.432918009885997e-06,
+ "loss": 0.8758,
+ "step": 833000
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 7.3464079638801365e-06,
+ "loss": 0.8151,
+ "step": 834000
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 7.260364370723044e-06,
+ "loss": 0.852,
+ "step": 835000
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 7.174788171373731e-06,
+ "loss": 0.6948,
+ "step": 836000
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 7.089680301679752e-06,
+ "loss": 0.7435,
+ "step": 837000
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 7.005041692367154e-06,
+ "loss": 0.6992,
+ "step": 838000
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 6.92087326903022e-06,
+ "loss": 0.6888,
+ "step": 839000
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 6.837175952121306e-06,
+ "loss": 0.7103,
+ "step": 840000
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 6.753950656940905e-06,
+ "loss": 0.7271,
+ "step": 841000
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 6.671198293627479e-06,
+ "loss": 0.7495,
+ "step": 842000
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 6.588919767147639e-06,
+ "loss": 0.7668,
+ "step": 843000
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 6.5071159772861436e-06,
+ "loss": 0.8816,
+ "step": 844000
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 6.425787818636131e-06,
+ "loss": 0.7828,
+ "step": 845000
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 6.344936180589351e-06,
+ "loss": 0.8388,
+ "step": 846000
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 6.264561947326331e-06,
+ "loss": 0.7169,
+ "step": 847000
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 6.184665997806832e-06,
+ "loss": 0.7238,
+ "step": 848000
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 6.1052492057601275e-06,
+ "loss": 0.6972,
+ "step": 849000
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 6.026312439675552e-06,
+ "loss": 0.69,
+ "step": 850000
+ },
+ {
+ "epoch": 1.38,
+ "eval_loss": 0.6436578035354614,
+ "eval_runtime": 15.1707,
+ "eval_samples_per_second": 329.583,
+ "eval_steps_per_second": 2.637,
+ "step": 850000
 }
 ],
 "max_steps": 1000000,
 "num_train_epochs": 9223372036854775807,
- "total_flos": 1.3476327307358503e+19,
+ "total_flos": 1.4318597237649703e+19,
 "trial_name": null,
 "trial_params": null
 }
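
The diff above only appends to the step log: `epoch` and `global_step` advance from 800000 to 850000, fifty new training-loss records (one per 1000 optimizer steps) plus one evaluation record at step 850000 are added, and `total_flos` is updated. The logged learning rates decay smoothly toward the 1000000-step horizon set by `max_steps`, consistent with a cosine-style schedule. A minimal sketch of reading this log from a local clone is shown below; it assumes the standard `log_history` key used by the `transformers` Trainer state, which is not visible in the truncated diff.

```python
# Sketch: reading the training log that this commit extends, from a local
# clone of the repository. The "log_history" key is assumed to be the usual
# transformers TrainerState field; it lies outside the diff shown above.
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(state["global_step"], state["epoch"])   # 850000, 1.382836 after this commit

# Last few entries added by this commit (training losses plus the eval record).
for entry in state["log_history"][-3:]:
    loss = entry.get("loss", entry.get("eval_loss"))
    print(entry["step"], loss, entry.get("learning_rate"))
```

Resuming from this state is normally done by pointing `Trainer.train(resume_from_checkpoint="last-checkpoint")` at the checkpoint directory, which is what the `optimizer.pt`, `scheduler.pt`, and `rng_state_*.pth` files updated above exist for.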
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:467769c5f591bfe9b33045522d84e361eefd928fd5cf859d5018a4e11e294466
+ oid sha256:aa8c588445c933ccf19ab09e882c29ac9e70c3ffd69d14173d2c426535bbfe5c
 size 442675755
runs/Mar21_22-49-35_t1v-n-fb892c44-w-0/events.out.tfevents.1679439157.t1v-n-fb892c44-w-0.15492.0 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:87331d11ce840ee4bb433e73c8d1b3b6770c81ffc34485cab7a0678336d2e30c
- size 36935
+ oid sha256:bcedd3fd7f5318c01bcd646dae9d09107e755e15e5559754e8cc1a9c4607cff0
+ size 45211
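
The growing `events.out.tfevents` file under `runs/` is the TensorBoard log for this run; its size increases (36935 to 45211 bytes) because the same scalars recorded in `trainer_state.json` are also appended there. It can be browsed with `tensorboard --logdir runs/`, or read programmatically as in the sketch below; the scalar tag names depend on the `transformers` version, so they are listed before being queried.

```python
# Sketch: inspecting the updated event file without launching the TensorBoard UI.
# Requires the tensorboard package; tag names are version-dependent, so list them first.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

run_dir = "runs/Mar21_22-49-35_t1v-n-fb892c44-w-0"
accumulator = EventAccumulator(run_dir)
accumulator.Reload()

print(accumulator.Tags()["scalars"])          # available scalar tags (loss, learning rate, ...)

# for event in accumulator.Scalars("train/loss"):   # substitute a tag printed above
#     print(event.step, event.value)
```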