Rakhman16 committed commit c7057d3 (verified) · 1 parent: 565424b

Training in progress, step 1000, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:52b066dd76450f1fc2cd5dd08d20ae972b8a94050621292c31d8235f96e7108a
+oid sha256:fb86b7f70b45053cb6d55ae4376642fa03d54495b5c27e78b0cd2d46a00b5c0b
 size 891558696
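
The model.safetensors entry above is a Git LFS pointer: the file tracked in the repo holds only a version line, the sha256 oid of the real object, and its size, while the roughly 891 MB of weights live in LFS storage. A minimal sketch (assuming the checkpoint has already been downloaded locally; the local path is hypothetical) that checks a file against the oid and size from the new pointer:

```python
import hashlib
from pathlib import Path

def verify_lfs_object(file_path: str, expected_oid: str, expected_size: int) -> bool:
    """Check a downloaded file against the oid/size recorded in its LFS pointer."""
    path = Path(file_path)
    if path.stat().st_size != expected_size:
        return False
    sha256 = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            sha256.update(chunk)
    return sha256.hexdigest() == expected_oid

# Values taken from the new pointer above; the local path is an assumption.
print(verify_lfs_object(
    "last-checkpoint/model.safetensors",
    "fb86b7f70b45053cb6d55ae4376642fa03d54495b5c27e78b0cd2d46a00b5c0b",
    891558696,
))
```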
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cc8240f13191a1e88e705958073d032c3ea3f1e76fd691d5ec8e01e44a4b1264
+oid sha256:d232a9a6c756331696e700546387d842e2067cae42866a06dd49ad2965fbaa1e
 size 1783272762
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f21b33c421be30ae2ef87f38ded5d07cf75a2ddfe995129adfae900c14c14b42
+oid sha256:5b1952f58fa52e592468a775e85ac7e72d5b2b6d4cdb2766a2b20ab5e3f11899
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3f138ad5145d2f28383f769cd6a0c1fe56a4d753047d159ec5a6175b52316e4c
+oid sha256:e999a568b2d0f01998dabb86d34bac49ae5618451719e3426ca61159d87ee5c4
 size 1064
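
Together with trainer_state.json below, model.safetensors, optimizer.pt, rng_state.pth, and scheduler.pt make up a standard Hugging Face Trainer checkpoint, so the run can presumably be resumed from this directory. A minimal sketch, assuming `trainer` is a transformers.Trainer already configured with the same model, datasets, and TrainingArguments as this run:

```python
# Hypothetical resume call; "last-checkpoint" is the directory updated in this commit.
# Trainer restores the model weights, optimizer, scheduler, and RNG state from it.
trainer.train(resume_from_checkpoint="last-checkpoint")
```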
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.3375319242477417,
-  "best_model_checkpoint": "./fine-tuned/checkpoint-500",
-  "epoch": 0.8312551953449709,
+  "best_metric": 0.3063213527202606,
+  "best_model_checkpoint": "./fine-tuned/checkpoint-1000",
+  "epoch": 1.6625103906899419,
   "eval_steps": 100,
-  "global_step": 500,
+  "global_step": 1000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -117,6 +117,116 @@
       "eval_samples_per_second": 12.924,
       "eval_steps_per_second": 1.622,
       "step": 500
+    },
+    {
+      "epoch": 0.914380714879468,
+      "grad_norm": 60626.75,
+      "learning_rate": 3.856073211314476e-05,
+      "loss": 0.405,
+      "step": 550
+    },
+    {
+      "epoch": 0.9975062344139651,
+      "grad_norm": 51910.04296875,
+      "learning_rate": 3.752079866888519e-05,
+      "loss": 0.4186,
+      "step": 600
+    },
+    {
+      "epoch": 0.9975062344139651,
+      "eval_loss": 0.3285529315471649,
+      "eval_runtime": 38.3187,
+      "eval_samples_per_second": 12.892,
+      "eval_steps_per_second": 1.618,
+      "step": 600
+    },
+    {
+      "epoch": 1.0806317539484622,
+      "grad_norm": 48182.8046875,
+      "learning_rate": 3.6480865224625625e-05,
+      "loss": 0.3925,
+      "step": 650
+    },
+    {
+      "epoch": 1.1637572734829593,
+      "grad_norm": 51930.34765625,
+      "learning_rate": 3.544093178036606e-05,
+      "loss": 0.3705,
+      "step": 700
+    },
+    {
+      "epoch": 1.1637572734829593,
+      "eval_loss": 0.3227428197860718,
+      "eval_runtime": 38.1185,
+      "eval_samples_per_second": 12.96,
+      "eval_steps_per_second": 1.627,
+      "step": 700
+    },
+    {
+      "epoch": 1.2468827930174564,
+      "grad_norm": 60283.91015625,
+      "learning_rate": 3.4400998336106495e-05,
+      "loss": 0.3823,
+      "step": 750
+    },
+    {
+      "epoch": 1.3300083125519535,
+      "grad_norm": 55843.62109375,
+      "learning_rate": 3.336106489184692e-05,
+      "loss": 0.3763,
+      "step": 800
+    },
+    {
+      "epoch": 1.3300083125519535,
+      "eval_loss": 0.3174193501472473,
+      "eval_runtime": 38.4517,
+      "eval_samples_per_second": 12.847,
+      "eval_steps_per_second": 1.612,
+      "step": 800
+    },
+    {
+      "epoch": 1.4131338320864506,
+      "grad_norm": 40623.88671875,
+      "learning_rate": 3.232113144758736e-05,
+      "loss": 0.3509,
+      "step": 850
+    },
+    {
+      "epoch": 1.4962593516209477,
+      "grad_norm": 57212.0546875,
+      "learning_rate": 3.128119800332779e-05,
+      "loss": 0.3624,
+      "step": 900
+    },
+    {
+      "epoch": 1.4962593516209477,
+      "eval_loss": 0.31218209862709045,
+      "eval_runtime": 38.1558,
+      "eval_samples_per_second": 12.947,
+      "eval_steps_per_second": 1.625,
+      "step": 900
+    },
+    {
+      "epoch": 1.5793848711554448,
+      "grad_norm": 49043.25390625,
+      "learning_rate": 3.0241264559068223e-05,
+      "loss": 0.3656,
+      "step": 950
+    },
+    {
+      "epoch": 1.6625103906899419,
+      "grad_norm": 50702.92578125,
+      "learning_rate": 2.9201331114808654e-05,
+      "loss": 0.3741,
+      "step": 1000
+    },
+    {
+      "epoch": 1.6625103906899419,
+      "eval_loss": 0.3063213527202606,
+      "eval_runtime": 38.2912,
+      "eval_samples_per_second": 12.901,
+      "eval_steps_per_second": 1.619,
+      "step": 1000
     }
   ],
   "logging_steps": 50,
@@ -136,7 +246,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 4871663124480000.0,
+  "total_flos": 9742717291069440.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null