besimray committed (verified)
Commit 05142d2 · 1 Parent(s): b676857

Training in progress, step 30, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a84deee13dd315e8fcbcb28d22e374b4e919e8d119644daac24c63ee4b326580
+oid sha256:ebf24e1bf62bae17c3e161bca0770f55c82c7c4085ecace43a9d47b1c064ae27
 size 90207248
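
The adapter weights above are a Git LFS object of about 90 MB, which suggests a parameter-efficient adapter rather than full model weights. A minimal sketch, not part of this commit, for listing the tensors in the file, assuming the LFS object has been pulled locally and the safetensors library is installed:

```python
# Sketch: inspect the adapter checkpoint's tensor names, shapes, and dtypes.
# Assumes last-checkpoint/adapter_model.safetensors has been fetched via git-lfs.
from safetensors import safe_open

path = "last-checkpoint/adapter_model.safetensors"
with safe_open(path, framework="pt") as f:   # "pt" -> tensors are returned as torch tensors
    for name in f.keys():                    # tensor names stored in the safetensors header
        tensor = f.get_tensor(name)
        print(name, tuple(tensor.shape), tensor.dtype)
```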
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0ab33aabb23561c4a90afb5ee894e1faaf2a53a68f4feafcc5727886b373b7cf
+oid sha256:4dad4b57dadf9775150401312c0fcb2799d7d6f020292d3017cdaa17644865c1
 size 46057082
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9d8414ba48354825ad20b0a3d80cc23ff1d239366d3e0da53cb9bbe2c2455ccc
+oid sha256:b83137896c579d86619217aca9c0b81561f6e1913616a6fe74d20b7182e4287f
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:321b462a2538632d6d720f0cf198c8f471dee11f51db9b50cc50d1fa7f132bbe
+oid sha256:60663a94a33586da5717f6f80de424ce9fe5b18a8c8d13d4ca09aa40f102443b
 size 1064
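
Each of the four checkpoint files above is stored as a Git LFS pointer (spec version, sha256 oid, byte size) rather than the binary itself. A minimal sketch, not part of this commit, for checking that a locally pulled file matches the oid and size recorded in its pointer:

```python
# Sketch: verify a pulled LFS object against the sha256 oid and size from its pointer file.
import hashlib
import os

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Return True if the file's byte size and SHA-256 digest match the LFS pointer."""
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Values taken from the scheduler.pt hunk above.
print(verify_lfs_object(
    "last-checkpoint/scheduler.pt",
    "60663a94a33586da5717f6f80de424ce9fe5b18a8c8d13d4ca09aa40f102443b",
    1064,
))
```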
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 1.7350926399230957,
-  "best_model_checkpoint": "miner_id_24/checkpoint-20",
-  "epoch": 0.0056477232615601836,
+  "best_metric": 1.6901509761810303,
+  "best_model_checkpoint": "miner_id_24/checkpoint-30",
+  "epoch": 0.008471584892340275,
   "eval_steps": 10,
-  "global_step": 20,
+  "global_step": 30,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -171,6 +171,84 @@
       "eval_samples_per_second": 5.569,
       "eval_steps_per_second": 5.569,
       "step": 20
+    },
+    {
+      "epoch": 0.005930109424638193,
+      "grad_norm": 0.876990020275116,
+      "learning_rate": 0.00019975141040730207,
+      "loss": 1.2469,
+      "step": 21
+    },
+    {
+      "epoch": 0.006212495587716202,
+      "grad_norm": 1.1722772121429443,
+      "learning_rate": 0.0001997041811497882,
+      "loss": 2.2798,
+      "step": 22
+    },
+    {
+      "epoch": 0.006494881750794211,
+      "grad_norm": 0.9232650995254517,
+      "learning_rate": 0.00019965285344390184,
+      "loss": 2.5319,
+      "step": 23
+    },
+    {
+      "epoch": 0.00677726791387222,
+      "grad_norm": 1.6948198080062866,
+      "learning_rate": 0.00019959742939952392,
+      "loss": 1.6864,
+      "step": 24
+    },
+    {
+      "epoch": 0.0070596540769502295,
+      "grad_norm": 1.277117133140564,
+      "learning_rate": 0.00019953791129491983,
+      "loss": 2.1785,
+      "step": 25
+    },
+    {
+      "epoch": 0.007342040240028239,
+      "grad_norm": 3.347221851348877,
+      "learning_rate": 0.00019947430157664576,
+      "loss": 1.5917,
+      "step": 26
+    },
+    {
+      "epoch": 0.007624426403106248,
+      "grad_norm": 1.2190711498260498,
+      "learning_rate": 0.00019940660285944803,
+      "loss": 2.3655,
+      "step": 27
+    },
+    {
+      "epoch": 0.007906812566184257,
+      "grad_norm": 1.9080499410629272,
+      "learning_rate": 0.00019933481792615583,
+      "loss": 2.0602,
+      "step": 28
+    },
+    {
+      "epoch": 0.008189198729262267,
+      "grad_norm": 1.5589416027069092,
+      "learning_rate": 0.0001992589497275665,
+      "loss": 1.9817,
+      "step": 29
+    },
+    {
+      "epoch": 0.008471584892340275,
+      "grad_norm": 2.701538562774658,
+      "learning_rate": 0.0001991790013823246,
+      "loss": 1.434,
+      "step": 30
+    },
+    {
+      "epoch": 0.008471584892340275,
+      "eval_loss": 1.6901509761810303,
+      "eval_runtime": 134.7409,
+      "eval_samples_per_second": 5.537,
+      "eval_steps_per_second": 5.537,
+      "step": 30
     }
   ],
   "logging_steps": 1,
@@ -199,7 +277,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1957607170375680.0,
+  "total_flos": 2936410755563520.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null