besimray committed
Commit 261d800
Parent: f60a6b4

Training in progress, step 30, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a0222c0047535996deb8c8cb559888e4d66b3038044de3ae8bf08f3bd010cc02
+ oid sha256:e02bf26d5b2401ec7dc326297f3ee2388f15d11930d5efb6984ae8f6428a10f9
  size 22573704
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9677e62c9c2ab535a7409c2bbd60fd7316b6431801418e2a7ba28e522edbfac2
+ oid sha256:da94be3ae0a64e853b92443c0a2c39df2e4402a3604ed63a25872c61f1cc51db
  size 11710970
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bd641d2c2a7f8e3282e19ce9e5df14c6531555bf46535a1b63e4e32f7c2a55e1
+ oid sha256:27827d7d71d66eac185d181a061a3fc686c05fceae71aefa31bdc9f272ad8dc6
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8f3583f6fb6381489c8d29ab09722642111b0d57df55aa7b4c72ce687a83cdee
+ oid sha256:2b01233c08a586038ebf1cf3e5cbb4f41b3484fab28bfbbe42cb46fd4e382bde
  size 1064
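
Each of the four binary checkpoint files above is stored through Git LFS, so the commit only rewrites the three-line pointer (spec version, sha256 object id, byte size). As a minimal sketch, assuming a locally downloaded copy of the adapter file at the hypothetical path below, the pointer fields from this diff can be checked against the actual payload:

```python
import hashlib
from pathlib import Path

# Hypothetical local path; adjust to wherever the checkpoint was downloaded.
payload = Path("last-checkpoint/adapter_model.safetensors")

# Values taken from the new pointer in this commit.
expected_oid = "e02bf26d5b2401ec7dc326297f3ee2388f15d11930d5efb6984ae8f6428a10f9"
expected_size = 22573704

data = payload.read_bytes()
assert len(data) == expected_size, "size does not match the LFS pointer"
assert hashlib.sha256(data).hexdigest() == expected_oid, "sha256 does not match the LFS pointer"
print("pointer and payload agree")
```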
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 0.784389317035675,
- "best_model_checkpoint": "miner_id_24/checkpoint-20",
- "epoch": 1.1428571428571428,
+ "best_metric": 0.7271688580513,
+ "best_model_checkpoint": "miner_id_24/checkpoint-30",
+ "epoch": 1.7142857142857144,
  "eval_steps": 10,
- "global_step": 20,
+ "global_step": 30,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -171,6 +171,84 @@
  "eval_samples_per_second": 7.191,
  "eval_steps_per_second": 1.918,
  "step": 20
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.35325202345848083,
+ "learning_rate": 8.47037097610317e-05,
+ "loss": 0.7718,
+ "step": 21
+ },
+ {
+ "epoch": 1.2571428571428571,
+ "grad_norm": 0.30332982540130615,
+ "learning_rate": 8.198365107794457e-05,
+ "loss": 0.6796,
+ "step": 22
+ },
+ {
+ "epoch": 1.3142857142857143,
+ "grad_norm": 0.365041047334671,
+ "learning_rate": 7.909294577789766e-05,
+ "loss": 0.7059,
+ "step": 23
+ },
+ {
+ "epoch": 1.3714285714285714,
+ "grad_norm": 0.2987039387226105,
+ "learning_rate": 7.604701702439651e-05,
+ "loss": 0.6808,
+ "step": 24
+ },
+ {
+ "epoch": 1.4285714285714286,
+ "grad_norm": 0.3556249141693115,
+ "learning_rate": 7.286211616523193e-05,
+ "loss": 0.7061,
+ "step": 25
+ },
+ {
+ "epoch": 1.4857142857142858,
+ "grad_norm": 0.37903037667274475,
+ "learning_rate": 6.95552360245078e-05,
+ "loss": 0.736,
+ "step": 26
+ },
+ {
+ "epoch": 1.5428571428571427,
+ "grad_norm": 0.36219486594200134,
+ "learning_rate": 6.614402023857232e-05,
+ "loss": 0.7291,
+ "step": 27
+ },
+ {
+ "epoch": 1.6,
+ "grad_norm": 0.31098002195358276,
+ "learning_rate": 6.264666911958404e-05,
+ "loss": 0.6836,
+ "step": 28
+ },
+ {
+ "epoch": 1.657142857142857,
+ "grad_norm": 0.3429687023162842,
+ "learning_rate": 5.908184254897182e-05,
+ "loss": 0.7052,
+ "step": 29
+ },
+ {
+ "epoch": 1.7142857142857144,
+ "grad_norm": 0.3376871645450592,
+ "learning_rate": 5.546856041889373e-05,
+ "loss": 0.678,
+ "step": 30
+ },
+ {
+ "epoch": 1.7142857142857144,
+ "eval_loss": 0.7271688580513,
+ "eval_runtime": 2.0876,
+ "eval_samples_per_second": 7.185,
+ "eval_steps_per_second": 1.916,
+ "step": 30
  }
  ],
  "logging_steps": 1,
@@ -199,7 +277,7 @@
  "attributes": {}
  }
  },
- "total_flos": 7697456494018560.0,
+ "total_flos": 1.154618474102784e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null