besimray committed (verified)
Commit bae1b45 · 1 Parent(s): 4dd50ff

Training in progress, step 40, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:334c486ae7a9ec594d6cfbc08974813365d74f826bb2fdfaf626162f2d0159fb
+oid sha256:63d4bb6a956fb56cd0088dc28547952d24c08f2eb86d2aec9d4b21483adb9177
 size 90207248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c4b55ec3d86252fd387d6c009c1bdf36fb7f41fddcf6b88277b42c1126ff809d
+oid sha256:8bc10445b82405fb6d37d22c7fe973fefbd6d5e7a4eae5c80f3b4858a86a96da
 size 46057082
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b83137896c579d86619217aca9c0b81561f6e1913616a6fe74d20b7182e4287f
+oid sha256:d872420bc3dc74bb7836dfa2f4957583cebe65bc0ce2ca2cd3f24b22a86bb602
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:60663a94a33586da5717f6f80de424ce9fe5b18a8c8d13d4ca09aa40f102443b
+oid sha256:f924e37bc06756f5535d9fa2079568e2b7869291abee642e58937943662c2f6f
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 1.6834224462509155,
-  "best_model_checkpoint": "miner_id_24/checkpoint-30",
-  "epoch": 0.008471584892340275,
+  "best_metric": 1.6503487825393677,
+  "best_model_checkpoint": "miner_id_24/checkpoint-40",
+  "epoch": 0.011295446523120367,
   "eval_steps": 10,
-  "global_step": 30,
+  "global_step": 40,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -249,6 +249,84 @@
       "eval_samples_per_second": 5.597,
       "eval_steps_per_second": 5.597,
       "step": 30
+    },
+    {
+      "epoch": 0.008753971055418284,
+      "grad_norm": 2.855771780014038,
+      "learning_rate": 0.00019909497617679348,
+      "loss": 2.608,
+      "step": 31
+    },
+    {
+      "epoch": 0.009036357218496294,
+      "grad_norm": 2.0999650955200195,
+      "learning_rate": 0.0001990068775649202,
+      "loss": 1.6182,
+      "step": 32
+    },
+    {
+      "epoch": 0.009318743381574303,
+      "grad_norm": 5.745433330535889,
+      "learning_rate": 0.00019891470916809362,
+      "loss": 1.6578,
+      "step": 33
+    },
+    {
+      "epoch": 0.009601129544652313,
+      "grad_norm": 1.7802093029022217,
+      "learning_rate": 0.00019881847477499557,
+      "loss": 1.8047,
+      "step": 34
+    },
+    {
+      "epoch": 0.009883515707730321,
+      "grad_norm": 3.094785451889038,
+      "learning_rate": 0.00019871817834144504,
+      "loss": 1.2005,
+      "step": 35
+    },
+    {
+      "epoch": 0.01016590187080833,
+      "grad_norm": 2.3366200923919678,
+      "learning_rate": 0.0001986138239902355,
+      "loss": 2.0731,
+      "step": 36
+    },
+    {
+      "epoch": 0.01044828803388634,
+      "grad_norm": 3.872102737426758,
+      "learning_rate": 0.0001985054160109657,
+      "loss": 1.301,
+      "step": 37
+    },
+    {
+      "epoch": 0.010730674196964348,
+      "grad_norm": 1.3710724115371704,
+      "learning_rate": 0.00019839295885986296,
+      "loss": 1.4742,
+      "step": 38
+    },
+    {
+      "epoch": 0.011013060360042359,
+      "grad_norm": 2.481275796890259,
+      "learning_rate": 0.0001982764571596004,
+      "loss": 1.5866,
+      "step": 39
+    },
+    {
+      "epoch": 0.011295446523120367,
+      "grad_norm": 1.8309324979782104,
+      "learning_rate": 0.00019815591569910654,
+      "loss": 2.644,
+      "step": 40
+    },
+    {
+      "epoch": 0.011295446523120367,
+      "eval_loss": 1.6503487825393677,
+      "eval_runtime": 133.397,
+      "eval_samples_per_second": 5.592,
+      "eval_steps_per_second": 5.592,
+      "step": 40
     }
   ],
   "logging_steps": 1,
@@ -277,7 +355,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2936410755563520.0,
+  "total_flos": 3915214340751360.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null