besimray committed (verified)
Commit d46d07a · 1 Parent(s): eb29803

Training in progress, step 60, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b3ebae6745992a9891531c7f9dc21af61684fec4cad0bdb8ec71e2d7e0190098
+ oid sha256:1349c39706954f73b87ce2e0d506e00758e8b213bb17e1a6dd1aed5c3f715b64
  size 90207248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:086c8bf4279e90d468163e6119ad5a62eca1c60efa475a439bfc8e41ebe89eaa
+ oid sha256:76e9ed812ac31db5016397a98d34a65c97abc515da8c38a3f1a9ed81553a05d4
  size 46057082
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:93b715727f08f3bec1a4243110369e5be33fa530d65244e67bf09606e5d6da48
+ oid sha256:78f4cc9cdab22e1e0f660a41fa1d4cd32f78600a13210572d12e13002a5dbf56
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cfc2bf0eccc6c4e85c949c664a83bcd160767da77920eebf352a6f7f7c4c9b2e
+ oid sha256:54b996514a941dd419a3f7869454171b960cb51cf1b91d9b10dbdcf1b1e50a10
  size 1064
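
Each binary file above (adapter_model.safetensors, optimizer.pt, rng_state.pth, scheduler.pt) is tracked with Git LFS, so only the pointer file changes in this commit: the `oid sha256:` line is replaced while `version` and `size` stay the same, the new adapter and optimizer states happening to be byte-for-byte the same size. Below is a minimal sketch, not part of this repository, of how one could check that a locally downloaded object matches its pointer; the local path is hypothetical, and the expected values are taken from the new adapter pointer above.

```python
import hashlib
from pathlib import Path

def lfs_sha256(path: Path, chunk_size: int = 1 << 20) -> str:
    """Compute the SHA-256 digest that Git LFS records in the pointer's `oid` field."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Hypothetical local path to the checked-out checkpoint file.
adapter = Path("last-checkpoint/adapter_model.safetensors")

# Expected values copied from the new LFS pointer in the diff above.
expected_oid = "1349c39706954f73b87ce2e0d506e00758e8b213bb17e1a6dd1aed5c3f715b64"
expected_size = 90207248

assert adapter.stat().st_size == expected_size, "size does not match LFS pointer"
assert lfs_sha256(adapter) == expected_oid, "sha256 does not match LFS pointer"
print("adapter_model.safetensors matches its Git LFS pointer")
```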
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 1.6434087753295898,
- "best_model_checkpoint": "miner_id_24/checkpoint-50",
- "epoch": 0.014119308153900459,
+ "best_metric": 1.633779525756836,
+ "best_model_checkpoint": "miner_id_24/checkpoint-60",
+ "epoch": 0.01694316978468055,
  "eval_steps": 10,
- "global_step": 50,
+ "global_step": 60,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -405,6 +405,84 @@
  "eval_samples_per_second": 5.593,
  "eval_steps_per_second": 5.593,
  "step": 50
+ },
+ {
+ "epoch": 0.014401694316978467,
+ "grad_norm": 2.5457074642181396,
+ "learning_rate": 0.00019656487088855592,
+ "loss": 1.5371,
+ "step": 51
+ },
+ {
+ "epoch": 0.014684080480056478,
+ "grad_norm": 2.4005753993988037,
+ "learning_rate": 0.00019639628606958533,
+ "loss": 1.0716,
+ "step": 52
+ },
+ {
+ "epoch": 0.014966466643134486,
+ "grad_norm": 2.4281392097473145,
+ "learning_rate": 0.0001962237387768529,
+ "loss": 2.1807,
+ "step": 53
+ },
+ {
+ "epoch": 0.015248852806212496,
+ "grad_norm": 2.5013136863708496,
+ "learning_rate": 0.00019604723610310194,
+ "loss": 1.0288,
+ "step": 54
+ },
+ {
+ "epoch": 0.015531238969290505,
+ "grad_norm": 2.0805702209472656,
+ "learning_rate": 0.00019586678530366606,
+ "loss": 1.1582,
+ "step": 55
+ },
+ {
+ "epoch": 0.015813625132368513,
+ "grad_norm": 7.539531707763672,
+ "learning_rate": 0.00019568239379617088,
+ "loss": 1.7791,
+ "step": 56
+ },
+ {
+ "epoch": 0.016096011295446522,
+ "grad_norm": 1.5069524049758911,
+ "learning_rate": 0.00019549406916022905,
+ "loss": 1.6449,
+ "step": 57
+ },
+ {
+ "epoch": 0.016378397458524534,
+ "grad_norm": 1.1627360582351685,
+ "learning_rate": 0.00019530181913712872,
+ "loss": 1.2881,
+ "step": 58
+ },
+ {
+ "epoch": 0.016660783621602542,
+ "grad_norm": 5.9471282958984375,
+ "learning_rate": 0.00019510565162951537,
+ "loss": 1.6948,
+ "step": 59
+ },
+ {
+ "epoch": 0.01694316978468055,
+ "grad_norm": 1.4962432384490967,
+ "learning_rate": 0.00019490557470106686,
+ "loss": 1.3254,
+ "step": 60
+ },
+ {
+ "epoch": 0.01694316978468055,
+ "eval_loss": 1.633779525756836,
+ "eval_runtime": 133.2937,
+ "eval_samples_per_second": 5.597,
+ "eval_steps_per_second": 5.597,
+ "step": 60
  }
  ],
  "logging_steps": 1,
@@ -433,7 +511,7 @@
  "attributes": {}
  }
  },
- "total_flos": 4894017925939200.0,
+ "total_flos": 5872821511127040.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null