RodrigoSalazar-U committed (verified)
Commit 5fea84f · Parent: eb9c88a

Training in progress, step 8500, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0d87811e78f2411bb51f67277ec4ff3e7128aeba1a8f60f6083ffcf5e499137c
+oid sha256:521e4efd5377e853a24b9e89c6aab6af4b9c4cd2d9e34944c2b8bf1d605ab7da
 size 4785762744
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:72baa3d9ed25be64c5f61bc4dcd4ad23809cf47c521105da8c0a4a00395ac985
+oid sha256:872aeb7b9e23ef004bd52b7a5f63d3f3a2758ed6247fde9119589cd936404441
 size 3497859804
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:acc2d65074a2fcc8399dba6d3c0a62d0568496e8b5831e17319a7dcd95d56dc4
+oid sha256:56ada7a9076e894e38e63b2e547d8bfe564d09412403a53a9fd4f37e79358b8d
 size 14308
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:833d77c31bdb2ba7eb60d0271717ab3310ed32ad11e10d8490b2d98bed4a9687
+oid sha256:6266f19ec12f14bd9a4d4ed0cba0ac52c21e3e395901947dc6348d59d7ad7dc1
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 3.81770460510618,
+  "epoch": 4.056311142925316,
   "eval_steps": 500,
-  "global_step": 8000,
+  "global_step": 8500,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -567,6 +567,41 @@
       "learning_rate": 1.6065010321302785e-05,
       "loss": 0.2601,
       "step": 8000
+    },
+    {
+      "epoch": 3.8654259126700072,
+      "grad_norm": 1.5045113563537598,
+      "learning_rate": 1.486034111767885e-05,
+      "loss": 0.2628,
+      "step": 8100
+    },
+    {
+      "epoch": 3.913147220233834,
+      "grad_norm": 1.5543336868286133,
+      "learning_rate": 1.3694693963836646e-05,
+      "loss": 0.2626,
+      "step": 8200
+    },
+    {
+      "epoch": 3.9608685277976616,
+      "grad_norm": 1.1248536109924316,
+      "learning_rate": 1.2569363292981106e-05,
+      "loss": 0.257,
+      "step": 8300
+    },
+    {
+      "epoch": 4.008589835361489,
+      "grad_norm": 0.8004603981971741,
+      "learning_rate": 1.1485598767483852e-05,
+      "loss": 0.2457,
+      "step": 8400
+    },
+    {
+      "epoch": 4.056311142925316,
+      "grad_norm": 0.8413916826248169,
+      "learning_rate": 1.0444603891152616e-05,
+      "loss": 0.1514,
+      "step": 8500
     }
   ],
   "logging_steps": 100,
@@ -586,7 +621,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 4.096777803147854e+18,
+  "total_flos": 4.3528844255689605e+18,
   "train_batch_size": 16,
   "trial_name": null,
   "trial_params": null