SpideyDLK committed
Commit 7d955da · verified · 1 Parent(s): add738c

Training in progress, step 21200, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f2ccbaa3366fc91a9caa865d514bdc5bc7272bb243e4623552358daf6c381842
+ oid sha256:16dbb1a04572952218422ea39cf7570a42446d5f087786783ad33828643993bc
  size 1262135480
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bd4cc3504193d269c1271fcbb4b8624ac22d68154afdcd94625f3fb1b458ee8d
+ oid sha256:a216eb1303cd5b71afad6f5a91b0b5e3768ab04fd074af690869ca5bedb8926f
  size 2490815798
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2f7bbcf5ff7ad61af1c3ef61aa10d73d1160259d8b5010c2a4340676fa439df1
- size 14308
+ oid sha256:7ad8ec50c3c1c91dac2f5655527d8594f2033e004db09ad65dd9a85d7afb91a4
+ size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:af549fa9698688de4d4da8eafe34f4994b7fb3147b788357d90bc61bb8d36a8a
+ oid sha256:a3f2e6e05de398f17a37f5b322d92a7ed3bd1f1f712be1a3b88b12b3511e8906
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 14.111261872455902,
+ "epoch": 14.382632293080054,
  "eval_steps": 400,
- "global_step": 20800,
+ "global_step": 21200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -839,6 +839,22 @@
  "eval_steps_per_second": 0.834,
  "eval_wer": 0.060314281312586655,
  "step": 20800
+ },
+ {
+ "epoch": 14.38,
+ "grad_norm": 1.34682297706604,
+ "learning_rate": 0.00015795974382433665,
+ "loss": 0.075,
+ "step": 21200
+ },
+ {
+ "epoch": 14.38,
+ "eval_loss": 0.03607061505317688,
+ "eval_runtime": 209.844,
+ "eval_samples_per_second": 6.667,
+ "eval_steps_per_second": 0.834,
+ "eval_wer": 0.06262517331690032,
+ "step": 21200
  }
  ],
  "logging_steps": 400,
@@ -846,7 +862,7 @@
  "num_input_tokens_seen": 0,
  "num_train_epochs": 30,
  "save_steps": 400,
- "total_flos": 6.345721739092566e+19,
+ "total_flos": 6.467377957667312e+19,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null