SpideyDLK committed on
Commit abab94c
1 Parent(s): 44a7777

Training in progress, step 21600, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:16dbb1a04572952218422ea39cf7570a42446d5f087786783ad33828643993bc
+oid sha256:ae3ae3d3f6eed21e23e402626f9b5d36d0eaf722fe8b6b3668eaa433af56d43d
 size 1262135480
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a216eb1303cd5b71afad6f5a91b0b5e3768ab04fd074af690869ca5bedb8926f
+oid sha256:d63d4faba976ec1e6b945518fe24c679be5a3f42c8e71fb2eaa42392c14dcd4b
 size 2490815798
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7ad8ec50c3c1c91dac2f5655527d8594f2033e004db09ad65dd9a85d7afb91a4
+oid sha256:dd6fe92ded35c583b056abd78291bd133b79c7076da34fc990f7b299e212c88d
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a3f2e6e05de398f17a37f5b322d92a7ed3bd1f1f712be1a3b88b12b3511e8906
+oid sha256:47997405a3602f8cade0562119b11b6490f526c5b0319017e96e10ae9857494a
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 14.382632293080054,
+  "epoch": 14.654002713704207,
   "eval_steps": 400,
-  "global_step": 21200,
+  "global_step": 21600,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -855,6 +855,22 @@
       "eval_steps_per_second": 0.834,
       "eval_wer": 0.06262517331690032,
       "step": 21200
+    },
+    {
+      "epoch": 14.65,
+      "grad_norm": 0.1968027800321579,
+      "learning_rate": 0.00015521500457456541,
+      "loss": 0.0715,
+      "step": 21600
+    },
+    {
+      "epoch": 14.65,
+      "eval_loss": 0.03143769130110741,
+      "eval_runtime": 212.2,
+      "eval_samples_per_second": 6.593,
+      "eval_steps_per_second": 0.825,
+      "eval_wer": 0.057464181173933135,
+      "step": 21600
     }
   ],
   "logging_steps": 400,
@@ -862,7 +878,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 30,
   "save_steps": 400,
-  "total_flos": 6.467377957667312e+19,
+  "total_flos": 6.58884399297287e+19,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null