SpideyDLK committed (verified)
Commit a8cc54f · 1 Parent(s): ee4f0c8

Training in progress, step 24000, checkpoint
last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f27f5b61c1f90948dd0010ae8fe09b2ee05d65814c1485c88a416751b2977ce6
+oid sha256:39204bd8c2effd818050f284c17a150073ae020ee23cd64f286987976e50533b
 size 1262135480
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c8c7327b4481ffedc3f87866a5c81e8bef2081ee0e7564ef993f87477b497ca6
+oid sha256:24afea734dd4236bf84afdf310768accb6affaddd0956339a57c60d2fea96b82
 size 2490815798
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fa7e40ee09d6a17e6a9f525685c6dd28582a8bf9dd51f9ac2e205828da6e88ad
+oid sha256:dafea8a7d8992903186fb33a4a431ffac98c6764650c46ce6f4f3880d2734c1a
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f92ad734640125620ead43ba6f565701d742f7f96a1acb5b572295c79e960c56
+oid sha256:dd45939ba7a4f7f568ef5975dddbd1e6ecf925ae50169882721525fdfd0f9abc
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 16.010854816824967,
+  "epoch": 16.28222523744912,
   "eval_steps": 400,
-  "global_step": 23600,
+  "global_step": 24000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -951,6 +951,22 @@
       "eval_steps_per_second": 0.823,
       "eval_wer": 0.054845170235710984,
       "step": 23600
+    },
+    {
+      "epoch": 16.28,
+      "grad_norm": 0.3012249767780304,
+      "learning_rate": 0.00013874656907593777,
+      "loss": 0.0616,
+      "step": 24000
+    },
+    {
+      "epoch": 16.28,
+      "eval_loss": 0.0315285250544548,
+      "eval_runtime": 208.0351,
+      "eval_samples_per_second": 6.725,
+      "eval_steps_per_second": 0.841,
+      "eval_wer": 0.052688337698351566,
+      "step": 24000
     }
   ],
   "logging_steps": 400,
@@ -958,7 +974,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 30,
   "save_steps": 400,
-  "total_flos": 7.198809146011316e+19,
+  "total_flos": 7.3207680887953244e+19,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null