emonidi committed on
Commit cd7f881 · 1 Parent(s): c8cb66b

emonidi/whisper-medium-order

README.md ADDED
@@ -0,0 +1,72 @@
+ ---
+ license: apache-2.0
+ base_model: shripadbhat/whisper-medium-bg
+ tags:
+ - generated_from_trainer
+ metrics:
+ - wer
+ model-index:
+ - name: whisper-medium-order
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # whisper-medium-order
+
+ This model is a fine-tuned version of [shripadbhat/whisper-medium-bg](https://huggingface.co/shripadbhat/whisper-medium-bg) on an unspecified dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.0001
+ - Wer: 0.0
+
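+ A minimal inference sketch, assuming the checkpoint is published on the Hub as `emonidi/whisper-medium-order` and that the input audio is 16 kHz mono:
+
+ ```python
+ from transformers import pipeline
+
+ # "sample.wav" is a placeholder file name; any 16 kHz mono recording works.
+ asr = pipeline(
+     "automatic-speech-recognition",
+     model="emonidi/whisper-medium-order",
+     chunk_length_s=30,  # matches chunk_length in preprocessor_config.json
+ )
+ print(asr("sample.wav")["text"])
+ ```
+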
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 1e-05
+ - train_batch_size: 1
+ - eval_batch_size: 8
+ - seed: 42
+ - gradient_accumulation_steps: 64
+ - total_train_batch_size: 64
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - training_steps: 50
+ - mixed_precision_training: Native AMP
+
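+ A hedged sketch of roughly equivalent `Seq2SeqTrainingArguments` (illustrative only; the exact serialized arguments live in `training_args.bin`, and `output_dir` here is a placeholder):
+
+ ```python
+ from transformers import Seq2SeqTrainingArguments
+
+ training_args = Seq2SeqTrainingArguments(
+     output_dir="whisper-medium-order",  # placeholder
+     learning_rate=1e-5,
+     per_device_train_batch_size=1,
+     per_device_eval_batch_size=8,
+     gradient_accumulation_steps=64,     # effective train batch size 1 * 64 = 64
+     max_steps=50,
+     lr_scheduler_type="linear",
+     seed=42,
+     fp16=True,                          # "Native AMP" mixed precision
+     predict_with_generate=True,         # assumption: WER computed from generated text
+ )
+ # Adam betas (0.9, 0.999) and epsilon 1e-08 are the optimizer defaults, as reported above.
+ ```
+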
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Wer |
+ |:-------------:|:-----:|:----:|:---------------:|:------:|
+ | 0.0008 | 3.0 | 5 | 0.0541 | 2.2222 |
+ | 0.0002 | 6.0 | 10 | 0.0016 | 0.0 |
+ | 0.0001 | 9.0 | 15 | 0.0003 | 0.0 |
+ | 0.0001 | 12.0 | 20 | 0.0002 | 0.0 |
+ | 0.0001 | 15.0 | 25 | 0.0001 | 0.0 |
+ | 0.0001 | 18.0 | 30 | 0.0001 | 0.0 |
+ | 0.0001 | 21.0 | 35 | 0.0001 | 0.0 |
+ | 0.0001 | 24.0 | 40 | 0.0001 | 0.0 |
+ | 0.0001 | 27.0 | 45 | 0.0001 | 0.0 |
+ | 0.0001 | 29.87 | 50 | 0.0001 | 0.0 |
+
+
+ ### Framework versions
+
+ - Transformers 4.35.2
+ - Pytorch 2.1.0+cu121
+ - Datasets 2.15.0
+ - Tokenizers 0.15.0
config.json ADDED
@@ -0,0 +1,52 @@
+ {
+   "_name_or_path": "shripadbhat/whisper-medium-bg",
+   "activation_dropout": 0.0,
+   "activation_function": "gelu",
+   "apply_spec_augment": false,
+   "architectures": [
+     "WhisperForConditionalGeneration"
+   ],
+   "attention_dropout": 0.0,
+   "begin_suppress_tokens": [
+     220,
+     50257
+   ],
+   "bos_token_id": 50257,
+   "classifier_proj_size": 256,
+   "d_model": 1024,
+   "decoder_attention_heads": 16,
+   "decoder_ffn_dim": 4096,
+   "decoder_layerdrop": 0.0,
+   "decoder_layers": 24,
+   "decoder_start_token_id": 50258,
+   "dropout": 0.0,
+   "encoder_attention_heads": 16,
+   "encoder_ffn_dim": 4096,
+   "encoder_layerdrop": 0.0,
+   "encoder_layers": 24,
+   "eos_token_id": 50257,
+   "forced_decoder_ids": null,
+   "init_std": 0.02,
+   "is_encoder_decoder": true,
+   "mask_feature_length": 10,
+   "mask_feature_min_masks": 0,
+   "mask_feature_prob": 0.0,
+   "mask_time_length": 10,
+   "mask_time_min_masks": 2,
+   "mask_time_prob": 0.05,
+   "max_length": 448,
+   "max_source_positions": 1500,
+   "max_target_positions": 448,
+   "median_filter_width": 7,
+   "model_type": "whisper",
+   "num_hidden_layers": 24,
+   "num_mel_bins": 80,
+   "pad_token_id": 50257,
+   "scale_embedding": false,
+   "suppress_tokens": [],
+   "torch_dtype": "float32",
+   "transformers_version": "4.35.2",
+   "use_cache": false,
+   "use_weighted_layer_sum": false,
+   "vocab_size": 51865
+ }
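The configuration above describes a standard medium-sized Whisper encoder-decoder (24 encoder and 24 decoder layers, d_model 1024, 80 mel bins, a 51865-token vocabulary). A short sketch of loading it with `transformers`, assuming the repo id from this commit:

```python
from transformers import WhisperConfig, WhisperForConditionalGeneration

config = WhisperConfig.from_pretrained("emonidi/whisper-medium-order")
print(config.d_model, config.encoder_layers, config.decoder_layers)  # 1024 24 24

model = WhisperForConditionalGeneration.from_pretrained("emonidi/whisper-medium-order")
# Roughly 764M parameters; in float32 that is about 3.06 GB, matching model.safetensors below.
print(sum(p.numel() for p in model.parameters()))
```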
generation_config.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "begin_suppress_tokens": [
+     220,
+     50257
+   ],
+   "bos_token_id": 50257,
+   "decoder_start_token_id": 50258,
+   "eos_token_id": 50257,
+   "max_length": 448,
+   "pad_token_id": 50257,
+   "return_timestamps": false,
+   "suppress_tokens": [],
+   "transformers_version": "4.35.2",
+   "use_cache": false
+ }
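These generation defaults (max_length 448, timestamps disabled, token 50257 serving as BOS/EOS/pad) are picked up automatically by `generate()`. A small sketch of inspecting them, assuming the same repo id:

```python
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("emonidi/whisper-medium-order")
print(gen_config.max_length)         # 448
print(gen_config.return_timestamps)  # False

# Individual defaults can be overridden per call, e.g. model.generate(input_features, max_new_tokens=128).
```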
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:40b88c0a048812c9385d26a4aed4468f1805b26bc6f225c9050ccb47591fa8e0
+ size 3055544304
preprocessor_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "chunk_length": 30,
+   "feature_extractor_type": "WhisperFeatureExtractor",
+   "feature_size": 80,
+   "hop_length": 160,
+   "n_fft": 400,
+   "n_samples": 480000,
+   "nb_max_frames": 3000,
+   "padding_side": "right",
+   "padding_value": 0.0,
+   "processor_class": "WhisperProcessor",
+   "return_attention_mask": false,
+   "sampling_rate": 16000
+ }
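With these settings the feature extractor converts 16 kHz audio into 80-bin log-mel spectrograms, padded or truncated to 30 s, i.e. 480000 samples and 3000 frames at a hop length of 160. A minimal sketch, assuming the same repo id and a NumPy waveform:

```python
import numpy as np
from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained("emonidi/whisper-medium-order")

# Dummy 5-second, 16 kHz waveform; replace with real audio samples.
waveform = np.zeros(5 * 16000, dtype=np.float32)
features = processor(waveform, sampling_rate=16000, return_tensors="pt").input_features
print(features.shape)  # torch.Size([1, 80, 3000]) -- padded to the 30 s / 3000-frame maximum
```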
runs/Dec22_08-32-19_1d25928f9346/events.out.tfevents.1703233945.1d25928f9346.1005.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4f68e681486f673f562cda91d5e7dc87654966bbc045c22c655fa3f8494076bb
+ size 5055
runs/Dec22_08-32-32_1d25928f9346/events.out.tfevents.1703233956.1d25928f9346.1005.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7188ce64298f3d02a8244f76f380e5d2a9b128816ea3d717efd915ea366391f5
+ size 16223
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5e7120454040ac52ff976b0096c6023c69a095f4b8de81589d7c5d6249d2adc3
+ size 4728