anton-l (HF staff) committed
Commit: eccc472
1 Parent(s): a1e5cbc

Upload README.md

Files changed (1)
  1. README.md +38 -39
README.md CHANGED
@@ -1,11 +1,11 @@
 ---
-language:
+language:
 - sv-SE
-
 license: apache-2.0
 tags:
 - automatic-speech-recognition
 - robust-speech-event
+- hf-asr-leaderboard
 datasets:
 - mozilla-foundation/common_voice_8_0
 metrics:
@@ -14,45 +14,44 @@ metrics:
 model-index:
 - name: wav2vec2-xls-r-300m-swedish
   results:
-  - task:
-      type: automatic-speech-recognition # Required. Example: automatic-speech-recognition
-      name: Speech Recognition # Optional. Example: Speech Recognition
+  - task:
+      type: automatic-speech-recognition
+      name: Speech Recognition
     dataset:
-      type: mozilla-foundation/common_voice_8_0 # Required. Example: common_voice. Use dataset id from https://hf.co/datasets
-      name: Common Voice sv-SE # Required. Example: Common Voice zh-CN
-      args: sv-SE # Optional. Example: zh-CN
+      type: mozilla-foundation/common_voice_8_0
+      name: Common Voice sv-SE
+      args: sv-SE
     metrics:
-    - type: wer # Required. Example: wer
-      value: 24.73 # Required. Example: 20.90
-      name: Test WER # Optional. Example: Test WER
-      args:
-      - learning_rate: 7.5e-05
-      - train_batch_size: 64
-      - eval_batch_size: 8
-      - seed: 42
-      - gradient_accumulation_steps: 2
-      - total_train_batch_size: 128
-      - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
-      - lr_scheduler_type: linear
-      - lr_scheduler_warmup_steps: 1000
-      - num_epochs: 50
-      - mixed_precision_training: Native AMP # Optional. Example for BLEU: max_order
-    - type: cer # Required. Example: wer
-      value: 7.58 # Required. Example: 20.90
-      name: Test CER # Optional. Example: Test WER
-      args:
-      - learning_rate: 7.5e-05
-      - train_batch_size: 64
-      - eval_batch_size: 8
-      - seed: 42
-      - gradient_accumulation_steps: 2
-      - total_train_batch_size: 128
-      - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
-      - lr_scheduler_type: linear
-      - lr_scheduler_warmup_steps: 1000
-      - num_epochs: 50
-      - mixed_precision_training: Native AMP # Optional. Example for BLEU: max_order
-
+    - type: wer
+      value: 24.73
+      name: Test WER
+      args:
+      - learning_rate: 7.5e-05
+      - train_batch_size: 64
+      - eval_batch_size: 8
+      - seed: 42
+      - gradient_accumulation_steps: 2
+      - total_train_batch_size: 128
+      - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+      - lr_scheduler_type: linear
+      - lr_scheduler_warmup_steps: 1000
+      - num_epochs: 50
+      - mixed_precision_training: Native AMP
+    - type: cer
+      value: 7.58
+      name: Test CER
+      args:
+      - learning_rate: 7.5e-05
+      - train_batch_size: 64
+      - eval_batch_size: 8
+      - seed: 42
+      - gradient_accumulation_steps: 2
+      - total_train_batch_size: 128
+      - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+      - lr_scheduler_type: linear
+      - lr_scheduler_warmup_steps: 1000
+      - num_epochs: 50
+      - mixed_precision_training: Native AMP
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
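
The model-index above reports Test WER 24.73 and Test CER 7.58 for wav2vec2-xls-r-300m-swedish on the mozilla-foundation/common_voice_8_0 `sv-SE` test split. A minimal sketch of how such figures are typically checked follows; the repository id is a placeholder (not taken from this commit), the 100-sample subset and lowercase-only normalization are simplifying assumptions, and loading Common Voice 8.0 requires accepting the dataset terms and authenticating with the Hugging Face Hub.

```python
# Hedged sketch: evaluate a wav2vec2 ASR checkpoint on Common Voice 8.0 sv-SE.
# MODEL_ID is a placeholder, not a repository referenced in this commit.
from datasets import Audio, load_dataset
from jiwer import cer, wer
from transformers import pipeline

MODEL_ID = "<your-namespace>/wav2vec2-xls-r-300m-swedish"  # placeholder repo id

# Build an automatic-speech-recognition pipeline from the fine-tuned checkpoint.
asr = pipeline("automatic-speech-recognition", model=MODEL_ID)

# Swedish test split, resampled to the 16 kHz sampling rate the model expects.
test = load_dataset("mozilla-foundation/common_voice_8_0", "sv-SE", split="test")
test = test.cast_column("audio", Audio(sampling_rate=16_000))

predictions, references = [], []
for sample in test.select(range(100)):  # small subset; the card's numbers use the full split
    output = asr(sample["audio"]["array"])
    predictions.append(output["text"].lower())
    references.append(sample["sentence"].lower())

# Word and character error rates, scaled to percentages like the model-index values.
print(f"WER: {100 * wer(references, predictions):.2f}")
print(f"CER: {100 * cer(references, predictions):.2f}")
```

The exact figures depend on the text normalization applied at evaluation time (casing, punctuation stripping), which this diff does not record.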