UmarRamzan committed (verified)
Commit e5c0fcf · Parent(s): d5183eb

Upload tokenizer
Files changed (4):
  1. README.md +4 -4
  2. added_tokens.json +2 -2
  3. tokenizer_config.json +4 -5
  4. vocab.json +32 -33
README.md CHANGED
@@ -1,8 +1,8 @@
 ---
 license: mit
-base_model: facebook/w2v-bert-2.0
 tags:
 - generated_from_trainer
+base_model: facebook/w2v-bert-2.0
 datasets:
 - common_voice_17_0
 metrics:
@@ -11,8 +11,8 @@ model-index:
 - name: w2v2-bert-urdu
   results:
   - task:
-      name: Automatic Speech Recognition
       type: automatic-speech-recognition
+      name: Automatic Speech Recognition
     dataset:
       name: common_voice_17_0
       type: common_voice_17_0
@@ -20,9 +20,9 @@ model-index:
       split: test[:100]
       args: ur
     metrics:
-    - name: Wer
-      type: wer
+    - type: wer
       value: 0.6273224043715847
+      name: Wer
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
added_tokens.json CHANGED
@@ -1,4 +1,4 @@
 {
-  "</s>": 65,
-  "<s>": 64
+  "</s>": 64,
+  "<s>": 63
 }
tokenizer_config.json CHANGED
@@ -1,6 +1,6 @@
 {
   "added_tokens_decoder": {
-    "62": {
+    "61": {
       "content": "[UNK]",
       "lstrip": true,
       "normalized": false,
@@ -8,7 +8,7 @@
       "single_word": false,
       "special": false
     },
-    "63": {
+    "62": {
       "content": "[PAD]",
       "lstrip": true,
       "normalized": false,
@@ -16,7 +16,7 @@
       "single_word": false,
       "special": false
     },
-    "64": {
+    "63": {
       "content": "<s>",
       "lstrip": false,
       "normalized": false,
@@ -24,7 +24,7 @@
       "single_word": false,
       "special": true
     },
-    "65": {
+    "64": {
       "content": "</s>",
       "lstrip": false,
       "normalized": false,
@@ -39,7 +39,6 @@
   "eos_token": "</s>",
   "model_max_length": 1000000000000000019884624838656,
   "pad_token": "[PAD]",
-  "processor_class": "Wav2Vec2BertProcessor",
   "replace_word_delimiter_char": " ",
   "target_lang": null,
   "tokenizer_class": "Wav2Vec2CTCTokenizer",
vocab.json CHANGED
@@ -1,6 +1,6 @@
 {
-  "[PAD]": 63,
-  "[UNK]": 62,
+  "[PAD]": 62,
+  "[UNK]": 61,
   "|": 0,
   "أ": 1,
   "ؤ": 2,
@@ -32,35 +32,34 @@
   "ن": 28,
   "ه": 29,
   "و": 30,
-  "ى": 31,
-  "ي": 32,
-  "ٹ": 33,
-  "پ": 34,
-  "چ": 35,
-  "ڈ": 36,
-  "ڑ": 37,
-  "ژ": 38,
-  "ک": 39,
-  "گ": 40,
-  "ں": 41,
-  "ھ": 42,
-  "ہ": 43,
-  "ۂ": 44,
-  "ۃ": 45,
-  "ی": 46,
-  "ے": 47,
-  "ۓ": 48,
-  "": 49,
-  "": 50,
-  "": 51,
-  "": 52,
-  "": 53,
-  "": 54,
-  "": 55,
-  "": 56,
-  "": 57,
-  "": 58,
-  "": 59,
-  "": 60,
-  "ﻮ": 61
+  "ي": 31,
+  "ٹ": 32,
+  "پ": 33,
+  "چ": 34,
+  "ڈ": 35,
+  "ڑ": 36,
+  "ژ": 37,
+  "ک": 38,
+  "گ": 39,
+  "ں": 40,
+  "ھ": 41,
+  "ہ": 42,
+  "ۂ": 43,
+  "ۃ": 44,
+  "ی": 45,
+  "ے": 46,
+  "ۓ": 47,
+  "": 48,
+  "": 49,
+  "": 50,
+  "": 51,
+  "": 52,
+  "": 53,
+  "": 54,
+  "": 55,
+  "": 56,
+  "": 57,
+  "": 58,
+  "": 59,
+  "": 60
 }
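
After this commit, [PAD] and [UNK] sit at the end of vocab.json (ids 62 and 61), while <s> and </s> come from added_tokens.json (ids 63 and 64). A minimal Python sketch to sanity-check those ids once the updated files are pulled; the repo id "UmarRamzan/w2v2-bert-urdu" is an assumption inferred from the model name in the README, not stated in this commit:

from transformers import Wav2Vec2CTCTokenizer

# Hypothetical repo id, inferred from the README's model-index name.
tok = Wav2Vec2CTCTokenizer.from_pretrained("UmarRamzan/w2v2-bert-urdu")

# Expected after this commit: [UNK]=61, [PAD]=62, <s>=63, </s>=64
print(tok.unk_token_id, tok.pad_token_id, tok.bos_token_id, tok.eos_token_id)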