SpideyDLK committed
Commit 2ba4893
1 Parent(s): 2f62eed

wav2vec2-large-xls-r-300m-sinhala-aug-data-with-original-split-part3

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ language_model/5gram_correct.arpa filter=lfs diff=lfs merge=lfs -text
alphabet.json ADDED
@@ -0,0 +1 @@
+ {"labels": ["\t", " ", "\u0d82", "\u0d83", "\u0d85", "\u0d86", "\u0d87", "\u0d88", "\u0d89", "\u0d8a", "\u0d8b", "\u0d8c", "\u0d8d", "\u0d91", "\u0d92", "\u0d93", "\u0d94", "\u0d95", "\u0d96", "\u0d9a", "\u0d9b", "\u0d9c", "\u0d9d", "\u0d9e", "\u0d9f", "\u0da0", "\u0da1", "\u0da2", "\u0da4", "\u0da5", "\u0da7", "\u0da8", "\u0da9", "\u0daa", "\u0dab", "\u0dac", "\u0dad", "\u0dae", "\u0daf", "\u0db0", "\u0db1", "\u0db3", "\u0db4", "\u0db5", "\u0db6", "\u0db7", "\u0db8", "\u0db9", "\u0dba", "\u0dbb", "\u0dbd", "\u0dc0", "\u0dc1", "\u0dc2", "\u0dc3", "\u0dc4", "\u0dc5", "\u0dc6", "\u0dca", "\u0dcf", "\u0dd0", "\u0dd1", "\u0dd2", "\u0dd3", "\u0dd4", "\u0dd6", "\u0dd8", "\u0dd9", "\u0dda", "\u0ddb", "\u0ddc", "\u0ddd", "\u0dde", "\u0df2", "\u200c", "\u200d", "\u2047", "", "<s>", "</s>"], "is_bpe": false}
language_model/5gram_correct.arpa ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59446d222f9407bbf6892b694dd47070641e33c5ebea878e0310d06814ba88ea
+ size 288814064
language_model/attrs.json ADDED
@@ -0,0 +1 @@
+ {"alpha": 0.5, "beta": 1.5, "unk_score_offset": -10.0, "score_boundary": true}
language_model/unigrams.txt ADDED
The diff for this file is too large to render. See raw diff
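Taken together, alphabet.json and the new language_model/ files (the KenLM 5-gram ARPA, its unigram vocabulary, and the decoder attributes) follow the on-disk layout that pyctcdecode expects for a CTC beam-search decoder. Below is a minimal sketch of building an equivalent decoder by hand from these files; the local paths and the pyctcdecode call are illustrative and not part of this commit, and they assume the files are downloaded with the same relative layout.

import json
from pyctcdecode import build_ctcdecoder  # requires pyctcdecode and kenlm to be installed

# Load the CTC vocabulary and the stored decoder attributes added in this commit.
with open("alphabet.json") as f:
    labels = json.load(f)["labels"]
with open("language_model/attrs.json") as f:
    attrs = json.load(f)

# Build a beam-search decoder backed by the 5-gram KenLM ARPA file.
decoder = build_ctcdecoder(
    labels,
    kenlm_model_path="language_model/5gram_correct.arpa",
    alpha=attrs["alpha"],                        # LM weight (0.5)
    beta=attrs["beta"],                          # word-insertion bonus (1.5)
    unk_score_offset=attrs["unk_score_offset"],  # penalty for unknown words (-10.0)
    lm_score_boundary=attrs["score_boundary"],   # score sentence boundaries (true)
)

Wav2Vec2ProcessorWithLM.from_pretrained should reconstruct this decoder automatically from the repo, so the manual build is only needed when using pyctcdecode outside transformers.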
 
preprocessor_config.json CHANGED
@@ -4,6 +4,7 @@
  "feature_size": 1,
  "padding_side": "right",
  "padding_value": 0.0,
+ "processor_class": "Wav2Vec2ProcessorWithLM",
  "return_attention_mask": true,
  "sampling_rate": 16000
  }
special_tokens_map.json CHANGED
@@ -1,4 +1,20 @@
  {
+ "additional_special_tokens": [
+ {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ }
+ ],
  "bos_token": "<s>",
  "eos_token": "</s>",
  "pad_token": "[PAD]",
tokenizer_config.json CHANGED
@@ -39,6 +39,7 @@
  "eos_token": "</s>",
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "[PAD]",
+ "processor_class": "Wav2Vec2ProcessorWithLM",
  "replace_word_delimiter_char": " ",
  "target_lang": null,
  "tokenizer_class": "Wav2Vec2CTCTokenizer",