kingabzpro committed
Commit • 617876e
1 Parent(s): 0c099ca
Upload lm-boosted decoder
Files changed:
- alphabet.json +1 -0
- eval.py +1 -1
- language_model/3gram.bin +3 -0
- language_model/attrs.json +1 -0
- language_model/unigrams.txt +0 -0
- preprocessor_config.json +1 -0
- tokenizer_config.json +1 -1
alphabet.json
ADDED
@@ -0,0 +1 @@
+{"labels": ["<s>", "", "</s>", "\u2047", " ", "\u0a02", "\u0a05", "\u0a06", "\u0a07", "\u0a08", "\u0a09", "\u0a0a", "\u0a0f", "\u0a10", "\u0a13", "\u0a14", "\u0a15", "\u0a16", "\u0a17", "\u0a18", "\u0a1a", "\u0a1b", "\u0a1c", "\u0a1d", "\u0a1f", "\u0a20", "\u0a21", "\u0a22", "\u0a23", "\u0a24", "\u0a25", "\u0a26", "\u0a27", "\u0a28", "\u0a2a", "\u0a2b", "\u0a2c", "\u0a2d", "\u0a2e", "\u0a2f", "\u0a30", "\u0a32", "\u0a33", "\u0a35", "\u0a36", "\u0a38", "\u0a39", "\u0a3c", "\u0a3e", "\u0a3f", "\u0a40", "\u0a41", "\u0a42", "\u0a47", "\u0a48", "\u0a4b", "\u0a4c", "\u0a4d", "\u0a59", "\u0a5a", "\u0a5b", "\u0a5c", "\u0a5e", "\u0a70", "\u0a71"], "is_bpe": false}
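The new alphabet.json holds the CTC label set that the pyctcdecode-based decoder maps the acoustic model's output indices onto: the special tokens and word delimiter first, then the Gurmukhi characters (U+0A02–U+0A71). A minimal sketch for inspecting it, assuming the file sits in the current directory:

```python
import json

# Load the decoder alphabet added in this commit
with open("alphabet.json", encoding="utf-8") as f:
    alphabet = json.load(f)

# Special tokens come first, followed by the Gurmukhi characters
print(len(alphabet["labels"]), alphabet["labels"][:8])
print(alphabet["is_bpe"])  # False: labels are single characters, not BPE pieces
```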
eval.py
CHANGED
@@ -50,7 +50,7 @@ def log_results(result: Dataset, args: Dict[str, str]):
 def normalize_text(text: str) -> str:
     """DO ADAPT FOR YOUR USE CASE. this function normalizes the target text."""
 
-    chars_to_ignore_regex = '[
+    chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\%\‘\”\�\'\¤\’\…\–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
 
     text = re.sub(chars_to_ignore_regex, "", text.lower())
 
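For context, the character set above is a best-effort reconstruction of the mojibake in the rendered diff; the substitution itself simply lowercases the target text and strips those punctuation marks before WER/CER are computed. A minimal sketch (the Punjabi sample string is illustrative only):

```python
import re

# Best-effort reconstruction of the character set added in this commit
chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\%\‘\”\�\'\¤\’\…\–]'  # noqa: W605

sample = "ਸਤਿ ਸ੍ਰੀ ਅਕਾਲ, ਜੀ!"
print(re.sub(chars_to_ignore_regex, "", sample.lower()))  # -> "ਸਤਿ ਸ੍ਰੀ ਅਕਾਲ ਜੀ"
```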
language_model/3gram.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5dc29e9312be0f490518804e8b308e8836eb6d7b4f63f95f9b0c169d8442aa1d
+size 1141410
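The 3-gram binary itself is stored through Git LFS; the three lines above are only the pointer file (the size field indicates roughly 1.1 MB). If the kenlm Python bindings are installed, the downloaded model can be queried directly; a minimal sketch (the sample sentence is illustrative only):

```python
import kenlm  # requires the kenlm Python bindings

# Load the binary 3-gram language model added in this commit
lm = kenlm.Model("language_model/3gram.bin")

# log10 probability of a space-separated word sequence under the LM
print(lm.score("ਸਤਿ ਸ੍ਰੀ ਅਕਾਲ", bos=True, eos=True))
print(lm.order)  # 3 for a 3-gram model
```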
language_model/attrs.json
ADDED
@@ -0,0 +1 @@
+{"alpha": 0.5, "beta": 1.5, "unk_score_offset": -10.0, "score_boundary": true}
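attrs.json stores the beam-search hyperparameters that are restored when the decoder is rebuilt: alpha weights the language-model score, beta is the word-insertion bonus, unk_score_offset penalizes out-of-vocabulary tokens, and score_boundary toggles sentence-boundary scoring. A sketch of building an equivalent decoder by hand with pyctcdecode — roughly what Wav2Vec2ProcessorWithLM does when loading this repository — assuming the files from this commit sit in the current directory:

```python
import json
from pyctcdecode import build_ctcdecoder

labels = json.load(open("alphabet.json", encoding="utf-8"))["labels"]
attrs = json.load(open("language_model/attrs.json", encoding="utf-8"))
unigrams = [line.strip() for line in open("language_model/unigrams.txt", encoding="utf-8")]

decoder = build_ctcdecoder(
    labels,
    kenlm_model_path="language_model/3gram.bin",
    unigrams=unigrams,
    alpha=attrs["alpha"],                        # LM weight
    beta=attrs["beta"],                          # word-insertion bonus
    unk_score_offset=attrs["unk_score_offset"],  # penalty for unknown tokens
    lm_score_boundary=attrs["score_boundary"],   # score sentence boundaries with the LM
)
```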
language_model/unigrams.txt
ADDED
The diff for this file is too large to render.
preprocessor_config.json
CHANGED
@@ -4,6 +4,7 @@
   "feature_size": 1,
   "padding_side": "right",
   "padding_value": 0,
+  "processor_class": "Wav2Vec2ProcessorWithLM",
   "return_attention_mask": false,
   "sampling_rate": 16000
 }
tokenizer_config.json
CHANGED
@@ -1 +1 @@
-{"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "<pad>", "do_lower_case": false, "word_delimiter_token": "|", "special_tokens_map_file": "/workspace/.cache/huggingface/transformers/93e627c1d485fcab8a79fcf898b021187013b29075034ece7e0e46dfa29292ec.9d6cd81ef646692fb1c169a880161ea1cb95f49694f220aced9b704b457e51dd", "tokenizer_file": null, "name_or_path": "
+{"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "<pad>", "do_lower_case": false, "word_delimiter_token": "|", "special_tokens_map_file": "/workspace/.cache/huggingface/transformers/93e627c1d485fcab8a79fcf898b021187013b29075034ece7e0e46dfa29292ec.9d6cd81ef646692fb1c169a880161ea1cb95f49694f220aced9b704b457e51dd", "tokenizer_file": null, "name_or_path": "kingabzpro/wav2vec2-large-xlsr-53-punjabi", "tokenizer_class": "Wav2Vec2CTCTokenizer", "processor_class": "Wav2Vec2ProcessorWithLM"}
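Taken together — the alphabet, the KenLM files, and processor_class set to Wav2Vec2ProcessorWithLM in both configs — the repository can now be loaded as an LM-boosted pipeline. A minimal usage sketch, with a silent placeholder waveform standing in for a real 16 kHz mono recording:

```python
import numpy as np
import torch
from transformers import AutoModelForCTC, Wav2Vec2ProcessorWithLM

model_id = "kingabzpro/wav2vec2-large-xlsr-53-punjabi"
model = AutoModelForCTC.from_pretrained(model_id)
processor = Wav2Vec2ProcessorWithLM.from_pretrained(model_id)  # picks up alphabet.json + language_model/

# Placeholder: one second of silence; replace with a real 16 kHz mono waveform
audio = np.zeros(16_000, dtype=np.float32)

inputs = processor(audio, sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# batch_decode runs the pyctcdecode beam search with the 3-gram LM instead of a plain argmax
transcription = processor.batch_decode(logits.numpy()).text[0]
print(transcription)
```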