add tokenizer
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
- vocab.json +1 -0
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]"}
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "replace_word_delimiter_char": " ", "tokenizer_class": "Wav2Vec2CTCTokenizer", "processor_class": "Wav2Vec2Processor"}
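
tokenizer_config.json pins the tokenizer class, so the files in this commit can be loaded directly with Hugging Face transformers. A minimal sketch, assuming the three files sit together in a local directory; the "./checkpoint" path is a placeholder, not part of the commit:

from transformers import Wav2Vec2CTCTokenizer

# Directory holding vocab.json, tokenizer_config.json and
# special_tokens_map.json from this commit (placeholder path).
tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./checkpoint")

print(tokenizer.word_delimiter_token)  # "|" stands in for spaces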
vocab.json
ADDED
@@ -0,0 +1 @@
+{"q": 0, "a": 1, "t": 2, "y": 3, "k": 4, "h": 5, "c": 6, "i": 7, "z": 8, "m": 9, "o": 10, "e": 11, "x": 12, "f": 13, "b": 14, "l": 15, "v": 16, "n": 18, "ü": 19, "d": 20, "p": 21, "j": 22, "ö": 23, "w": 24, "ä": 25, "s": 26, "r": 27, "g": 28, "u": 29, "|": 17, "[UNK]": 30, "[PAD]": 31}
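
vocab.json maps each lowercase character to an id, with "|" (id 17) as the word delimiter and [PAD] (id 31) typically doubling as the CTC blank during fine-tuning. A short encode/decode sketch, using the same placeholder path as above:

from transformers import Wav2Vec2CTCTokenizer

# Placeholder path to a directory with the three committed files.
tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./checkpoint")

# One id per character; spaces are encoded as the "|" delimiter (id 17).
ids = tokenizer("the quick fox").input_ids
print(ids)

# decode() maps "|" back to " " via replace_word_delimiter_char. Note that
# adjacent repeated ids are collapsed by default (CTC-style grouping).
print(tokenizer.decode(ids))  # "the quick fox"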