add tokenizer
- added_tokens.json +1 -0
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
- vocab.json +1 -0
added_tokens.json
ADDED
@@ -0,0 +1 @@
+{"<s>": 40, "</s>": 41}
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "replace_word_delimiter_char": " ", "special_tokens_map_file": null, "name_or_path": "./", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
vocab.json
ADDED
@@ -0,0 +1 @@
+{"t": 0, "w": 1, "l": 2, "y": 3, "o": 4, "f": 5, "n": 7, "i": 8, "p": 9, "c": 10, "z": 11, "v": 12, "k": 13, "ğ": 14, "r": 15, "e": 16, "ö": 17, "m": 18, "ş": 19, "d": 20, "ç": 21, "ü": 22, "h": 23, "j": 24, "x": 25, "q": 26, "â": 27, "ë": 28, "î": 29, "a": 30, "g": 31, "̇": 32, "ı": 33, "b": 34, "u": 35, "'": 36, "s": 37, "|": 6, "[UNK]": 38, "[PAD]": 39}
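
A minimal sketch of how these four files are consumed together, assuming they are saved in one local directory (matching the name_or_path of "./" in tokenizer_config.json above); the sample phrase is illustrative:

from transformers import Wav2Vec2CTCTokenizer

# Loads vocab.json, tokenizer_config.json, special_tokens_map.json
# and added_tokens.json from the directory.
tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./")

# Encoding maps each character to its vocab.json id; spaces become
# the word_delimiter_token "|" (id 6).
ids = tokenizer("merhaba dünya").input_ids
print(ids)  # [18, 16, 15, 23, 30, 34, 30, 6, 20, 22, 7, 3, 30]

# decode() applies CTC-style grouping: repeated ids are collapsed,
# [PAD] (id 39, the CTC blank) is dropped, and "|" is mapped back
# to a space.
print(tokenizer.decode(ids))  # merhaba dünya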