Ar4ikov committed
Commit ae3e31a
1 Parent(s): e079c3d

Upload tokenizer

Files changed (4):
  1. special_tokens_map.json +5 -4
  2. tokenizer.json +0 -0
  3. tokenizer_config.json +10 -10
  4. vocab.txt +0 -0
special_tokens_map.json CHANGED
@@ -1,6 +1,7 @@
 {
- "bos_token": "<s>",
- "eos_token": "</s>",
- "pad_token": "<pad>",
- "unk_token": "<unk>"
+ "cls_token": "[CLS]",
+ "mask_token": "[MASK]",
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "unk_token": "[UNK]"
 }
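
Note: this commit replaces the sentencepiece-style special tokens (<s>, </s>, <pad>, <unk>) with BERT-style ones. A minimal sketch for checking the new map after loading the tokenizer with transformers, assuming the repository is checked out locally (the "." path is illustrative; use the actual Hub repo id when loading remotely):

# Verify the special tokens declared in the updated special_tokens_map.json
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(".")  # assumes the repo root as cwd

# With this commit these print the BERT-style tokens, not the old <s>/</s>/<pad>/<unk> set.
print(tokenizer.cls_token, tokenizer.sep_token)   # [CLS] [SEP]
print(tokenizer.pad_token, tokenizer.unk_token)   # [PAD] [UNK]
print(tokenizer.mask_token)                       # [MASK]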
tokenizer.json ADDED
The diff for this file is too large to render.
 
tokenizer_config.json CHANGED
@@ -1,13 +1,13 @@
 {
- "bos_token": "<s>",
  "clean_up_tokenization_spaces": true,
- "do_lower_case": false,
- "eos_token": "</s>",
- "model_max_length": 1000000000000000019884624838656,
- "pad_token": "<pad>",
- "processor_class": "Wav2Vec2Processor",
- "replace_word_delimiter_char": " ",
- "tokenizer_class": "Wav2Vec2CTCTokenizer",
- "unk_token": "<unk>",
- "word_delimiter_token": "|"
+ "cls_token": "[CLS]",
+ "do_lower_case": true,
+ "mask_token": "[MASK]",
+ "model_max_length": 512,
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "strip_accents": null,
+ "tokenize_chinese_chars": true,
+ "tokenizer_class": "BertTokenizer",
+ "unk_token": "[UNK]"
 }
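
Note: the config now declares BertTokenizer with do_lower_case set to true and model_max_length of 512, replacing the previous Wav2Vec2 CTC settings. A rough sketch of encoding behaviour under the new config, again assuming a local checkout (the "." path and the example strings are illustrative):

# Encoding with the updated tokenizer_config.json
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(".")  # assumes the repo root as cwd

# BertTokenizer wraps inputs in [CLS] ... [SEP] and lower-cases them (do_lower_case: true).
enc = tokenizer("Hello World")
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))
# e.g. ['[CLS]', 'hello', 'world', '[SEP]'], vocabulary permitting

# model_max_length is now 512, so truncation caps sequences at 512 tokens.
print(tokenizer.model_max_length)  # 512
long_enc = tokenizer("word " * 1000, truncation=True)
print(len(long_enc["input_ids"]))  # at most 512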
vocab.txt ADDED
The diff for this file is too large to render.