{ "added_tokens_decoder": { "0": { "content": "[MASK]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "1": { "content": "[PAD]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "3": { "content": "[UNK]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "4": { "content": "[CLS]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "5": { "content": "[SEP]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "31002": { "content": "[INICIO]", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false }, "31003": { "content": "[FIN]", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false } }, "clean_up_tokenization_spaces": true, "cls_token": "[CLS]", "do_basic_tokenize": true, "do_lower_case": false, "mask_token": "[MASK]", "model_max_length": 512, "never_split": null, "pad_token": "[PAD]", "sep_token": "[SEP]", "strip_accents": false, "tokenize_chinese_chars": true, "tokenizer_class": "BertTokenizer", "unk_token": "[UNK]" }