{ "tokenizer_class": "PreTrainedTokenizerFast", "model_type": "sentencepiece", "vocab_size": 50000, "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "<pad>", "sp_model_file": "odia_tokenizers_test.model" }