{ "tokenizer_class": "PreTrainedTokenizerFast", "model_type": "sentencepiece", "vocab_size": 50000, "unk_token": "", "bos_token": "", "eos_token": "", "pad_token": "", "sp_model_file": "odia_tokenizers_test.model" }