{ "add_bos_token": true, "add_eos_token": false, "add_prefix_space": null, "added_tokens_decoder": { "32000": { "content": "õ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false }, "32001": { "content": "÷", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false }, "32002": { "content": "Á", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false }, "32003": { "content": "ý", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false }, "32004": { "content": "À", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false }, "32005": { "content": "ÿ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false }, "32006": { "content": "ø", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false }, "32007": { "content": "ú", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false }, "32008": { "content": "þ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false }, "32009": { "content": "ü", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false }, "32010": { "content": "ù", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false }, "32011": { "content": "ö", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false }, "32012": { "content": "û", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false }, "32013": { "content": "<|begin▁of▁sentence|>", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": true }, "32014": { "content": "<|end▁of▁sentence|>", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": true }, "32015": { "content": "<|fim▁hole|>", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false }, "32016": { "content": "<|fim▁begin|>", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false }, "32017": { "content": "<|fim▁end|>", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false }, "32018": { "content": "", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false }, "32019": { "content": "<|User|>", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false }, "32020": { "content": "<|Assistant|>", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false }, "32021": { "content": "<|EOT|>", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": true } }, "bos_token": "<|begin▁of▁sentence|>", "chat_template": "{% if not add_generation_prompt is defined %}\n{% set add_generation_prompt = false %}\n{% endif %}\n{%- set ns = namespace(found=false) -%}\n{%- for message in messages -%}\n {%- if message['role'] == 'system' -%}\n {%- set ns.found = true -%}\n {%- endif -%}\n{%- endfor -%}\n{{bos_token}}{%- if not ns.found -%}\n{{'You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. 
For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer\\n'}}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n{{ message['content'] }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction:\\n' + message['content'] + '\\n'}}\n {%- else %}\n{{'### Response:\\n' + message['content'] + '\\n<|EOT|>\\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{% if add_generation_prompt %}\n{{'### Response:'}}\n{% endif %}", "clean_up_tokenization_spaces": false, "eos_token": "<|EOT|>", "legacy": true, "model_max_length": 16384, "pad_token": "<|end▁of▁sentence|>", "padding_side": "left", "sp_model_kwargs": {}, "tokenizer_class": "LlamaTokenizer", "unk_token": null, "use_default_system_prompt": false }