Upload tokenizer
- added_tokens.json +1 -4
- special_tokens_map.json +1 -8
- tokenizer.json +0 -0
- tokenizer_config.json +2 -27
added_tokens.json
CHANGED
@@ -36,8 +36,5 @@
   " ": 50260,
   " ": 50259,
   " ": 50258,
-  " ": 50257,
-  "<PAD>": 50296,
-  "<|im_end|>": 50297,
-  "<|im_start|>": 50295
+  " ": 50257
 }
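added_tokens.json is the layer of extra tokens stacked on top of the base CodeGen vocabulary, keyed by token string with a fixed id; the kept entries (ids 50257-50260) are whitespace-run tokens whose exact contents are not visible in this rendering, while the ChatML and padding tokens at ids 50295-50297 are dropped. A minimal sketch of how the removal could be checked after loading the updated tokenizer; the repository id below is a placeholder, not part of this commit:

from transformers import AutoTokenizer

# Placeholder repo id; substitute the repository this commit was pushed to.
tok = AutoTokenizer.from_pretrained("your-org/your-model")

vocab = tok.get_vocab()
for token in ("<|im_start|>", "<|im_end|>", "<PAD>"):
    # Expected to print False once this commit is applied.
    print(token, token in vocab)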
special_tokens_map.json
CHANGED
@@ -7,19 +7,12 @@
     "single_word": false
   },
   "eos_token": {
-    "content": "<|im_end|>",
+    "content": "<|endoftext|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "single_word": false
   },
-  "pad_token": {
-    "content": "<PAD>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
   "unk_token": {
     "content": "<|endoftext|>",
     "lstrip": false,
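special_tokens_map.json is what populates attributes such as eos_token and pad_token when the tokenizer is loaded. A minimal sketch of the expected state after this change, again with a placeholder repository id:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("your-org/your-model")  # placeholder repo id

print(tok.eos_token)  # expected: "<|endoftext|>"
print(tok.pad_token)  # expected: None, since the pad_token entry was removed

# Callers that batch with padding would now need to set a pad token
# explicitly, for example:
tok.pad_token = tok.eos_token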
tokenizer.json
CHANGED
The diff for this file is too large to render.
See raw diff.
tokenizer_config.json
CHANGED
@@ -312,38 +312,13 @@
       "rstrip": false,
       "single_word": false,
       "special": false
-    },
-    "50295": {
-      "content": "<|im_start|>",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
-    },
-    "50296": {
-      "content": "<PAD>",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "50297": {
-      "content": "<|im_end|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
     }
   },
   "bos_token": "<|endoftext|>",
-  "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
   "clean_up_tokenization_spaces": true,
-  "eos_token": "<|im_end|>",
+  "eos_token": "<|endoftext|>",
   "model_max_length": 2048,
-  "pad_token": "<PAD>",
+  "return_token_type_ids": false,
   "tokenizer_class": "CodeGenTokenizer",
   "unk_token": "<|endoftext|>"
 }
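tokenizer_config.json drops the per-id decoder entries for the same three tokens, removes the ChatML chat_template and the pad_token, points eos_token back at "<|endoftext|>", and adds "return_token_type_ids": false. A small sketch of the downstream effect, under the same placeholder repository id as above:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("your-org/your-model")  # placeholder repo id

print(tok.chat_template)  # expected: None, the ChatML template is gone
print(tok.eos_token_id)   # id of "<|endoftext|>" in the base vocabulary

# Without a chat template, prompts are plain text; append the end-of-sequence
# id manually when a terminator is needed.
ids = tok("def hello():")["input_ids"] + [tok.eos_token_id]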