Upload tokenizer
Files changed:
- tokenizer.json  +1 -6
- tokenizer_config.json  +4 -0
tokenizer.json CHANGED
@@ -1,11 +1,6 @@
 {
   "version": "1.0",
-  "truncation": {
-    "direction": "Right",
-    "max_length": 700,
-    "strategy": "LongestFirst",
-    "stride": 0
-  },
+  "truncation": null,
   "padding": null,
   "added_tokens": [
     {
tokenizer_config.json CHANGED
@@ -121,11 +121,15 @@
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|endoftext|>",
   "legacy": false,
+  "max_length": 700,
   "model_max_length": 131072,
   "pad_token": "<|endoftext|>",
   "padding_side": "right",
   "sp_model_kwargs": {},
+  "stride": 0,
   "tokenizer_class": "LlamaTokenizer",
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first",
   "unk_token": "<unk>",
   "use_default_system_prompt": false
 }
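The truncation settings removed from tokenizer.json resurface here as plain config keys. In transformers, entries from tokenizer_config.json are forwarded to the tokenizer constructor, so truncation_side becomes an attribute of the loaded tokenizer, while max_length, stride, and truncation_strategy are (by assumption) retained as extra init kwargs rather than applied as an automatic truncation rule. A sketch under those assumptions, again with a placeholder repo id:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("org/model")  # placeholder repo id

# Keys from tokenizer_config.json surface on the tokenizer object:
print(tok.truncation_side)   # expected: "right"
print(tok.model_max_length)  # expected: 131072

# Assumption: non-standard keys are kept in init_kwargs and do not
# re-enable default truncation; truncation stays opt-in per call.
print(tok.init_kwargs.get("max_length"))  # expected: 700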