zjyhf committed on
Commit 26140d9 · 1 Parent(s): 1d88e17

Add the special token '<|im_end|>' to the tokenizer to fix generation not stopping when the model emits <|im_end|>.


When running inference on 'Llama3-ChatQA-1.5-70B' with vLLM, generation does not stop when the model emits the special token '<|im_end|>', as shown in the figure below. This PR maps token id 128010 (previously '<|reserved_special_token_5|>') to <|im_end|> in the tokenizer; the id also needs to be added to the stop-token mapping in generation_config.json. The diff additionally wraps the ByteLevel post-processor in a Sequence with a TemplateProcessing step so that <|begin_of_text|> (id 128000) is prepended during encoding.
![8e4f01f676a0de25c1412b10172cfa9.png](https://cdn-uploads.huggingface.co/production/uploads/66161a077b605932bfbc106b/gwk4PQzYiutyeDvLBeHWo.png)
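For reference, a minimal sketch of what the generation_config.json mapping could look like. The ids are assumptions, not taken from this repo's config: 128010 is the <|im_end|> id from this diff and 128000 is <|begin_of_text|>; 128001 (<|end_of_text|>) and 128009 (<|eot_id|>) follow the usual Llama-3 special-token layout and should be checked against the repo's actual files.

```json
{
  "bos_token_id": 128000,
  "eos_token_id": [
    128001,
    128009,
    128010
  ]
}
```

Whether vLLM reads stop ids from generation_config.json or from the tokenizer's eos settings varies by version, so verify that generation actually stops at <|im_end|> after applying the change.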

Files changed (1)
  1. tokenizer.json +65 -6
tokenizer.json CHANGED
@@ -95,7 +95,7 @@
     },
     {
       "id": 128010,
-      "content": "<|reserved_special_token_5|>",
+      "content": "<|im_end|>",
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
@@ -2329,10 +2329,69 @@
     ]
   },
   "post_processor": {
-    "type": "ByteLevel",
-    "add_prefix_space": true,
-    "trim_offsets": false,
-    "use_regex": true
+    "type": "Sequence",
+    "processors": [
+      {
+        "type": "ByteLevel",
+        "add_prefix_space": true,
+        "trim_offsets": false,
+        "use_regex": true
+      },
+      {
+        "type": "TemplateProcessing",
+        "single": [
+          {
+            "SpecialToken": {
+              "id": "<|begin_of_text|>",
+              "type_id": 0
+            }
+          },
+          {
+            "Sequence": {
+              "id": "A",
+              "type_id": 0
+            }
+          }
+        ],
+        "pair": [
+          {
+            "SpecialToken": {
+              "id": "<|begin_of_text|>",
+              "type_id": 0
+            }
+          },
+          {
+            "Sequence": {
+              "id": "A",
+              "type_id": 0
+            }
+          },
+          {
+            "SpecialToken": {
+              "id": "<|begin_of_text|>",
+              "type_id": 1
+            }
+          },
+          {
+            "Sequence": {
+              "id": "B",
+              "type_id": 1
+            }
+          }
+        ],
+        "special_tokens": {
+          "<|begin_of_text|>": {
+            "id": "<|begin_of_text|>",
+            "ids": [
+              128000
+            ],
+            "tokens": [
+              "<|begin_of_text|>"
+            ]
+          }
+        }
+      }
+    ]
   },
   "decoder": {
     "type": "ByteLevel",
@@ -410501,4 +410560,4 @@
       "éĶ ¦"
     ]
   }
-}
+}