Leyo committed
Commit f868177
1 parent: 8737bed

Upload tokenizer

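For context, a tokenizer commit like this one is typically produced with `push_to_hub`. A minimal sketch, assuming a hypothetical local checkpoint directory and repo id (neither is confirmed by this commit):

# Minimal sketch of how a tokenizer upload like this is usually made.
# "./idefics-checkpoint" and "my-org/my-model" are placeholders, not
# the actual paths or repo used here.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./idefics-checkpoint")
tokenizer.push_to_hub("my-org/my-model", commit_message="Upload tokenizer")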
added_tokens.json CHANGED
@@ -1,4 +1,7 @@
 {
+  "</s>": 2,
   "<fake_token_around_image>": 32000,
-  "<image>": 32001
+  "<image>": 32001,
+  "<s>": 1,
+  "<unk>": 0
 }
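The updated file pins ids for the base Llama special tokens alongside the two image tokens. A sketch of where these ids come from, assuming the huggyllama/llama-7b base tokenizer referenced in tokenizer_config.json below:

# Sketch: the two image tokens are appended after the 32000-entry Llama
# vocabulary, which is why they land on ids 32000 and 32001.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("huggyllama/llama-7b")
tok.add_special_tokens(
    {"additional_special_tokens": ["<fake_token_around_image>", "<image>"]}
)
print(tok.convert_tokens_to_ids("<fake_token_around_image>"))  # 32000
print(tok.convert_tokens_to_ids("<image>"))                    # 32001
print(tok.convert_tokens_to_ids(["<unk>", "<s>", "</s>"]))     # [0, 1, 2]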
special_tokens_map.json CHANGED
@@ -1,40 +1,10 @@
 {
   "additional_special_tokens": [
-    {
-      "content": "<fake_token_around_image>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false
-    },
-    {
-      "content": "<image>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false
-    }
+    "<fake_token_around_image>",
+    "<image>"
   ],
-  "bos_token": {
-    "content": "<s>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "eos_token": {
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
+  "bos_token": "<s>",
+  "eos_token": "</s>",
   "pad_token": "<unk>",
-  "unk_token": {
-    "content": "<unk>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  }
+  "unk_token": "<unk>"
 }
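The map collapses from full AddedToken dicts to plain strings; both forms deserialize to the same tokens, so this is a serialization cleanup rather than a behavior change. A small sketch of the equivalence:

# Sketch: a plain string and an AddedToken with these flags carry the
# same content; transformers accepts either form when loading.
from transformers import AddedToken

bos_verbose = AddedToken(
    "<s>", lstrip=False, normalized=False, rstrip=False, single_word=False
)
bos_plain = "<s>"
print(str(bos_verbose) == bos_plain)  # True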
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
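The newly added tokenizer.json is the standalone fast-tokenizer serialization; it can be loaded on its own with the tokenizers library, independent of the JSON files above. A sketch, assuming the file has been downloaded locally:

# Sketch: loading the new fast-tokenizer file directly.
from tokenizers import Tokenizer

fast = Tokenizer.from_file("tokenizer.json")
print(fast.token_to_id("<image>"))  # 32001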
 
tokenizer_config.json CHANGED
@@ -1,35 +1,61 @@
 {
   "add_bos_token": true,
   "add_eos_token": false,
-  "bos_token": {
-    "__type": "AddedToken",
-    "content": "<s>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "32000": {
+      "content": "<fake_token_around_image>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "32001": {
+      "content": "<image>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
   },
+  "additional_special_tokens": [
+    "<fake_token_around_image>",
+    "<image>"
+  ],
+  "bos_token": "<s>",
   "clean_up_tokenization_spaces": false,
-  "eos_token": {
-    "__type": "AddedToken",
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
+  "eos_token": "</s>",
   "legacy": true,
   "model_max_length": 2048,
-  "pad_token": null,
+  "pad_token": "<unk>",
   "sp_model_kwargs": {},
-  "special_tokens_map_file": "/Users/leotronchon/.cache/huggingface/hub/models--huggyllama--llama-7b/snapshots/8416d3fefb0cb3ff5775a7b13c1692d10ff1aa16/special_tokens_map.json",
+  "spaces_between_special_tokens": false,
   "tokenizer_class": "LlamaTokenizer",
-  "unk_token": {
-    "__type": "AddedToken",
-    "content": "<unk>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  }
+  "unk_token": "<unk>",
+  "use_default_system_prompt": true
 }
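The rewritten config drops the machine-specific special_tokens_map_file path and instead inlines every special token under added_tokens_decoder, the self-contained format used by newer transformers releases. A sketch of loading the updated files, assuming they sit in the current directory:

# Sketch: with added_tokens_decoder inlined, loading needs no external
# special_tokens_map_file, and pad_token now resolves to <unk>.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")
print(tok.pad_token, tok.unk_token)      # <unk> <unk>
print(sorted(tok.added_tokens_decoder))  # [0, 1, 2, 32000, 32001]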