yzimmermann committed on
Commit 1806a96 · verified · 1 Parent(s): bd5498d

Upload tokenizer

special_tokens_map.json CHANGED
@@ -9,7 +9,7 @@
   "cls_token": {
     "content": "[CLS]",
     "lstrip": false,
-    "normalized": false,
+    "normalized": true,
     "rstrip": false,
     "single_word": false
   },
@@ -30,21 +30,21 @@
   "pad_token": {
     "content": "[PAD]",
     "lstrip": false,
-    "normalized": false,
+    "normalized": true,
     "rstrip": false,
     "single_word": false
   },
   "sep_token": {
     "content": "[SEP]",
     "lstrip": false,
-    "normalized": false,
+    "normalized": true,
     "rstrip": false,
     "single_word": false
   },
   "unk_token": {
     "content": "[UNK]",
     "lstrip": false,
-    "normalized": false,
+    "normalized": true,
     "rstrip": false,
     "single_word": false
   }
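
Across all four special tokens ([CLS], [PAD], [SEP], [UNK]) this commit flips "normalized" from false to true, meaning the token string is matched against the normalizer's output rather than the raw input text. A minimal sketch, assuming the tokenizers Python package is installed; it mirrors the fields in the hunks above rather than loading the uploaded file:

from tokenizers import AddedToken

# Mirrors the new [CLS] entry in special_tokens_map.json: with
# normalized=True the token is looked for in the normalized text
# instead of the raw input.
cls_token = AddedToken("[CLS]", lstrip=False, rstrip=False,
                       single_word=False, normalized=True)
print(cls_token.normalized)  # True
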
tokenizer.json CHANGED
@@ -23,7 +23,7 @@
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
-      "normalized": false,
+      "normalized": true,
       "special": true
     },
     {
@@ -32,7 +32,7 @@
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
-      "normalized": false,
+      "normalized": true,
       "special": true
     },
     {
@@ -41,7 +41,7 @@
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
-      "normalized": false,
+      "normalized": true,
       "special": true
     },
     {
@@ -50,7 +50,7 @@
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
-      "normalized": false,
+      "normalized": true,
       "special": true
     },
     {
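
tokenizer.json carries the same flag change on its added-token entries (the "id" and "content" fields of each entry sit just above the context shown in these hunks). A minimal sketch, assuming the file has been downloaded locally under the same name, that lists each added token's flags using only the standard library:

import json

# Read the serialized fast tokenizer and print its added tokens; after
# this commit every special token should report normalized == True.
with open("tokenizer.json") as f:
    data = json.load(f)

for entry in data["added_tokens"]:
    print(entry["content"], entry["normalized"], entry["special"])
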
tokenizer_config.json CHANGED
@@ -4,7 +4,7 @@
     "0": {
       "content": "[PAD]",
       "lstrip": false,
-      "normalized": false,
+      "normalized": true,
       "rstrip": false,
       "single_word": false,
       "special": true
@@ -12,7 +12,7 @@
     "11": {
       "content": "[UNK]",
       "lstrip": false,
-      "normalized": false,
+      "normalized": true,
       "rstrip": false,
       "single_word": false,
       "special": true
@@ -20,7 +20,7 @@
     "12": {
       "content": "[CLS]",
       "lstrip": false,
-      "normalized": false,
+      "normalized": true,
       "rstrip": false,
       "single_word": false,
       "special": true
@@ -28,7 +28,7 @@
     "13": {
       "content": "[SEP]",
       "lstrip": false,
-      "normalized": false,
+      "normalized": true,
       "rstrip": false,
       "single_word": false,
       "special": true
@@ -65,8 +65,8 @@
   "errors": "replace",
   "full_tokenizer_file": null,
   "mask_token": "[MASK]",
-  "max_len": 512,
-  "model_max_length": 512,
+  "max_len": 514,
+  "model_max_length": 514,
   "pad_token": "[PAD]",
   "sep_token": "[SEP]",
   "tokenizer_class": "RobertaTokenizer",