zhiyuan8 committed on
Commit d324e84 · verified · 1 Parent(s): 9a09de9

Update hf_rwkv_tokenizer.py

Files changed (1)
  1. hf_rwkv_tokenizer.py +279 -278
hf_rwkv_tokenizer.py CHANGED
@@ -1,278 +1,279 @@
- # coding=utf-8
- # Copyright 2024 The HuggingFace Inc. team.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """Tokenization classes for RWKV."""
-
- import os
- import re
- from typing import TYPE_CHECKING, List, Optional, Tuple
-
- from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
- from transformers.utils import logging
-
-
- if TYPE_CHECKING:
-     pass
-
- logger = logging.get_logger(__name__)
-
-
- VOCAB_FILES_NAMES = {
-     "vocab_file": "rwkv_vocab_v20230424.txt",
- }
-
- class TRIE:
-     __slots__ = tuple("ch,to,values,front".split(","))
-     to: list
-     values: set
-
-     def __init__(self, front=None, ch=None):
-         self.ch = ch
-         self.to = [None for ch in range(256)]
-         self.values = set()
-         self.front = front
-
-     def __repr__(self):
-         fr = self
-         ret = []
-         while fr != None:
-             if fr.ch != None:
-                 ret.append(fr.ch)
-             fr = fr.front
-         return "<TRIE %s %s>" % (ret[::-1], self.values)
-
-     def add(self, key: bytes, idx: int = 0, val=None):
-         if idx == len(key):
-             if val is None:
-                 val = key
-             self.values.add(val)
-             return self
-         ch = key[idx]
-         if self.to[ch] is None:
-             self.to[ch] = TRIE(front=self, ch=ch)
-         return self.to[ch].add(key, idx=idx + 1, val=val)
-
-     def find_longest(self, key: bytes, idx: int = 0):
-         u: TRIE = self
-         ch: int = key[idx]
-
-         while u.to[ch] is not None:
-             u = u.to[ch]
-             idx += 1
-             if u.values:
-                 ret = idx, u, u.values
-             if idx == len(key):
-                 break
-             ch = key[idx]
-         return ret
-
-
- class RWKV_TOKENIZER:
-     def __init__(self, file_name):
-         self.idx2token = {}
-         sorted = [] # must be already sorted
-         with open(file_name, "r", encoding="utf-8") as f:
-             lines = f.readlines()
-         for l in lines:
-             idx = int(l[: l.index(" ")])
-             x = eval(l[l.index(" ") : l.rindex(" ")])
-             x = x.encode("utf-8") if isinstance(x, str) else x
-             assert isinstance(x, bytes)
-
-             assert len(x) == int(l[l.rindex(" ") :])
-             sorted += [x]
-             self.idx2token[idx] = x
-
-         self.token2idx = {}
-         for k, v in self.idx2token.items():
-             self.token2idx[v] = int(k)
-
-         self.root = TRIE()
-         for t, i in self.token2idx.items():
-             _ = self.root.add(t, val=(t, i))
-
-     def encodeBytes(self, src: bytes):
-         idx: int = 0
-         tokens = []
-         while idx < len(src):
-             _idx: int = idx
-             idx, _, values = self.root.find_longest(src, idx)
-             assert idx != _idx
-             _, token = next(iter(values))
-             tokens.append(token)
-         return tokens
-
-     def decodeBytes(self, tokens):
-         return b"".join(map(lambda i: self.idx2token[i], tokens))
-
-     def encode(self, src):
-         if isinstance(src, str):
-             return [self.encodeBytes(src.encode("utf-8"))]
-         elif isinstance(src, list):
-             return [self.encodeBytes(s.encode("utf-8")) for s in src]
-
-     def decode(self, tokens):
-         return [self.decodeBytes(batch).decode("utf-8") for batch in tokens]
-         # try:
-         #     return self.decodeBytes(tokens).decode('utf-8')
-         # except:
-         #     return '\ufffd' # bad utf-8
-
-     def printTokens(self, tokens):
-         for i in tokens:
-             s = self.idx2token[i]
-             try:
-                 s = s.decode("utf-8")
-             except:
-                 pass
-             print(f"{repr(s)}{i}", end=" ")
-         print()
-
-
- class RwkvTokenizer(PreTrainedTokenizer):
-     vocab_files_names = VOCAB_FILES_NAMES
-     model_input_names = ["input_ids", "attention_mask"]
-
-     def __init__(
-         self, vocab_file, bos_token="<|rwkv_tokenizer_end_of_text|>", eos_token="<|rwkv_tokenizer_end_of_text|>", unk_token="<|rwkv_tokenizer_end_of_text|>", **kwargs
-     ):
-         if not os.path.isfile(vocab_file):
-             raise ValueError(
-                 f"Can't find a vocabulary file at path '{vocab_file}'."
-             )
-
-         with open(vocab_file, "r", encoding="utf-8") as reader:
-             tokens = reader.readlines()
-
-         if "add_bos_token" in kwargs:
-             self.add_bos_token = kwargs["add_bos_token"]
-         else:
-             self.add_bos_token = False
-         self.trie_tokenizer = RWKV_TOKENIZER(vocab_file)
-         vocab = self.trie_tokenizer.token2idx
-         self.encoder = vocab
-         self.decoder = {v: k for k, v in vocab.items()}
-         self._added_tokens_decoder = {0: AddedToken(str(bos_token))}
-         super().__init__(
-             bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs
-         )
-
-     @property
-     def vocab_size(self):
-         return len(self.encoder)
-
-     def get_vocab(self):
-         vocab = {str(self.convert_ids_to_tokens(i)): i for i in range(self.vocab_size)}
-         vocab.update(self.added_tokens_encoder)
-         return vocab
-
-     def _tokenize(self, text, split_special_tokens=False):
-         # return self.wordpiece_tokenizer.tokenize(text.encode("utf-8"))
-         return self.trie_tokenizer.encode(text)[0]
-
-     def _convert_token_to_id(self, token):
-         return token
-
-     def _convert_id_to_token(self, index):
-         """Converts an index (integer) in a token (byte) using the vocab."""
-         token = self.decoder.get(index, self.unk_token)
-         if isinstance(token, (bytes)):
-             token = token.decode("utf-8", errors="replace")
-         return token
-
-     def convert_tokens_to_string(self, tokens):
-         """Converts a sequence of tokens (bytes) in a single string. Additional tokens are encoded to bytes"""
-         out_string = b"".join(
-             [k.encode(errors="replace") if isinstance(k, str) else k for k in tokens]
-         ).decode("utf-8")
-         return out_string
-
-     def save_vocabulary(
-         self, save_directory: str, filename_prefix: Optional[str] = None
-     ) -> Tuple[str]:
-         index = 0
-         if os.path.isdir(save_directory):
-             vocab_file = os.path.join(
-                 save_directory,
-                 (filename_prefix + "-" if filename_prefix else "") + "vocab.txt",
-             )
-         else:
-             vocab_file = (
-                 filename_prefix + "-" if filename_prefix else ""
-             ) + save_directory
-         with open(vocab_file, "w", encoding="utf-8") as writer:
-             for token, token_index in sorted(
-                 self.encoder.items(), key=lambda kv: kv[1]
-             ):
-                 if index != token_index:
-                     logger.warning(
-                         f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
-                         " Please check that the vocabulary is not corrupted!"
-                     )
-                     index = token_index
-                 writer.write(str(token) + "\n")
-                 index += 1
-         return (vocab_file,)
-
-     def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
-         if self.add_bos_token:
-             bos_token_ids = [self.bos_token_id]
-         else:
-             bos_token_ids = []
-
-         output = bos_token_ids + token_ids_0
-
-         if token_ids_1 is None:
-             return output
-
-         return output + bos_token_ids + token_ids_1
-
-     def get_special_tokens_mask(
-         self,
-         token_ids_0: List[int],
-         token_ids_1: Optional[List[int]] = None,
-         already_has_special_tokens: bool = False,
-     ) -> List[int]:
-         """
-         Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
-         special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
-
-         Args:
-             token_ids_0 (`List[int]`):
-                 List of IDs.
-             token_ids_1 (`List[int]`, *optional*):
-                 Optional second list of IDs for sequence pairs.
-             already_has_special_tokens (`bool`, *optional*, defaults to `False`):
-                 Whether or not the token list is already formatted with special tokens for the model.
-
-         Returns:
-             `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
-         """
-         if already_has_special_tokens:
-             return super().get_special_tokens_mask(
-                 token_ids_0=token_ids_0,
-                 token_ids_1=token_ids_1,
-                 already_has_special_tokens=True,
-             )
-
-         if not self.add_bos_token:
-             return super().get_special_tokens_mask(
-                 token_ids_0=token_ids_0,
-                 token_ids_1=token_ids_1,
-                 already_has_special_tokens=False,
-             )
-
-         if token_ids_1 is None:
-             return [1] + ([0] * len(token_ids_0))
-         return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
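
The listing above (unchanged in the version below except for get_vocab) implements greedy longest-match tokenization over raw UTF-8 bytes: RWKV_TOKENIZER.__init__ parses each vocabulary line as "<id> <token repr> <byte length>" and inserts the token bytes into a byte-level TRIE, and encodeBytes repeatedly consumes the longest prefix found in the trie. The sketch below is illustrative only and not part of this commit: it writes a hypothetical three-token vocabulary file in that same line format and runs the trie encoder on it, assuming hf_rwkv_tokenizer.py is importable from the working directory.

import os
import tempfile

from hf_rwkv_tokenizer import RWKV_TOKENIZER  # assumes this file is on the import path

# Hypothetical toy vocabulary in the "<id> <token repr> <byte length>" format
# parsed by RWKV_TOKENIZER.__init__; the real file is rwkv_vocab_v20230424.txt.
toy_vocab = [
    "1 'a' 1",
    "2 'b' 1",
    "3 'ab' 2",
]
with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write("\n".join(toy_vocab) + "\n")
    vocab_path = f.name

tok = RWKV_TOKENIZER(vocab_path)
ids = tok.encode("abab")   # greedy longest match picks 'ab' twice -> [[3, 3]]
text = tok.decode(ids)     # decoding joins the token bytes back -> ['abab']
print(ids, text)
os.remove(vocab_path)

Because _tokenize already returns these integer ids, _convert_token_to_id in the wrapper class can simply return its argument unchanged.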
 
 
+ # coding=utf-8
+ # Copyright 2024 The HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Tokenization classes for RWKV."""
+
+ import os
+ import re
+ from typing import TYPE_CHECKING, List, Optional, Tuple
+
+ from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
+ from transformers.utils import logging
+
+
+ if TYPE_CHECKING:
+     pass
+
+ logger = logging.get_logger(__name__)
+
+
+ VOCAB_FILES_NAMES = {
+     "vocab_file": "rwkv_vocab_v20230424.txt",
+ }
+
+ class TRIE:
+     __slots__ = tuple("ch,to,values,front".split(","))
+     to: list
+     values: set
+
+     def __init__(self, front=None, ch=None):
+         self.ch = ch
+         self.to = [None for ch in range(256)]
+         self.values = set()
+         self.front = front
+
+     def __repr__(self):
+         fr = self
+         ret = []
+         while fr != None:
+             if fr.ch != None:
+                 ret.append(fr.ch)
+             fr = fr.front
+         return "<TRIE %s %s>" % (ret[::-1], self.values)
+
+     def add(self, key: bytes, idx: int = 0, val=None):
+         if idx == len(key):
+             if val is None:
+                 val = key
+             self.values.add(val)
+             return self
+         ch = key[idx]
+         if self.to[ch] is None:
+             self.to[ch] = TRIE(front=self, ch=ch)
+         return self.to[ch].add(key, idx=idx + 1, val=val)
+
+     def find_longest(self, key: bytes, idx: int = 0):
+         u: TRIE = self
+         ch: int = key[idx]
+
+         while u.to[ch] is not None:
+             u = u.to[ch]
+             idx += 1
+             if u.values:
+                 ret = idx, u, u.values
+             if idx == len(key):
+                 break
+             ch = key[idx]
+         return ret
+
+
+ class RWKV_TOKENIZER:
+     def __init__(self, file_name):
+         self.idx2token = {}
+         sorted = [] # must be already sorted
+         with open(file_name, "r", encoding="utf-8") as f:
+             lines = f.readlines()
+         for l in lines:
+             idx = int(l[: l.index(" ")])
+             x = eval(l[l.index(" ") : l.rindex(" ")])
+             x = x.encode("utf-8") if isinstance(x, str) else x
+             assert isinstance(x, bytes)
+
+             assert len(x) == int(l[l.rindex(" ") :])
+             sorted += [x]
+             self.idx2token[idx] = x
+
+         self.token2idx = {}
+         for k, v in self.idx2token.items():
+             self.token2idx[v] = int(k)
+
+         self.root = TRIE()
+         for t, i in self.token2idx.items():
+             _ = self.root.add(t, val=(t, i))
+
+     def encodeBytes(self, src: bytes):
+         idx: int = 0
+         tokens = []
+         while idx < len(src):
+             _idx: int = idx
+             idx, _, values = self.root.find_longest(src, idx)
+             assert idx != _idx
+             _, token = next(iter(values))
+             tokens.append(token)
+         return tokens
+
+     def decodeBytes(self, tokens):
+         return b"".join(map(lambda i: self.idx2token[i], tokens))
+
+     def encode(self, src):
+         if isinstance(src, str):
+             return [self.encodeBytes(src.encode("utf-8"))]
+         elif isinstance(src, list):
+             return [self.encodeBytes(s.encode("utf-8")) for s in src]
+
+     def decode(self, tokens):
+         return [self.decodeBytes(batch).decode("utf-8") for batch in tokens]
+         # try:
+         #     return self.decodeBytes(tokens).decode('utf-8')
+         # except:
+         #     return '\ufffd' # bad utf-8
+
+     def printTokens(self, tokens):
+         for i in tokens:
+             s = self.idx2token[i]
+             try:
+                 s = s.decode("utf-8")
+             except:
+                 pass
+             print(f"{repr(s)}{i}", end=" ")
+         print()
+
+
+ class RwkvTokenizer(PreTrainedTokenizer):
+     vocab_files_names = VOCAB_FILES_NAMES
+     model_input_names = ["input_ids", "attention_mask"]
+
+     def __init__(
+         self, vocab_file, bos_token="<|rwkv_tokenizer_end_of_text|>", eos_token="<|rwkv_tokenizer_end_of_text|>", unk_token="<|rwkv_tokenizer_end_of_text|>", **kwargs
+     ):
+         if not os.path.isfile(vocab_file):
+             raise ValueError(
+                 f"Can't find a vocabulary file at path '{vocab_file}'."
+             )
+
+         with open(vocab_file, "r", encoding="utf-8") as reader:
+             tokens = reader.readlines()
+
+         if "add_bos_token" in kwargs:
+             self.add_bos_token = kwargs["add_bos_token"]
+         else:
+             self.add_bos_token = False
+         self.trie_tokenizer = RWKV_TOKENIZER(vocab_file)
+         vocab = self.trie_tokenizer.token2idx
+         self.encoder = vocab
+         self.decoder = {v: k for k, v in vocab.items()}
+         self._added_tokens_decoder = {0: AddedToken(str(bos_token))}
+         super().__init__(
+             bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs
+         )
+
+     @property
+     def vocab_size(self):
+         return len(self.encoder)
+
+     def get_vocab(self):
+         vocab = self.encoder
+         vocab.update(self.added_tokens_encoder)
+         vocab = dict(sorted(vocab.items(), key=lambda item: item[1]))
+         return vocab
+
+     def _tokenize(self, text, split_special_tokens=False):
+         # return self.wordpiece_tokenizer.tokenize(text.encode("utf-8"))
+         return self.trie_tokenizer.encode(text)[0]
+
+     def _convert_token_to_id(self, token):
+         return token
+
+     def _convert_id_to_token(self, index):
+         """Converts an index (integer) in a token (byte) using the vocab."""
+         token = self.decoder.get(index, self.unk_token)
+         if isinstance(token, (bytes)):
+             token = token.decode("utf-8", errors="replace")
+         return token
+
+     def convert_tokens_to_string(self, tokens):
+         """Converts a sequence of tokens (bytes) in a single string. Additional tokens are encoded to bytes"""
+         out_string = b"".join(
+             [k.encode(errors="replace") if isinstance(k, str) else k for k in tokens]
+         ).decode("utf-8")
+         return out_string
+
+     def save_vocabulary(
+         self, save_directory: str, filename_prefix: Optional[str] = None
+     ) -> Tuple[str]:
+         index = 0
+         if os.path.isdir(save_directory):
+             vocab_file = os.path.join(
+                 save_directory,
+                 (filename_prefix + "-" if filename_prefix else "") + "vocab.txt",
+             )
+         else:
+             vocab_file = (
+                 filename_prefix + "-" if filename_prefix else ""
+             ) + save_directory
+         with open(vocab_file, "w", encoding="utf-8") as writer:
+             for token, token_index in sorted(
+                 self.encoder.items(), key=lambda kv: kv[1]
+             ):
+                 if index != token_index:
+                     logger.warning(
+                         f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
+                         " Please check that the vocabulary is not corrupted!"
+                     )
+                     index = token_index
+                 writer.write(str(token) + "\n")
+                 index += 1
+         return (vocab_file,)
+
+     def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+         if self.add_bos_token:
+             bos_token_ids = [self.bos_token_id]
+         else:
+             bos_token_ids = []
+
+         output = bos_token_ids + token_ids_0
+
+         if token_ids_1 is None:
+             return output
+
+         return output + bos_token_ids + token_ids_1
+
+     def get_special_tokens_mask(
+         self,
+         token_ids_0: List[int],
+         token_ids_1: Optional[List[int]] = None,
+         already_has_special_tokens: bool = False,
+     ) -> List[int]:
+         """
+         Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
+         special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+             already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+                 Whether or not the token list is already formatted with special tokens for the model.
+
+         Returns:
+             `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+         """
+         if already_has_special_tokens:
+             return super().get_special_tokens_mask(
+                 token_ids_0=token_ids_0,
+                 token_ids_1=token_ids_1,
+                 already_has_special_tokens=True,
+             )
+
+         if not self.add_bos_token:
+             return super().get_special_tokens_mask(
+                 token_ids_0=token_ids_0,
+                 token_ids_1=token_ids_1,
+                 already_has_special_tokens=False,
+             )
+
+         if token_ids_1 is None:
+             return [1] + ([0] * len(token_ids_0))
+         return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
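
The functional change in this commit is in get_vocab: instead of rebuilding the token-to-id mapping by calling convert_ids_to_tokens for every id up to vocab_size, the method now starts from the trie vocabulary held in self.encoder, merges in added_tokens_encoder, and returns the entries sorted by token id. A rough usage sketch follows; the repository id is a placeholder (not taken from this page), and trust_remote_code=True is needed because RwkvTokenizer lives in hf_rwkv_tokenizer.py inside the model repo rather than in the transformers library.

from transformers import AutoTokenizer

# Placeholder repo id; substitute the RWKV checkpoint that ships this tokenizer file.
tok = AutoTokenizer.from_pretrained("some-org/some-rwkv-model", trust_remote_code=True)

vocab = tok.get_vocab()                 # token -> id mapping, ordered by id after this change
ids = tok("Hello world")["input_ids"]   # byte-level, trie-based encoding
print(len(vocab), ids, tok.decode(ids))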