Commit 3744210 by Pradeep Kumar
Parent(s): d6fca6a
Upload tokenization.py (+541 -0)
tokenization.py ADDED
@@ -0,0 +1,541 @@
# Copyright 2024 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# coding=utf-8
"""Tokenization classes implementation.

The file is forked from:
https://github.com/google-research/bert/blob/master/tokenization.py.
"""

import collections
import re
import unicodedata

import six
import tensorflow as tf, tf_keras

import sentencepiece as spm

SPIECE_UNDERLINE = "▁"

def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
  """Checks whether the casing config is consistent with the checkpoint name."""

  # The casing has to be passed in by the user and there is no explicit check
  # as to whether it matches the checkpoint. The casing information probably
  # should have been stored in the bert_config.json file, but it's not, so
  # we have to heuristically detect it to validate.

  if not init_checkpoint:
    return

  m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
  if m is None:
    return

  model_name = m.group(1)

  lower_models = [
      "uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
      "multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
  ]

  cased_models = [
      "cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
      "multi_cased_L-12_H-768_A-12"
  ]

  is_bad_config = False
  if model_name in lower_models and not do_lower_case:
    is_bad_config = True
    actual_flag = "False"
    case_name = "lowercased"
    opposite_flag = "True"

  if model_name in cased_models and do_lower_case:
    is_bad_config = True
    actual_flag = "True"
    case_name = "cased"
    opposite_flag = "False"

  if is_bad_config:
    raise ValueError(
        "You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
        "However, `%s` seems to be a %s model, so you "
        "should pass in `--do_lower_case=%s` so that the fine-tuning matches "
        "how the model was pre-trained. If this error is wrong, please "
        "just comment out this check." %
        (actual_flag, init_checkpoint, model_name, case_name, opposite_flag))

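# Illustrative check behavior (hypothetical checkpoint paths, not part of the
# forked file):
#
#   validate_case_matches_checkpoint(
#       True, "uncased_L-12_H-768_A-12/bert_model.ckpt")   # consistent: no-op
#   validate_case_matches_checkpoint(
#       False, "uncased_L-12_H-768_A-12/bert_model.ckpt")  # raises ValueError
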
def convert_to_unicode(text):
  """Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
  if six.PY3:
    if isinstance(text, str):
      return text
    elif isinstance(text, bytes):
      return text.decode("utf-8", "ignore")
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  elif six.PY2:
    if isinstance(text, str):
      return text.decode("utf-8", "ignore")
    elif isinstance(text, unicode):
      return text
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  else:
    raise ValueError("Not running on Python2 or Python 3?")


def printable_text(text):
  """Returns text encoded in a way suitable for print or `tf.logging`."""

  # These functions want `str` for both Python2 and Python3, but in one case
  # it's a Unicode string and in the other it's a byte string.
  if six.PY3:
    if isinstance(text, str):
      return text
    elif isinstance(text, bytes):
      return text.decode("utf-8", "ignore")
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  elif six.PY2:
    if isinstance(text, str):
      return text
    elif isinstance(text, unicode):
      return text.encode("utf-8")
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  else:
    raise ValueError("Not running on Python2 or Python 3?")

def load_vocab(vocab_file):
  """Loads a vocabulary file into a dictionary."""
  vocab = collections.OrderedDict()
  index = 0
  with tf.io.gfile.GFile(vocab_file, "r") as reader:
    while True:
      token = convert_to_unicode(reader.readline())
      if not token:
        break
      token = token.strip()
      vocab[token] = index
      index += 1
  return vocab


def convert_by_vocab(vocab, items):
  """Converts a sequence of [tokens|ids] using the vocab."""
  output = []
  for item in items:
    output.append(vocab[item])
  return output


def convert_tokens_to_ids(vocab, tokens):
  return convert_by_vocab(vocab, tokens)


def convert_ids_to_tokens(inv_vocab, ids):
  return convert_by_vocab(inv_vocab, ids)


def whitespace_tokenize(text):
  """Runs basic whitespace cleaning and splitting on a piece of text."""
  text = text.strip()
  if not text:
    return []
  tokens = text.split()
  return tokens

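# The vocab file format assumed by `load_vocab` above: one token per line,
# with ids assigned in zero-based line order. For example:
#
#   [PAD]   <- id 0
#   [UNK]   <- id 1
#   the     <- id 2
#   ##ing   <- id 3
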
class FullTokenizer(object):
  """Runs end-to-end tokenization."""

  def __init__(self, vocab_file, do_lower_case=True, split_on_punc=True):
    self.vocab = load_vocab(vocab_file)
    self.inv_vocab = {v: k for k, v in self.vocab.items()}
    self.basic_tokenizer = BasicTokenizer(
        do_lower_case=do_lower_case, split_on_punc=split_on_punc)
    self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)

  def tokenize(self, text):
    split_tokens = []
    for token in self.basic_tokenizer.tokenize(text):
      for sub_token in self.wordpiece_tokenizer.tokenize(token):
        split_tokens.append(sub_token)

    return split_tokens

  def convert_tokens_to_ids(self, tokens):
    return convert_by_vocab(self.vocab, tokens)

  def convert_ids_to_tokens(self, ids):
    return convert_by_vocab(self.inv_vocab, ids)

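# Hedged usage sketch: "vocab.txt" is a hypothetical path, and the exact
# pieces depend on the vocabulary used.
#
#   tokenizer = FullTokenizer(vocab_file="vocab.txt", do_lower_case=True)
#   tokens = tokenizer.tokenize("The unaffable cat")
#   # e.g. ["the", "un", "##aff", "##able", "cat"] with a BERT-style vocab
#   ids = tokenizer.convert_tokens_to_ids(tokens)
#   assert tokenizer.convert_ids_to_tokens(ids) == tokens
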
class BasicTokenizer(object):
  """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""

  def __init__(self, do_lower_case=True, split_on_punc=True):
    """Constructs a BasicTokenizer.

    Args:
      do_lower_case: Whether to lower case the input.
      split_on_punc: Whether to apply split on punctuations. By default BERT
        starts a new token for punctuations. This makes detokenization
        difficult for tasks like seq2seq decoding.
    """
    self.do_lower_case = do_lower_case
    self.split_on_punc = split_on_punc

  def tokenize(self, text):
    """Tokenizes a piece of text."""
    text = convert_to_unicode(text)
    text = self._clean_text(text)

    # This was added on November 1st, 2018 for the multilingual and Chinese
    # models. This is also applied to the English models now, but it doesn't
    # matter since the English models were not trained on any Chinese data
    # and generally don't have any Chinese data in them (there are Chinese
    # characters in the vocabulary because Wikipedia does have some Chinese
    # words in the English Wikipedia.).
    text = self._tokenize_chinese_chars(text)

    orig_tokens = whitespace_tokenize(text)
    split_tokens = []
    for token in orig_tokens:
      if self.do_lower_case:
        token = token.lower()
        token = self._run_strip_accents(token)
      if self.split_on_punc:
        split_tokens.extend(self._run_split_on_punc(token))
      else:
        split_tokens.append(token)

    output_tokens = whitespace_tokenize(" ".join(split_tokens))
    return output_tokens

  def _run_strip_accents(self, text):
    """Strips accents from a piece of text."""
    text = unicodedata.normalize("NFD", text)
    output = []
    for char in text:
      cat = unicodedata.category(char)
      if cat == "Mn":
        continue
      output.append(char)
    return "".join(output)

  def _run_split_on_punc(self, text):
    """Splits punctuation on a piece of text."""
    chars = list(text)
    i = 0
    start_new_word = True
    output = []
    while i < len(chars):
      char = chars[i]
      if _is_punctuation(char):
        output.append([char])
        start_new_word = True
      else:
        if start_new_word:
          output.append([])
        start_new_word = False
        output[-1].append(char)
      i += 1

    return ["".join(x) for x in output]

  def _tokenize_chinese_chars(self, text):
    """Adds whitespace around any CJK character."""
    output = []
    for char in text:
      cp = ord(char)
      if self._is_chinese_char(cp):
        output.append(" ")
        output.append(char)
        output.append(" ")
      else:
        output.append(char)
    return "".join(output)

  def _is_chinese_char(self, cp):
    """Checks whether CP is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean
    # characters, despite its name. The modern Korean Hangul alphabet is a
    # different block, as is Japanese Hiragana and Katakana. Those alphabets
    # are used to write space-separated words, so they are not treated
    # specially and are handled like all of the other languages.
    if ((cp >= 0x4E00 and cp <= 0x9FFF) or
        (cp >= 0x3400 and cp <= 0x4DBF) or
        (cp >= 0x20000 and cp <= 0x2A6DF) or
        (cp >= 0x2A700 and cp <= 0x2B73F) or
        (cp >= 0x2B740 and cp <= 0x2B81F) or
        (cp >= 0x2B820 and cp <= 0x2CEAF) or
        (cp >= 0xF900 and cp <= 0xFAFF) or
        (cp >= 0x2F800 and cp <= 0x2FA1F)):
      return True

    return False

  def _clean_text(self, text):
    """Performs invalid character removal and whitespace cleanup on text."""
    output = []
    for char in text:
      cp = ord(char)
      if cp == 0 or cp == 0xfffd or _is_control(char):
        continue
      if _is_whitespace(char):
        output.append(" ")
      else:
        output.append(char)
    return "".join(output)

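# Self-contained example of `BasicTokenizer` (no vocab required):
#
#   BasicTokenizer(do_lower_case=True).tokenize(u"Héllo, World!")
#   # -> ["hello", ",", "world", "!"]
#   BasicTokenizer(do_lower_case=True, split_on_punc=False).tokenize(
#       u"Héllo, World!")
#   # -> ["hello,", "world!"]
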
class WordpieceTokenizer(object):
  """Runs WordPiece tokenization."""

  def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=400):
    self.vocab = vocab
    self.unk_token = unk_token
    self.max_input_chars_per_word = max_input_chars_per_word

  def tokenize(self, text):
    """Tokenizes a piece of text into its word pieces.

    This uses a greedy longest-match-first algorithm to perform tokenization
    using the given vocabulary.

    For example:
      input = "unaffable"
      output = ["un", "##aff", "##able"]

    Args:
      text: A single token or whitespace separated tokens. This should have
        already been passed through `BasicTokenizer`.

    Returns:
      A list of wordpiece tokens.
    """

    text = convert_to_unicode(text)

    output_tokens = []
    for token in whitespace_tokenize(text):
      chars = list(token)
      if len(chars) > self.max_input_chars_per_word:
        output_tokens.append(self.unk_token)
        continue

      is_bad = False
      start = 0
      sub_tokens = []
      while start < len(chars):
        end = len(chars)
        cur_substr = None
        while start < end:
          substr = "".join(chars[start:end])
          if start > 0:
            substr = "##" + substr
          if substr in self.vocab:
            cur_substr = substr
            break
          end -= 1
        if cur_substr is None:
          is_bad = True
          break
        sub_tokens.append(cur_substr)
        start = end

      if is_bad:
        output_tokens.append(self.unk_token)
      else:
        output_tokens.extend(sub_tokens)
    return output_tokens

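# Worked trace of the greedy longest-match loop above, assuming a hypothetical
# vocab containing {"un", "##aff", "##able"}:
#   "unaffable": try "unaffable", "unaffabl", ... shrinking from the right
#   until "un" is found in the vocab; then from offset 2 try "##affable",
#   "##affabl", ... until "##aff" matches; then "##able" matches, giving
#   ["un", "##aff", "##able"]. If some offset yields no match at all, the
#   whole word maps to `unk_token`.
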
def _is_whitespace(char):
  """Checks whether `char` is a whitespace character."""
  # \t, \n, and \r are technically control characters but we treat them
  # as whitespace since they are generally considered as such.
  if char == " " or char == "\t" or char == "\n" or char == "\r":
    return True
  cat = unicodedata.category(char)
  if cat == "Zs":
    return True
  return False


def _is_control(char):
  """Checks whether `char` is a control character."""
  # These are technically control characters but we count them as whitespace
  # characters.
  if char == "\t" or char == "\n" or char == "\r":
    return False
  cat = unicodedata.category(char)
  if cat in ("Cc", "Cf"):
    return True
  return False


def _is_punctuation(char):
  """Checks whether `char` is a punctuation character."""
  cp = ord(char)
  # We treat all non-letter/number ASCII as punctuation.
  # Characters such as "^", "$", and "`" are not in the Unicode
  # Punctuation class but we treat them as punctuation anyways, for
  # consistency.
  if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
      (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
    return True
  cat = unicodedata.category(char)
  if cat.startswith("P"):
    return True
  return False

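# Note on the ASCII ranges in `_is_punctuation`: 33-47 is "!" through "/",
# 58-64 is ":" through "@", 91-96 is "[" through "`", and 123-126 is "{"
# through "~". So e.g. _is_punctuation("$") is True even though "$" has
# Unicode category "Sc", not a "P*" punctuation category.
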
def preprocess_text(inputs, remove_space=True, lower=False):
  """Preprocesses data by removing extra space and normalizing the data.

  This method is used together with sentence piece tokenizer and is forked
  from:
  https://github.com/google-research/google-research/blob/e1f6fa00/albert/tokenization.py

  Args:
    inputs: The input text.
    remove_space: Whether to remove the extra space.
    lower: Whether to lowercase the text.

  Returns:
    The preprocessed text.
  """
  outputs = inputs
  if remove_space:
    outputs = " ".join(inputs.strip().split())

  if six.PY2 and isinstance(outputs, str):
    try:
      outputs = six.ensure_text(outputs, "utf-8")
    except UnicodeDecodeError:
      outputs = six.ensure_text(outputs, "latin-1")

  outputs = unicodedata.normalize("NFKD", outputs)
  outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
  if lower:
    outputs = outputs.lower()

  return outputs

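# Example of what `preprocess_text` produces:
#
#   preprocess_text("  Héllo   world ", lower=True)
#   # -> "hello world": whitespace collapsed, the NFKD combining accent on
#   # "é" stripped, then lowercased
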
def encode_pieces(sp_model, text, sample=False):
  """Segments text into pieces.

  This method is used together with sentence piece tokenizer and is forked
  from:
  https://github.com/google-research/google-research/blob/e1f6fa00/albert/tokenization.py

  Args:
    sp_model: A spm.SentencePieceProcessor object.
    text: The input text to be segmented.
    sample: Whether to randomly sample a segmentation output or return a
      deterministic one.

  Returns:
    A list of token pieces.
  """
  if six.PY2 and isinstance(text, six.text_type):
    text = six.ensure_binary(text, "utf-8")

  if not sample:
    pieces = sp_model.EncodeAsPieces(text)
  else:
    pieces = sp_model.SampleEncodeAsPieces(text, 64, 0.1)
  new_pieces = []
  for piece in pieces:
    piece = printable_text(piece)
    if len(piece) > 1 and piece[-1] == "," and piece[-2].isdigit():
      cur_pieces = sp_model.EncodeAsPieces(
          piece[:-1].replace(SPIECE_UNDERLINE, ""))
      if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
        if len(cur_pieces[0]) == 1:
          cur_pieces = cur_pieces[1:]
        else:
          cur_pieces[0] = cur_pieces[0][1:]
      cur_pieces.append(piece[-1])
      new_pieces.extend(cur_pieces)
    else:
      new_pieces.append(piece)

  return new_pieces

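# The digit-comma branch above re-splits pieces that end in "<digit>,", so the
# trailing comma becomes its own piece: a piece like "▁2," is re-encoded from
# "2" and then "," is appended, matching the ALBERT reference implementation
# linked in the docstring.
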
def encode_ids(sp_model, text, sample=False):
  """Segments text and returns token ids.

  This method is used together with sentence piece tokenizer and is forked
  from:
  https://github.com/google-research/google-research/blob/e1f6fa00/albert/tokenization.py

  Args:
    sp_model: A spm.SentencePieceProcessor object.
    text: The input text to be segmented.
    sample: Whether to randomly sample a segmentation output or return a
      deterministic one.

  Returns:
    A list of token ids.
  """
  pieces = encode_pieces(sp_model, text, sample=sample)
  ids = [sp_model.PieceToId(piece) for piece in pieces]
  return ids

class FullSentencePieceTokenizer(object):
  """Runs end-to-end sentence piece tokenization.

  The interface of this class is intended to be kept the same as the
  `FullTokenizer` class above for easier usage.
  """

  def __init__(self, sp_model_file):
    """Inits FullSentencePieceTokenizer.

    Args:
      sp_model_file: The path to the sentence piece model file.
    """
    self.sp_model = spm.SentencePieceProcessor()
    self.sp_model.Load(sp_model_file)
    self.vocab = {
        self.sp_model.IdToPiece(i): i
        for i in six.moves.range(self.sp_model.GetPieceSize())
    }

  def tokenize(self, text):
    """Tokenizes text into pieces."""
    return encode_pieces(self.sp_model, text)

  def convert_tokens_to_ids(self, tokens):
    """Converts a list of tokens to a list of ids."""
    return [self.sp_model.PieceToId(printable_text(token)) for token in tokens]

  def convert_ids_to_tokens(self, ids):
    """Converts a list of ids to a list of tokens."""
    return [self.sp_model.IdToPiece(id_) for id_ in ids]