# -*- encoding:utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from tencentpretrain.utils.constants import *
from tencentpretrain.utils.vocab import Vocab
import collections
import unicodedata
import six
import regex as re


class Tokenizer(object):
    def __init__(self, args, is_src=True):
        self.vocab = None
        self.sp_model = None
        if is_src:
            spm_model_path = args.spm_model_path
            vocab_path = args.vocab_path
        else:
            spm_model_path = args.tgt_spm_model_path
            vocab_path = args.tgt_vocab_path

        if spm_model_path:
            try:
                import sentencepiece as spm
            except ImportError:
                raise ImportError("You need to install SentencePiece to use XLNetTokenizer: "
                                  "https://github.com/google/sentencepiece "
                                  "pip install sentencepiece")
            self.sp_model = spm.SentencePieceProcessor()
            self.sp_model.Load(spm_model_path)
            self.vocab = {self.sp_model.IdToPiece(i): i for i
                          in range(self.sp_model.GetPieceSize())}
        else:
            self.vocab = Vocab()
            self.vocab.load(vocab_path, is_quiet=True)
            self.vocab = self.vocab.w2i
        self.inv_vocab = {v: k for k, v in self.vocab.items()}

    def tokenize(self, text):
        raise NotImplementedError

    def convert_tokens_to_ids(self, tokens):
        if self.sp_model:
            return [self.sp_model.PieceToId(
                printable_text(token)) for token in tokens]
        else:
            return convert_by_vocab(self.vocab, tokens)

    def convert_ids_to_tokens(self, ids):
        if self.sp_model:
            return [self.sp_model.IdToPiece(id_) for id_ in ids]
        else:
            return convert_by_vocab(self.inv_vocab, ids)
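
# Example (illustrative): the constructor only reads the attributes used above,
# so a plain namespace with placeholder paths is enough to build a tokenizer by hand.
#
#   from types import SimpleNamespace
#   args = SimpleNamespace(spm_model_path=None, vocab_path="path/to/vocab.txt")
#   tokenizer = Tokenizer(args)
#   tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids(["hello"]))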

class CharTokenizer(Tokenizer):
    def __init__(self, args, is_src=True):
        super().__init__(args, is_src)

    def tokenize(self, text, use_vocab=True):
        if use_vocab:
            return [token if token in self.vocab else UNK_TOKEN for token in list(text.strip())]
        else:
            return [token for token in list(text.strip())]


class SpaceTokenizer(Tokenizer):
    def __init__(self, args, is_src=True):
        super().__init__(args, is_src)

    def tokenize(self, text, use_vocab=True):
        if use_vocab:
            return [token if token in self.vocab else UNK_TOKEN for token in text.strip().split(" ")]
        else:
            return [token for token in text.strip().split(" ")]
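
# Example (illustrative): CharTokenizer splits into single characters and
# SpaceTokenizer splits on single spaces; with use_vocab=True, out-of-vocabulary
# tokens are replaced by UNK_TOKEN.
#
#   CharTokenizer(args).tokenize("ab c", use_vocab=False)   -> ["a", "b", " ", "c"]
#   SpaceTokenizer(args).tokenize("ab c", use_vocab=False)  -> ["ab", "c"]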

SPIECE_UNDERLINE = u"▁".encode("utf-8")


def preprocess_text(inputs, remove_space=True, lower=False):
    """Preprocesses data by removing extra spaces and normalizing the text."""
    outputs = inputs
    if remove_space:
        outputs = " ".join(inputs.strip().split())
    if six.PY2 and isinstance(outputs, str):
        try:
            outputs = six.ensure_text(outputs, "utf-8")
        except UnicodeDecodeError:
            outputs = six.ensure_text(outputs, "latin-1")

    outputs = unicodedata.normalize("NFKD", outputs)
    outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
    if lower:
        outputs = outputs.lower()

    return outputs
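
# Example (illustrative): extra whitespace is collapsed and NFKD combining
# marks (accents) are dropped.
#
#   preprocess_text("  Héllo   world ", lower=True)  -> "hello world"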

def encode_pieces(sp_model, text, return_unicode=True, sample=False):
    """Turns sentences into word pieces."""
    if six.PY2 and isinstance(text, six.text_type):
        text = six.ensure_binary(text, "utf-8")

    if not sample:
        pieces = sp_model.EncodeAsPieces(text)
    else:
        pieces = sp_model.SampleEncodeAsPieces(text, 64, 0.1)
    new_pieces = []
    for piece in pieces:
        piece = printable_text(piece)
        # Re-split pieces like "2015," so that the trailing comma becomes its own piece.
        if len(piece) > 1 and piece[-1] == "," and piece[-2].isdigit():
            cur_pieces = sp_model.EncodeAsPieces(
                six.ensure_binary(piece[:-1]).replace(SPIECE_UNDERLINE, b""))
            if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                if len(cur_pieces[0]) == 1:
                    cur_pieces = cur_pieces[1:]
                else:
                    cur_pieces[0] = cur_pieces[0][1:]
            cur_pieces.append(piece[-1])
            new_pieces.extend(cur_pieces)
        else:
            new_pieces.append(piece)

    # note(zhiliny): convert back to unicode for py2
    if six.PY2 and return_unicode:
        ret_pieces = []
        for piece in new_pieces:
            if isinstance(piece, str):
                piece = six.ensure_text(piece, "utf-8")
            ret_pieces.append(piece)
        new_pieces = ret_pieces

    return new_pieces

def encode_ids(sp_model, text, sample=False):
    pieces = encode_pieces(sp_model, text, return_unicode=False, sample=sample)
    ids = [sp_model.PieceToId(piece) for piece in pieces]
    return ids
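
# Example (illustrative; the exact pieces depend on the SentencePiece model,
# and the model path below is a placeholder):
#
#   import sentencepiece as spm
#   sp = spm.SentencePieceProcessor()
#   sp.Load("path/to/spiece.model")
#   encode_pieces(sp, "Hello world")   # e.g. ["▁Hello", "▁world"]
#   encode_ids(sp, "Hello world")      # the same pieces mapped to their ids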

def convert_to_unicode(text):
    """Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
    if six.PY3:
        if isinstance(text, str):
            return text
        elif isinstance(text, bytes):
            return six.ensure_text(text, "utf-8", "ignore")
        else:
            raise ValueError("Unsupported string type: %s" % (type(text)))
    elif six.PY2:
        if isinstance(text, str):
            return six.ensure_text(text, "utf-8", "ignore")
        elif isinstance(text, six.text_type):
            return text
        else:
            raise ValueError("Unsupported string type: %s" % (type(text)))
    else:
        raise ValueError("Not running on Python 2 or Python 3?")


def printable_text(text):
    """Returns text encoded in a way suitable for print or `tf.logging`."""

    # These functions want `str` for both Python 2 and Python 3, but in one case
    # it's a Unicode string and in the other it's a byte string.
    if six.PY3:
        if isinstance(text, str):
            return text
        elif isinstance(text, bytes):
            return six.ensure_text(text, "utf-8", "ignore")
        else:
            raise ValueError("Unsupported string type: %s" % (type(text)))
    elif six.PY2:
        if isinstance(text, str):
            return text
        elif isinstance(text, six.text_type):
            return six.ensure_binary(text, "utf-8")
        else:
            raise ValueError("Unsupported string type: %s" % (type(text)))
    else:
        raise ValueError("Not running on Python 2 or Python 3?")


def convert_by_vocab(vocab, items):
    """Converts a sequence of [tokens|ids] using the vocab."""
    output = []
    for item in items:
        output.append(vocab[item] if item in vocab else vocab.get(UNK_TOKEN))
    return output


def convert_tokens_to_ids(vocab, tokens):
    return convert_by_vocab(vocab, tokens)


def convert_ids_to_tokens(inv_vocab, ids):
    return convert_by_vocab(inv_vocab, ids)


def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    text = text.strip()
    if not text:
        return []
    tokens = text.split()
    return tokens

def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings. We specifically avoid mapping to whitespace/control
    characters the bpe code barfs on.
    The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
    if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
    decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
    tables between utf-8 bytes and unicode strings.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2 ** 8):
        if b not in bs:
            bs.append(b)
            cs.append(2 ** 8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
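
# Example (illustrative): printable bytes map to themselves, while bytes the BPE
# would choke on are shifted into an unused unicode range. The space byte (32)
# is the 33rd missing byte counting from 0, so it maps to chr(256 + 32) = "Ġ".
#
#   byte_encoder = bytes_to_unicode()
#   byte_encoder[ord("A")]   -> "A"
#   byte_encoder[ord(" ")]   -> "Ġ"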

def get_pairs(word):
    """
    Return set of symbol pairs in a word.
    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
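
# Example (illustrative):
#
#   get_pairs(("l", "o", "w", "er"))  -> {("l", "o"), ("o", "w"), ("w", "er")}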

class BertTokenizer(Tokenizer):
    """Runs end-to-end tokenization."""

    def __init__(self, args, is_src=True):
        super().__init__(args, is_src)
        if not args.spm_model_path:
            self.basic_tokenizer = BasicTokenizer(do_lower_case=args.do_lower_case if is_src else args.tgt_do_lower_case)
            self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=UNK_TOKEN)

    def tokenize(self, text):
        if self.sp_model:
            split_tokens = encode_pieces(self.sp_model, text, return_unicode=False)
        else:
            split_tokens = []
            for token in self.basic_tokenizer.tokenize(text):
                for sub_token in self.wordpiece_tokenizer.tokenize(token):
                    split_tokens.append(sub_token)

        return split_tokens
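
# Example (illustrative; the exact word pieces depend on the vocabulary file):
# without a SentencePiece model, tokenization is the usual two-stage BERT
# pipeline -- BasicTokenizer for cleaning and punctuation splitting, then
# WordpieceTokenizer for subwords.
#
#   bert_tokenizer = BertTokenizer(args)
#   bert_tokenizer.tokenize("He was unaffable.")
#   # e.g. ["he", "was", "un", "##aff", "##able", "."]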

class BPETokenizer(Tokenizer):
    def __init__(self, args, is_src=True):
        super().__init__(args, is_src)
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(args.merges_path, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        # Repeatedly merge the adjacent pair with the lowest merge rank until no
        # pair in the word appears in the merge table.
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
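
# Example (illustrative; the merges actually applied depend on merges_path):
# the regex first splits the text into chunks, each chunk is byte-encoded
# (a leading space becomes "Ġ"), and bpe() then merges characters bottom-up.
#
#   bpe_tokenizer.tokenize("Hello world")
#   # chunks: ["Hello", " world"] -> byte-level: "Hello", "Ġworld"
#   # e.g. ["Hello", "Ġworld"] if both appear as merged units in the BPE table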

class XLMRobertaTokenizer(Tokenizer):
    """Runs end-to-end tokenization."""

    def __init__(self, args, is_src=True):
        super().__init__(args, is_src)
        assert args.spm_model_path, \
            "spm_model_path must be provided for the Hugging Face RoBERTa tokenizer"
        special_tokens = ["<s>", "<pad>", "</s>", "<unk>"]
        vocab = [token for token in self.vocab if token not in special_tokens]
        vocab = special_tokens + vocab + ["<mask>"]
        self.vocab = {k: v for v, k in enumerate(vocab)}
        self.inv_vocab = {v: k for k, v in self.vocab.items()}

    def tokenize(self, text):
        split_tokens = encode_pieces(self.sp_model, text, return_unicode=False)
        return split_tokens

    def convert_tokens_to_ids(self, tokens):
        return convert_by_vocab(self.vocab, tokens)

    def convert_ids_to_tokens(self, ids):
        return convert_by_vocab(self.inv_vocab, ids)
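
# Example (illustrative): the id space is rebuilt so that the fairseq-style
# special tokens occupy the first ids and "<mask>" is appended at the end,
# matching the layout pretrained XLM-R checkpoints expect.
#
#   xlmr_tokenizer = XLMRobertaTokenizer(args)
#   xlmr_tokenizer.convert_tokens_to_ids(["<s>", "<pad>", "</s>", "<unk>"])  -> [0, 1, 2, 3]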

class BasicTokenizer(object):
    """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""

    def __init__(self, do_lower_case):
        """Constructs a BasicTokenizer.

        Args:
            do_lower_case: Whether to lower case the input ("true" enables lowercasing).
        """
        self.do_lower_case = (do_lower_case == "true")

    def tokenize(self, text):
        """Tokenizes a piece of text."""
        text = convert_to_unicode(text)
        text = self._clean_text(text)

        # This was added on November 1st, 2018 for the multilingual and Chinese
        # models. This is also applied to the English models now, but it doesn't
        # matter since the English models were not trained on any Chinese data
        # and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia.).
        text = self._tokenize_chinese_chars(text)

        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            if self.do_lower_case:
                token = token.lower()
                token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token))

        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _run_split_on_punc(self, text):
        """Splits punctuation on a piece of text."""
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1

        return ["".join(x) for x in output]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and are handled
        # like all of the other languages.
        if ((cp >= 0x4E00 and cp <= 0x9FFF) or      # CJK Unified Ideographs
                (cp >= 0x3400 and cp <= 0x4DBF) or      # CJK Unified Ideographs Extension A
                (cp >= 0x20000 and cp <= 0x2A6DF) or    # CJK Unified Ideographs Extension B
                (cp >= 0x2A700 and cp <= 0x2B73F) or    # CJK Unified Ideographs Extension C
                (cp >= 0x2B740 and cp <= 0x2B81F) or    # CJK Unified Ideographs Extension D
                (cp >= 0x2B820 and cp <= 0x2CEAF) or    # CJK Unified Ideographs Extension E
                (cp >= 0xF900 and cp <= 0xFAFF) or      # CJK Compatibility Ideographs
                (cp >= 0x2F800 and cp <= 0x2FA1F)):     # CJK Compatibility Ideographs Supplement
            return True

        return False

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            if cp == 0 or cp == 0xfffd or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)

class WordpieceTokenizer(object):
    """Runs WordPiece tokenization."""

    def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """Tokenizes a piece of text into its word pieces.

        This uses a greedy longest-match-first algorithm to perform tokenization
        using the given vocabulary.

        For example:
            input = "unaffable"
            output = ["un", "##aff", "##able"]

        Args:
            text: A single token or whitespace separated tokens. This should have
                already been passed through `BasicTokenizer`.

        Returns:
            A list of wordpiece tokens.
        """
        text = convert_to_unicode(text)

        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue

            is_bad = False
            start = 0
            sub_tokens = []
            while start < len(chars):
                end = len(chars)
                cur_substr = None
                # Shrink the window from the right until the substring is in the vocab.
                while start < end:
                    substr = "".join(chars[start:end])
                    if start > 0:
                        substr = "##" + six.ensure_str(substr)
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end

            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens
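
# Example (illustrative; results depend on the supplied vocab). Non-initial
# pieces carry the "##" continuation prefix, and a word that cannot be fully
# covered by the vocab collapses to the unk_token.
#
#   vocab = {"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3}
#   WordpieceTokenizer(vocab=vocab).tokenize("unaffable")  -> ["un", "##aff", "##able"]
#   WordpieceTokenizer(vocab=vocab).tokenize("xyz")        -> ["[UNK]"]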

def _is_whitespace(char):
    """Checks whether `char` is a whitespace character."""
    # \t, \n, and \r are technically control characters but we treat them
    # as whitespace since they are generally considered as such.
    if char == " " or char == "\t" or char == "\n" or char == "\r":
        return True
    cat = unicodedata.category(char)
    if cat == "Zs":
        return True
    return False


def _is_control(char):
    """Checks whether `char` is a control character."""
    # These are technically control characters but we count them as whitespace
    # characters.
    if char == "\t" or char == "\n" or char == "\r":
        return False
    cat = unicodedata.category(char)
    if cat in ("Cc", "Cf"):
        return True
    return False


def _is_punctuation(char):
    """Checks whether `char` is a punctuation character."""
    cp = ord(char)
    # We treat all non-letter/number ASCII as punctuation.
    # Characters such as "^", "$", and "`" are not in the Unicode
    # Punctuation class but we treat them as punctuation anyways, for
    # consistency.
    if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
            (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False

class ImageTokenizer(Tokenizer):
    """Virtual tokenizer for VQGAN models: the "vocabulary" is just the range of image codebook ids."""

    def __init__(self, args, is_src=True):
        self.vocab = range(args.image_tokenizer["image_vocab_size"])


class VirtualTokenizer(Tokenizer):
    """Virtual tokenizer for ViT models: patches are embedded directly, so the vocabulary is empty."""

    def __init__(self, args, is_src=True):
        self.vocab = []


class TextImageTokenizer(BertTokenizer):
    """Text and image tokenizer (BERT and VQGAN)."""

    def __init__(self, args, is_src=True):
        super().__init__(args, is_src)
        # Image codebook entries are placed after the text vocabulary:
        # entry vocab_bias + i corresponds to image code i.
        self.vocab_bias = len(self.vocab)
        for i in range(args.image_tokenizer["image_vocab_size"]):
            self.vocab[i + self.vocab_bias] = str(i)
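
# Example (illustrative; sizes are hypothetical): with a 21128-token text vocab
# and image_tokenizer["image_vocab_size"] = 8192, text ids stay 0..21127 and
# image code i is addressed as 21128 + i, for a combined table of 29320 entries.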