AIAT

kkatiz committed · Commit ece4c0d · verified · 1 Parent(s): 4ebd91f

Delete tokenization_SEA_BPE.py

Files changed (1)
  1. tokenization_SEA_BPE.py +0 -197
tokenization_SEA_BPE.py DELETED
@@ -1,197 +0,0 @@
- import os
- from shutil import copyfile
- from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
- import sentencepiece as spm
- from tokenizers import processors
- from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
- from transformers.utils import logging
-
- logger = logging.get_logger(__name__)
- VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
- SPIECE_UNDERLINE = "▁"
-
-
- class SEABPETokenizer(PreTrainedTokenizer):
-     """
-     Construct the SEA BPE tokenizer tailored for SEA languages, based on Byte-Pair Encoding with an expanded vocabulary size.
-
-     Args:
-         vocab_file (`str`):
-             Path to the vocabulary file.
-         legacy (`bool`, *optional*, defaults to `True`):
-             Whether or not the `legacy` behaviour of the tokenizer should be used. Legacy is before the merge of #24622,
-             which includes fixes to properly handle tokens that appear after special tokens.
-             Legacy means we are not modifying existing tokenizers without knowing (and we need to manually update those core tokenizers).
-
-             A simple example:
-
-             - `legacy=True`:
-             ```python
-             >>> from transformers import T5Tokenizer
-
-             >>> tokenizer = T5Tokenizer.from_pretrained("t5-base", legacy=True)
-             >>> tokenizer.encode("Hello <extra_id_0>.")
-             [8774, 32099, 3, 5, 1]
-             ```
-             - `legacy=False`:
-             ```python
-             >>> from transformers import T5Tokenizer
-
-             >>> tokenizer = T5Tokenizer.from_pretrained("t5-base", legacy=False)
-             >>> tokenizer.encode("Hello <extra_id_0>.")  # the extra space `[3]` is no longer here
-             [8774, 32099, 5, 1]
-             ```
-             Check out the pull request and the issue [here](https://github.com/huggingface/transformers/pull/24565) for
-             more details.
-
-     """
-
-     vocab_files_names = VOCAB_FILES_NAMES
-
-     def __init__(
-         self,
-         vocab_file,
-         unk_token="<unk>",
-         bos_token=None,
-         eos_token="<|endoftext|>",
-         pad_token=None,
-         sp_model_kwargs: Optional[Dict[str, Any]] = None,
-         add_bos_token=False,
-         add_eos_token=False,
-         clean_up_tokenization_spaces=False,
-         legacy=None,
-         **kwargs,
-     ):
-         self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
-         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
-         self.sp_model.Load(vocab_file)
-         super().__init__(
-             bos_token=bos_token,
-             eos_token=eos_token,
-             unk_token=unk_token,
-             pad_token=pad_token,
-             add_bos_token=add_bos_token,
-             add_eos_token=add_eos_token,
-             sp_model_kwargs=self.sp_model_kwargs,
-             clean_up_tokenization_spaces=clean_up_tokenization_spaces,
-             legacy=legacy,
-             **kwargs,
-         )
-         if legacy is None:
-             logger.warning_once(
-                 f"You are using the default legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to read the related pull request available at https://github.com/huggingface/transformers/pull/24565, and set the legacy attribute accordingly."
-             )
-             legacy = True
-         self.legacy = legacy
-         self.vocab_file = vocab_file
-         self.add_bos_token = add_bos_token
-         self.add_eos_token = add_eos_token
-
-     def __getstate__(self):
-         state = self.__dict__.copy()
-         state["sp_model"] = None
-         state["sp_model_proto"] = self.sp_model.serialized_model_proto()
-         return state
-
-     def __setstate__(self, d):
-         self.__dict__ = d
-         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
-         self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
-
-     @property
-     def vocab_size(self):
-         """Returns vocab size"""
-         return self.sp_model.get_piece_size()
-
-     def get_vocab(self):
-         """Returns vocab as a dict"""
-         vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
-         vocab.update(self.added_tokens_encoder)
-         return vocab
-
-     def tokenize(self, text, **kwargs) -> List[str]:
-         if not self.legacy:
-             text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
-         return super().tokenize(text, **kwargs)
-
-     def _tokenize(self, text):
-         """
-         Returns a tokenized string.
-
-         Since the sentencepiece internal model always adds a SPIECE_UNDERLINE at the beginning of the provided text,
-         we need to remove it by hand when the current text is a subsequence. This happens whenever the `self.tokenize`
-         function is called with special tokens: the input is split on the special tokens, and each subsequence is
-         passed to `_tokenize`. Thus if a subsequence did not start with a `" "` or SPIECE_UNDERLINE, we have to remove
-         the extra `SPIECE_UNDERLINE` prepended.
-         """
-         if not self.legacy:
-             is_first = text.startswith(SPIECE_UNDERLINE)
-             if is_first:
-                 text = text[1:]
-         tokens = self.sp_model.encode(text, out_type=str)
-         if (
-             not self.legacy
-             and (not is_first)
-             and (not text.startswith(" "))
-             and tokens[0].startswith(SPIECE_UNDERLINE)
-         ):
-             tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
-         return tokens
-
-     def _convert_token_to_id(self, token):
-         """Converts a token (str) to an id using the vocab."""
-         return self.sp_model.piece_to_id(token)
-
-     def _convert_id_to_token(self, index):
-         """Converts an index (integer) to a token (str) using the vocab."""
-         token = self.sp_model.IdToPiece(index)
-         return token
-
-     def convert_tokens_to_string(self, tokens):
-         """Converts a sequence of tokens (strings) into a single string."""
-         current_sub_tokens = []
-         out_string = ""
-         prev_is_special = False
-         for i, token in enumerate(tokens):
-             if token in self.all_special_tokens:
-                 if not prev_is_special and i != 0:
-                     out_string += " "
-                 out_string += self.sp_model.decode(current_sub_tokens) + token
-                 prev_is_special = True
-                 current_sub_tokens = []
-             else:
-                 current_sub_tokens.append(token)
-                 prev_is_special = False
-         out_string += self.sp_model.decode(current_sub_tokens)
-         return out_string
167
-
168
- def save_vocabulary(
169
- self, save_directory, filename_prefix: Optional[str] = None
170
- ) -> Tuple[str]:
171
- """
172
- Save the vocabulary and special tokens file to a directory.
173
-
174
- Args:
175
- save_directory (`str`):
176
- The directory in which to save the vocabulary.
177
-
178
- Returns:
179
- `Tuple(str)`: Paths to the files saved.
180
- """
181
- if not os.path.isdir(save_directory):
182
- logger.error(f"Vocabulary path ({save_directory}) should be a directory")
183
- return
184
- out_vocab_file = os.path.join(
185
- save_directory,
186
- (filename_prefix + "-" if filename_prefix else "")
187
- + VOCAB_FILES_NAMES["vocab_file"],
188
- )
189
- if os.path.abspath(self.vocab_file) != os.path.abspath(
190
- out_vocab_file
191
- ) and os.path.isfile(self.vocab_file):
192
- copyfile(self.vocab_file, out_vocab_file)
193
- elif not os.path.isfile(self.vocab_file):
194
- with open(out_vocab_file, "wb") as fi:
195
- content_spiece_model = self.sp_model.serialized_model_proto()
196
- fi.write(content_spiece_model)
197
- return (out_vocab_file,)
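
For context, the class removed here could still be exercised from a local copy of this file together with a SentencePiece model. The snippet below is a minimal sketch, not part of this commit: the module name `tokenization_SEA_BPE.py`, the model path `tokenizer.model`, the output directory, and the sample text are all assumptions made for illustration.

```python
# Minimal usage sketch (assumptions: a local copy of the deleted tokenization_SEA_BPE.py
# is importable, and a SentencePiece model is saved as "tokenizer.model").
import os

from tokenization_SEA_BPE import SEABPETokenizer

# legacy=False opts into the post-#24565 handling of text that follows special tokens.
tokenizer = SEABPETokenizer(vocab_file="tokenizer.model", legacy=False)

ids = tokenizer.encode("Selamat pagi, dunia!")  # sample text, chosen arbitrarily
print(ids)
print(tokenizer.convert_ids_to_tokens(ids))

# save_vocabulary copies (or re-serializes) tokenizer.model into an existing directory.
os.makedirs("exported_tokenizer", exist_ok=True)
print(tokenizer.save_vocabulary("exported_tokenizer"))
```

Since this commit deletes the custom tokenizer code, loading it via `AutoTokenizer.from_pretrained(..., trust_remote_code=True)` would only work against repository revisions that still contain the file, such as the parent commit 4ebd91f.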