diff --git "a/tokenization_utils_base.py" "b/tokenization_utils_base.py"
new file mode 100644
--- /dev/null
+++ "b/tokenization_utils_base.py"
@@ -0,0 +1,3831 @@
+# coding=utf-8
+# Copyright 2020 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Base classes common to both the slow and the fast tokenization classes: PreTrainedTokenizerBase (hosts all the
+user-facing encoding methods), SpecialTokensMixin (hosts the special tokens logic) and BatchEncoding (wraps the
+dictionary of outputs with special methods for the fast tokenizers).
+"""
+
+import copy
+import json
+import os
+import re
+import warnings
+from collections import OrderedDict, UserDict
+from collections.abc import Mapping, Sized
+from contextlib import contextmanager
+from dataclasses import dataclass, field
+from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Sequence, Tuple, Union
+
+import numpy as np
+from packaging import version
+
+from transformers import __version__
+from transformers.dynamic_module_utils import custom_object_save
+from transformers.utils import (
+ ExplicitEnum,
+ PaddingStrategy,
+ PushToHubMixin,
+ TensorType,
+ add_end_docstrings,
+ add_model_info_to_auto_map,
+ cached_file,
+ copy_func,
+ download_url,
+ extract_commit_hash,
+ is_flax_available,
+ is_jax_tensor,
+ is_numpy_array,
+ is_offline_mode,
+ is_remote_url,
+ is_tf_available,
+ is_tf_tensor,
+ is_tokenizers_available,
+ is_torch_available,
+ is_torch_device,
+ is_torch_tensor,
+ logging,
+ requires_backends,
+ to_py_obj,
+)
+
+
+if TYPE_CHECKING:
+ if is_torch_available():
+ import torch
+ if is_tf_available():
+ import tensorflow as tf
+ if is_flax_available():
+ import jax.numpy as jnp # noqa: F401
+
+
+if is_tokenizers_available():
+ from tokenizers import AddedToken
+ from tokenizers import Encoding as EncodingFast
+else:
+
+ @dataclass(frozen=True, eq=True)
+ class AddedToken:
+ """
+        AddedToken represents a token to be added to a Tokenizer. An AddedToken can have special options defining
+        the way it should behave.
+ """
+
+ content: str = field(default_factory=str)
+ single_word: bool = False
+ lstrip: bool = False
+ rstrip: bool = False
+ normalized: bool = True
+
+ def __getstate__(self):
+ return self.__dict__
+
+ @dataclass
+ class EncodingFast:
+        """This is a dummy class because without the `tokenizers` library we don't have these objects anyway"""
+
+ pass
+
+
+logger = logging.get_logger(__name__)
+
+VERY_LARGE_INTEGER = int(1e30) # This is used to set the max input length for a model with infinite size input
+LARGE_INTEGER = int(1e20) # This is used when we need something big but slightly smaller than VERY_LARGE_INTEGER
+
+# Define type aliases and NamedTuples
+TextInput = str
+PreTokenizedInput = List[str]
+EncodedInput = List[int]
+TextInputPair = Tuple[str, str]
+PreTokenizedInputPair = Tuple[List[str], List[str]]
+EncodedInputPair = Tuple[List[int], List[int]]
+
+
+# Slow tokenizers used to be saved in three separate files
+SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json"
+ADDED_TOKENS_FILE = "added_tokens.json"
+TOKENIZER_CONFIG_FILE = "tokenizer_config.json"
+
+# Fast tokenizers (provided by HuggingFace's tokenizers library) can be saved in a single file
+FULL_TOKENIZER_FILE = "tokenizer.json"
+_re_tokenizer_file = re.compile(r"tokenizer\.(.*)\.json")
+
+
+class TruncationStrategy(ExplicitEnum):
+ """
+ Possible values for the `truncation` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for tab-completion in
+ an IDE.
+ """
+
+ ONLY_FIRST = "only_first"
+ ONLY_SECOND = "only_second"
+ LONGEST_FIRST = "longest_first"
+ DO_NOT_TRUNCATE = "do_not_truncate"
+
+
+class CharSpan(NamedTuple):
+ """
+ Character span in the original string.
+
+ Args:
+ start (`int`): Index of the first character in the original string.
+ end (`int`): Index of the character following the last character in the original string.
+ """
+
+ start: int
+ end: int
+
+
+class TokenSpan(NamedTuple):
+ """
+ Token span in an encoded string (list of tokens).
+
+ Args:
+ start (`int`): Index of the first token in the span.
+ end (`int`): Index of the token following the last token in the span.
+ """
+
+ start: int
+ end: int
+
+
+class BatchEncoding(UserDict):
+ """
+ Holds the output of the [`~tokenization_utils_base.PreTrainedTokenizerBase.__call__`],
+ [`~tokenization_utils_base.PreTrainedTokenizerBase.encode_plus`] and
+ [`~tokenization_utils_base.PreTrainedTokenizerBase.batch_encode_plus`] methods (tokens, attention_masks, etc).
+
+ This class is derived from a python dictionary and can be used as a dictionary. In addition, this class exposes
+ utility methods to map from word/character space to token space.
+
+ Args:
+ data (`dict`):
+ Dictionary of lists/arrays/tensors returned by the `__call__`/`encode_plus`/`batch_encode_plus` methods
+ ('input_ids', 'attention_mask', etc.).
+ encoding (`tokenizers.Encoding` or `Sequence[tokenizers.Encoding]`, *optional*):
+            If the tokenizer is a fast tokenizer which outputs additional information like the mapping from
+            word/character space to token space, the `tokenizers.Encoding` instance or list of instances (for
+            batches) holds this information.
+ tensor_type (`Union[None, str, TensorType]`, *optional*):
+            You can give a tensor_type here to convert the lists of integers into PyTorch/TensorFlow/NumPy tensors
+            at initialization.
+ prepend_batch_axis (`bool`, *optional*, defaults to `False`):
+ Whether or not to add a batch axis when converting to tensors (see `tensor_type` above).
+ n_sequences (`Optional[int]`, *optional*):
+            The number of sequences used to generate each sample from the batch encoded in this [`BatchEncoding`]
+            (`1` for a single sequence, `2` for a pair of sequences).
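+
+    Example (a minimal, illustrative sketch; the checkpoint name and the outputs shown are assumptions, not part of
+    this class):
+
+    ```python
+    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+    batch = tokenizer(["Hello world", "How are you?"], padding=True)
+
+    # A BatchEncoding behaves like a dict of model inputs ...
+    batch["input_ids"]
+    list(batch.keys())  # ['input_ids', 'token_type_ids', 'attention_mask']
+
+    # ... and, when produced by a fast tokenizer, also exposes word/character alignment helpers.
+    batch.word_ids(0)
+    ```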
+ """
+
+ def __init__(
+ self,
+ data: Optional[Dict[str, Any]] = None,
+ encoding: Optional[Union[EncodingFast, Sequence[EncodingFast]]] = None,
+ tensor_type: Union[None, str, TensorType] = None,
+ prepend_batch_axis: bool = False,
+ n_sequences: Optional[int] = None,
+ ):
+ super().__init__(data)
+
+ if isinstance(encoding, EncodingFast):
+ encoding = [encoding]
+
+ self._encodings = encoding
+
+ if n_sequences is None and encoding is not None and len(encoding):
+ n_sequences = encoding[0].n_sequences
+
+ self._n_sequences = n_sequences
+
+ self.convert_to_tensors(tensor_type=tensor_type, prepend_batch_axis=prepend_batch_axis)
+
+ @property
+ def n_sequences(self) -> Optional[int]:
+ """
+ `Optional[int]`: The number of sequences used to generate each sample from the batch encoded in this
+ [`BatchEncoding`]. Currently can be one of `None` (unknown), `1` (a single sentence) or `2` (a pair of
+ sentences)
+ """
+ return self._n_sequences
+
+ @property
+ def is_fast(self) -> bool:
+ """
+ `bool`: Indicate whether this [`BatchEncoding`] was generated from the result of a [`PreTrainedTokenizerFast`]
+ or not.
+ """
+ return self._encodings is not None
+
+ def __getitem__(self, item: Union[int, str]) -> Union[Any, EncodingFast]:
+ """
+ If the key is a string, returns the value of the dict associated to `key` ('input_ids', 'attention_mask',
+ etc.).
+
+ If the key is an integer, get the `tokenizers.Encoding` for batch item with index `key`.
+
+ If the key is a slice, returns the value of the dict associated to `key` ('input_ids', 'attention_mask', etc.)
+ with the constraint of slice.
+ """
+ if isinstance(item, str):
+ return self.data[item]
+ elif self._encodings is not None:
+ return self._encodings[item]
+ elif isinstance(item, slice):
+ return {key: self.data[key][item] for key in self.data.keys()}
+ else:
+ raise KeyError(
+ "Invalid key. Only three types of key are available: "
+ "(1) string, (2) integers for backend Encoding, and (3) slices for data subsetting."
+ )
+
+ def __getattr__(self, item: str):
+ try:
+ return self.data[item]
+ except KeyError:
+ raise AttributeError
+
+ def __getstate__(self):
+ return {"data": self.data, "encodings": self._encodings}
+
+ def __setstate__(self, state):
+ if "data" in state:
+ self.data = state["data"]
+
+ if "encodings" in state:
+ self._encodings = state["encodings"]
+
+ def keys(self):
+ return self.data.keys()
+
+ def values(self):
+ return self.data.values()
+
+ def items(self):
+ return self.data.items()
+
+ # After this point:
+ # Extended properties and methods only available for fast (Rust-based) tokenizers
+ # provided by HuggingFace tokenizers library.
+
+ @property
+ def encodings(self) -> Optional[List[EncodingFast]]:
+ """
+        `Optional[List[tokenizers.Encoding]]`: The list of all encodings from the tokenization process. Returns
+        `None` if the input was tokenized through a Python (i.e., not fast) tokenizer.
+ """
+ return self._encodings
+
+ def tokens(self, batch_index: int = 0) -> List[str]:
+ """
+ Return the list of tokens (sub-parts of the input strings after word/subword splitting and before conversion to
+ integer indices) at a given batch index (only works for the output of a fast tokenizer).
+
+ Args:
+ batch_index (`int`, *optional*, defaults to 0): The index to access in the batch.
+
+ Returns:
+ `List[str]`: The list of tokens at that index.
+ """
+ if not self._encodings:
+ raise ValueError(
+                "tokens() is not available when using non-fast tokenizers (i.e., not an instance of a"
+                " `XxxTokenizerFast` class)."
+ )
+ return self._encodings[batch_index].tokens
+
+ def sequence_ids(self, batch_index: int = 0) -> List[Optional[int]]:
+ """
+ Return a list mapping the tokens to the id of their original sentences:
+
+ - `None` for special tokens added around or between sequences,
+ - `0` for tokens corresponding to words in the first sequence,
+ - `1` for tokens corresponding to words in the second sequence when a pair of sequences was jointly
+ encoded.
+
+ Args:
+ batch_index (`int`, *optional*, defaults to 0): The index to access in the batch.
+
+ Returns:
+ `List[Optional[int]]`: A list indicating the sequence id corresponding to each token. Special tokens added
+ by the tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding
+ sequence.
+ """
+ if not self._encodings:
+ raise ValueError(
+                "sequence_ids() is not available when using non-fast tokenizers (i.e., not an instance of a"
+                " `XxxTokenizerFast` class)."
+ )
+ return self._encodings[batch_index].sequence_ids
+
+ def words(self, batch_index: int = 0) -> List[Optional[int]]:
+ """
+ Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer.
+
+ Args:
+ batch_index (`int`, *optional*, defaults to 0): The index to access in the batch.
+
+ Returns:
+ `List[Optional[int]]`: A list indicating the word corresponding to each token. Special tokens added by the
+ tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding word
+ (several tokens will be mapped to the same word index if they are parts of that word).
+ """
+ if not self._encodings:
+ raise ValueError(
+                "words() is not available when using non-fast tokenizers (i.e., not an instance of a"
+                " `XxxTokenizerFast` class)."
+ )
+ warnings.warn(
+ "`BatchEncoding.words()` property is deprecated and should be replaced with the identical, "
+ "but more self-explanatory `BatchEncoding.word_ids()` property.",
+ FutureWarning,
+ )
+ return self.word_ids(batch_index)
+
+ def word_ids(self, batch_index: int = 0) -> List[Optional[int]]:
+ """
+ Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer.
+
+ Args:
+ batch_index (`int`, *optional*, defaults to 0): The index to access in the batch.
+
+ Returns:
+ `List[Optional[int]]`: A list indicating the word corresponding to each token. Special tokens added by the
+ tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding word
+ (several tokens will be mapped to the same word index if they are parts of that word).
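+
+        Example (a minimal sketch; the checkpoint name is illustrative and the exact split into tokens, and therefore
+        the returned indices, depends on the tokenizer's vocabulary):
+
+        ```python
+        tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
+        encoded = tokenizer("An unaffable text")
+        # One entry per token: special tokens map to None and sub-word pieces share the index of their word.
+        encoded.word_ids()  # e.g. [None, 0, 1, 1, 1, 2, None]
+        ```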
+ """
+ if not self._encodings:
+ raise ValueError(
+                "word_ids() is not available when using non-fast tokenizers (i.e., not an instance of a"
+                " `XxxTokenizerFast` class)."
+ )
+ return self._encodings[batch_index].word_ids
+
+ def token_to_sequence(self, batch_or_token_index: int, token_index: Optional[int] = None) -> int:
+ """
+ Get the index of the sequence represented by the given token. In the general use case, this method returns `0`
+        for a single sequence or the first sequence of a pair, and `1` for the second sequence of a pair.
+
+ Can be called as:
+
+ - `self.token_to_sequence(token_index)` if batch size is 1
+ - `self.token_to_sequence(batch_index, token_index)` if batch size is greater than 1
+
+ This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e.,
+        words are defined by the user). In this case it makes it easy to associate encoded tokens with the provided
+ tokenized words.
+
+ Args:
+ batch_or_token_index (`int`):
+ Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
+ the token in the sequence.
+ token_index (`int`, *optional*):
+ If a batch index is provided in *batch_or_token_index*, this can be the index of the token in the
+ sequence.
+
+ Returns:
+            `int`: Index of the sequence the given token belongs to.
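+
+        Example (illustrative; assumes a fast tokenizer, a pair of sequences encoded together, and a checkpoint name
+        chosen only for demonstration):
+
+        ```python
+        tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
+        encoded = tokenizer("A question?", "A context sentence.")
+        encoded.token_to_sequence(1)  # 0 -> the token belongs to the first sequence
+        encoded.token_to_sequence(6)  # e.g. 1 -> the token belongs to the second sequence
+        ```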
+ """
+
+ if not self._encodings:
+ raise ValueError("token_to_sequence() is not available when using Python based tokenizers")
+ if token_index is not None:
+ batch_index = batch_or_token_index
+ else:
+ batch_index = 0
+ token_index = batch_or_token_index
+ if batch_index < 0:
+ batch_index = self._batch_size + batch_index
+ if token_index < 0:
+ token_index = self._seq_len + token_index
+ return self._encodings[batch_index].token_to_sequence(token_index)
+
+ def token_to_word(self, batch_or_token_index: int, token_index: Optional[int] = None) -> int:
+ """
+        Get the index of the word corresponding to (i.e. comprising) an encoded token in a sequence of the batch.
+
+ Can be called as:
+
+ - `self.token_to_word(token_index)` if batch size is 1
+ - `self.token_to_word(batch_index, token_index)` if batch size is greater than 1
+
+ This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e.,
+        words are defined by the user). In this case it makes it easy to associate encoded tokens with the provided
+ tokenized words.
+
+ Args:
+ batch_or_token_index (`int`):
+                Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
+ the token in the sequence.
+ token_index (`int`, *optional*):
+ If a batch index is provided in *batch_or_token_index*, this can be the index of the token in the
+ sequence.
+
+ Returns:
+ `int`: Index of the word in the input sequence.
+ """
+
+ if not self._encodings:
+ raise ValueError("token_to_word() is not available when using Python based tokenizers")
+ if token_index is not None:
+ batch_index = batch_or_token_index
+ else:
+ batch_index = 0
+ token_index = batch_or_token_index
+ if batch_index < 0:
+ batch_index = self._batch_size + batch_index
+ if token_index < 0:
+ token_index = self._seq_len + token_index
+ return self._encodings[batch_index].token_to_word(token_index)
+
+ def word_to_tokens(
+ self, batch_or_word_index: int, word_index: Optional[int] = None, sequence_index: int = 0
+ ) -> Optional[TokenSpan]:
+ """
+ Get the encoded token span corresponding to a word in a sequence of the batch.
+
+ Token spans are returned as a [`~tokenization_utils_base.TokenSpan`] with:
+
+ - **start** -- Index of the first token.
+ - **end** -- Index of the token following the last token.
+
+ Can be called as:
+
+ - `self.word_to_tokens(word_index, sequence_index: int = 0)` if batch size is 1
+        - `self.word_to_tokens(batch_index, word_index, sequence_index: int = 0)` if batch size is greater than 1
+
+ This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words
+        are defined by the user). In this case it makes it easy to associate encoded tokens with the provided tokenized
+ words.
+
+ Args:
+ batch_or_word_index (`int`):
+ Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
+ the word in the sequence.
+ word_index (`int`, *optional*):
+                If a batch index is provided in *batch_or_word_index*, this can be the index of the word in the
+ sequence.
+ sequence_index (`int`, *optional*, defaults to 0):
+                If a pair of sequences is encoded in the batch, this can be used to specify which sequence in the pair (0
+ or 1) the provided word index belongs to.
+
+ Returns:
+ ([`~tokenization_utils_base.TokenSpan`], *optional*): Span of tokens in the encoded sequence. Returns
+ `None` if no tokens correspond to the word. This can happen especially when the token is a special token
+ that has been used to format the tokenization. For example when we add a class token at the very beginning
+ of the tokenization.
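+
+        Example (a minimal sketch for pre-tokenized input; the checkpoint name is illustrative and the exact span
+        depends on the tokenizer's vocabulary):
+
+        ```python
+        tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
+        encoded = tokenizer(["my", "pretokenized", "words"], is_split_into_words=True)
+        # Span of encoded tokens (start inclusive, end exclusive) produced for the second input word.
+        encoded.word_to_tokens(1)  # e.g. TokenSpan(start=2, end=5)
+        ```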
+ """
+
+ if not self._encodings:
+ raise ValueError("word_to_tokens() is not available when using Python based tokenizers")
+ if word_index is not None:
+ batch_index = batch_or_word_index
+ else:
+ batch_index = 0
+ word_index = batch_or_word_index
+ if batch_index < 0:
+ batch_index = self._batch_size + batch_index
+ if word_index < 0:
+ word_index = self._seq_len + word_index
+ span = self._encodings[batch_index].word_to_tokens(word_index, sequence_index)
+ return TokenSpan(*span) if span is not None else None
+
+ def token_to_chars(self, batch_or_token_index: int, token_index: Optional[int] = None) -> CharSpan:
+ """
+ Get the character span corresponding to an encoded token in a sequence of the batch.
+
+ Character spans are returned as a [`~tokenization_utils_base.CharSpan`] with:
+
+ - **start** -- Index of the first character in the original string associated to the token.
+ - **end** -- Index of the character following the last character in the original string associated to the
+ token.
+
+ Can be called as:
+
+ - `self.token_to_chars(token_index)` if batch size is 1
+        - `self.token_to_chars(batch_index, token_index)` if batch size is greater than 1
+
+ Args:
+ batch_or_token_index (`int`):
+                Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
+ the token in the sequence.
+ token_index (`int`, *optional*):
+                If a batch index is provided in *batch_or_token_index*, this can be the index of the token in the
+                sequence.
+
+ Returns:
+            [`~tokenization_utils_base.CharSpan`]: Span of characters in the original string, or `None` if the token
+            (e.g. `<s>`, `</s>`) doesn't correspond to any chars in the original string.
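+
+        Example (illustrative; the checkpoint name and the returned span are assumptions for demonstration, and token
+        indices include the special tokens added by the tokenizer):
+
+        ```python
+        tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
+        encoded = tokenizer("Hello world")
+        # Character span (start inclusive, end exclusive) covered by the token at index 2 in the original string.
+        encoded.token_to_chars(2)  # e.g. CharSpan(start=6, end=11)
+        ```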
+ """
+
+ if not self._encodings:
+ raise ValueError("token_to_chars() is not available when using Python based tokenizers")
+ if token_index is not None:
+ batch_index = batch_or_token_index
+ else:
+ batch_index = 0
+ token_index = batch_or_token_index
+ span_indices = self._encodings[batch_index].token_to_chars(token_index)
+
+ return CharSpan(*span_indices) if span_indices is not None else None
+
+ def char_to_token(
+ self, batch_or_char_index: int, char_index: Optional[int] = None, sequence_index: int = 0
+ ) -> int:
+ """
+ Get the index of the token in the encoded output comprising a character in the original string for a sequence
+ of the batch.
+
+ Can be called as:
+
+ - `self.char_to_token(char_index)` if batch size is 1
+        - `self.char_to_token(batch_index, char_index)` if batch size is greater than 1
+
+ This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words
+        are defined by the user). In this case it makes it easy to associate encoded tokens with the provided tokenized
+ words.
+
+ Args:
+ batch_or_char_index (`int`):
+                Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
+                the character in the sequence.
+ char_index (`int`, *optional*):
+                If a batch index is provided in *batch_or_char_index*, this can be the index of the character in the
+ sequence.
+ sequence_index (`int`, *optional*, defaults to 0):
+                If a pair of sequences is encoded in the batch, this can be used to specify which sequence in the pair (0
+ or 1) the provided character index belongs to.
+
+
+ Returns:
+ `int`: Index of the token.
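+
+        Example (illustrative; the checkpoint name and the returned index are assumptions for demonstration):
+
+        ```python
+        tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
+        encoded = tokenizer("Hello world")
+        # Index of the token covering character 6 ("w") in the original string.
+        encoded.char_to_token(6)  # e.g. 2
+        ```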
+ """
+
+ if not self._encodings:
+ raise ValueError("char_to_token() is not available when using Python based tokenizers")
+ if char_index is not None:
+ batch_index = batch_or_char_index
+ else:
+ batch_index = 0
+ char_index = batch_or_char_index
+ return self._encodings[batch_index].char_to_token(char_index, sequence_index)
+
+ def word_to_chars(
+ self, batch_or_word_index: int, word_index: Optional[int] = None, sequence_index: int = 0
+ ) -> CharSpan:
+ """
+        Get the character span in the original string corresponding to a given word in a sequence of the
+
+ Character spans are returned as a CharSpan NamedTuple with:
+
+ - start: index of the first character in the original string
+ - end: index of the character following the last character in the original string
+
+ Can be called as:
+
+ - `self.word_to_chars(word_index)` if batch size is 1
+        - `self.word_to_chars(batch_index, word_index)` if batch size is greater than 1
+
+ Args:
+ batch_or_word_index (`int`):
+                Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
+                the word in the sequence.
+ word_index (`int`, *optional*):
+                If a batch index is provided in *batch_or_word_index*, this can be the index of the word in the
+ sequence.
+ sequence_index (`int`, *optional*, defaults to 0):
+                If a pair of sequences is encoded in the batch, this can be used to specify which sequence in the pair (0
+ or 1) the provided word index belongs to.
+
+ Returns:
+            `CharSpan`: Span of the associated characters in the original string. `CharSpan` is a NamedTuple with:
+
+            - start: index of the first character associated to the word in the original string
+            - end: index of the character following the last character associated to the word in the original
+              string
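+
+        Example (illustrative; the checkpoint name and the returned span are assumptions for demonstration):
+
+        ```python
+        tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
+        encoded = tokenizer("Hello world")
+        # Character span covered by the second word of the input.
+        encoded.word_to_chars(1)  # e.g. CharSpan(start=6, end=11)
+        ```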
+ """
+
+ if not self._encodings:
+ raise ValueError("word_to_chars() is not available when using Python based tokenizers")
+ if word_index is not None:
+ batch_index = batch_or_word_index
+ else:
+ batch_index = 0
+ word_index = batch_or_word_index
+ return CharSpan(*(self._encodings[batch_index].word_to_chars(word_index, sequence_index)))
+
+ def char_to_word(self, batch_or_char_index: int, char_index: Optional[int] = None, sequence_index: int = 0) -> int:
+ """
+ Get the word in the original string corresponding to a character in the original string of a sequence of the
+ batch.
+
+ Can be called as:
+
+ - `self.char_to_word(char_index)` if batch size is 1
+ - `self.char_to_word(batch_index, char_index)` if batch size is greater than 1
+
+ This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words
+        are defined by the user). In this case it makes it easy to associate encoded tokens with the provided tokenized
+ words.
+
+ Args:
+ batch_or_char_index (`int`):
+                Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
+ the character in the original string.
+ char_index (`int`, *optional*):
+                If a batch index is provided in *batch_or_char_index*, this can be the index of the character in the
+ original string.
+ sequence_index (`int`, *optional*, defaults to 0):
+                If a pair of sequences is encoded in the batch, this can be used to specify which sequence in the pair (0
+ or 1) the provided character index belongs to.
+
+
+ Returns:
+            `int`: Index of the word in the input sequence that contains this character.
+ """
+
+ if not self._encodings:
+ raise ValueError("char_to_word() is not available when using Python based tokenizers")
+ if char_index is not None:
+ batch_index = batch_or_char_index
+ else:
+ batch_index = 0
+ char_index = batch_or_char_index
+ return self._encodings[batch_index].char_to_word(char_index, sequence_index)
+
+ def convert_to_tensors(
+ self, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False
+ ):
+ """
+ Convert the inner content to tensors.
+
+ Args:
+ tensor_type (`str` or [`~utils.TensorType`], *optional*):
+ The type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If
+ `None`, no modification is done.
+            prepend_batch_axis (`bool`, *optional*, defaults to `False`):
+ Whether or not to add the batch dimension during the conversion.
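+
+        Example (a minimal sketch; the checkpoint name is illustrative and converting to `"pt"` requires PyTorch to
+        be installed):
+
+        ```python
+        tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
+        encoded = tokenizer(["Hello world", "Hi"], padding=True)
+        encoded.convert_to_tensors("pt")  # the lists in `encoded` are now PyTorch tensors
+        ```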
+ """
+ if tensor_type is None:
+ return self
+
+ # Convert to TensorType
+ if not isinstance(tensor_type, TensorType):
+ tensor_type = TensorType(tensor_type)
+
+ # Get a function reference for the correct framework
+ if tensor_type == TensorType.TENSORFLOW:
+ if not is_tf_available():
+ raise ImportError(
+ "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed."
+ )
+ import tensorflow as tf
+
+ as_tensor = tf.constant
+ is_tensor = tf.is_tensor
+ elif tensor_type == TensorType.PYTORCH:
+ if not is_torch_available():
+ raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
+ import torch
+
+ as_tensor = torch.tensor
+ is_tensor = torch.is_tensor
+ elif tensor_type == TensorType.JAX:
+ if not is_flax_available():
+ raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
+ import jax.numpy as jnp # noqa: F811
+
+ as_tensor = jnp.array
+ is_tensor = is_jax_tensor
+ else:
+
+ def as_tensor(value, dtype=None):
+ if isinstance(value, (list, tuple)) and isinstance(value[0], (list, tuple, np.ndarray)):
+ value_lens = [len(val) for val in value]
+ if len(set(value_lens)) > 1 and dtype is None:
+ # we have a ragged list so handle explicitly
+ value = as_tensor([np.asarray(val) for val in value], dtype=object)
+ return np.asarray(value, dtype=dtype)
+
+ is_tensor = is_numpy_array
+
+ # Do the tensor conversion in batch
+ for key, value in self.items():
+ try:
+ if prepend_batch_axis:
+ value = [value]
+
+ if not is_tensor(value):
+ tensor = as_tensor(value)
+
+ # Removing this for now in favor of controlling the shape with `prepend_batch_axis`
+ # # at-least2d
+ # if tensor.ndim > 2:
+ # tensor = tensor.squeeze(0)
+ # elif tensor.ndim < 2:
+ # tensor = tensor[None, :]
+
+ self[key] = tensor
+ except Exception as e:
+ if key == "overflowing_tokens":
+ raise ValueError(
+ "Unable to create tensor returning overflowing tokens of different lengths. "
+                        "Please see if a fast version of this tokenizer is available to use this feature."
+ ) from e
+ raise ValueError(
+ "Unable to create tensor, you should probably activate truncation and/or padding with"
+ " 'padding=True' 'truncation=True' to have batched tensors with the same length. Perhaps your"
+ f" features (`{key}` in this case) have excessive nesting (inputs type `list` where type `int` is"
+ " expected)."
+ ) from e
+
+ return self
+
+ def to(self, device: Union[str, "torch.device"]) -> "BatchEncoding":
+ """
+ Send all values to device by calling `v.to(device)` (PyTorch only).
+
+ Args:
+ device (`str` or `torch.device`): The device to put the tensors on.
+
+ Returns:
+ [`BatchEncoding`]: The same instance after modification.
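+
+        Example (a sketch; assumes PyTorch tensors, an available CUDA device, and an illustrative checkpoint name):
+
+        ```python
+        tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
+        encoded = tokenizer("Hello world", return_tensors="pt")
+        encoded = encoded.to("cuda:0")  # all tensor values are moved to the GPU
+        ```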
+ """
+ requires_backends(self, ["torch"])
+
+ # This check catches things like APEX blindly calling "to" on all inputs to a module
+ # Otherwise it passes the casts down and casts the LongTensor containing the token idxs
+ # into a HalfTensor
+ if isinstance(device, str) or is_torch_device(device) or isinstance(device, int):
+ self.data = {k: v.to(device=device) for k, v in self.data.items()}
+ else:
+ logger.warning(f"Attempting to cast a BatchEncoding to type {str(device)}. This is not supported.")
+ return self
+
+
+class SpecialTokensMixin:
+ """
+ A mixin derived by [`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`] to handle specific behaviors related to
+    special tokens. In particular, this class holds the attributes which can be used to directly access these special
+    tokens in a model-independent manner and allows setting and updating the special tokens.
+
+ Args:
+ bos_token (`str` or `tokenizers.AddedToken`, *optional*):
+ A special token representing the beginning of a sentence.
+ eos_token (`str` or `tokenizers.AddedToken`, *optional*):
+ A special token representing the end of a sentence.
+ unk_token (`str` or `tokenizers.AddedToken`, *optional*):
+ A special token representing an out-of-vocabulary token.
+ sep_token (`str` or `tokenizers.AddedToken`, *optional*):
+ A special token separating two different sentences in the same input (used by BERT for instance).
+ pad_token (`str` or `tokenizers.AddedToken`, *optional*):
+            A special token used to make arrays of tokens the same size for batching purposes. Will then be ignored by
+ attention mechanisms or loss computation.
+ cls_token (`str` or `tokenizers.AddedToken`, *optional*):
+ A special token representing the class of the input (used by BERT for instance).
+ mask_token (`str` or `tokenizers.AddedToken`, *optional*):
+ A special token representing a masked token (used by masked-language modeling pretraining objectives, like
+ BERT).
+ additional_special_tokens (tuple or list of `str` or `tokenizers.AddedToken`, *optional*):
+ A tuple or a list of additional special tokens.
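+
+    Example (a minimal sketch; the checkpoint name, token strings and ids shown are illustrative):
+
+    ```python
+    tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
+    tokenizer.cls_token, tokenizer.cls_token_id  # e.g. ('[CLS]', 101)
+    tokenizer.pad_token = "[PAD]"  # special token attributes can also be (re)assigned
+    ```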
+ """
+
+ SPECIAL_TOKENS_ATTRIBUTES = [
+ "bos_token",
+ "eos_token",
+ "unk_token",
+ "sep_token",
+ "pad_token",
+ "cls_token",
+ "mask_token",
+ "additional_special_tokens",
+ ]
+
+ def __init__(self, verbose=True, **kwargs):
+ self._bos_token = None
+ self._eos_token = None
+ self._unk_token = None
+ self._sep_token = None
+ self._pad_token = None
+ self._cls_token = None
+ self._mask_token = None
+ self._pad_token_type_id = 0
+ self._additional_special_tokens = []
+ self.verbose = verbose
+
+ # We directly set the hidden value to allow initialization with special tokens
+ # which are not yet in the vocabulary. Necessary for serialization/de-serialization
+ # TODO clean this up at some point (probably by switching to fast tokenizers)
+ for key, value in kwargs.items():
+ if value is None:
+ continue
+ if key in self.SPECIAL_TOKENS_ATTRIBUTES:
+ if key == "additional_special_tokens":
+ assert isinstance(value, (list, tuple)), f"Value {value} is not a list or tuple"
+ assert all(
+ isinstance(t, (str, AddedToken)) for t in value
+ ), "One of the tokens is not a string or an AddedToken"
+ setattr(self, key, value)
+ elif isinstance(value, (str, AddedToken)):
+ setattr(self, key, value)
+ else:
+ raise TypeError(f"special token {key} has to be either str or AddedToken but got: {type(value)}")
+
+ def sanitize_special_tokens(self) -> int:
+ """
+ Make sure that all the special tokens attributes of the tokenizer (`tokenizer.mask_token`,
+ `tokenizer.cls_token`, etc.) are in the vocabulary.
+
+ Add the missing ones to the vocabulary if needed.
+
+ Return:
+ `int`: The number of tokens added in the vocabulary during the operation.
+ """
+ return self.add_tokens(self.all_special_tokens_extended, special_tokens=True)
+
+ def add_special_tokens(
+ self, special_tokens_dict: Dict[str, Union[str, AddedToken]], replace_additional_special_tokens=True
+ ) -> int:
+ """
+ Add a dictionary of special tokens (eos, pad, cls, etc.) to the encoder and link them to class attributes. If
+ special tokens are NOT in the vocabulary, they are added to it (indexed starting from the last index of the
+ current vocabulary).
+
+        Note: When adding new tokens to the vocabulary, you should make sure to also resize the token embedding
+ matrix of the model so that its embedding matrix matches the tokenizer.
+
+ In order to do that, please use the [`~PreTrainedModel.resize_token_embeddings`] method.
+
+ Using `add_special_tokens` will ensure your special tokens can be used in several ways:
+
+ - Special tokens are carefully handled by the tokenizer (they are never split).
+ - You can easily refer to special tokens using tokenizer class attributes like `tokenizer.cls_token`. This
+ makes it easy to develop model-agnostic training and fine-tuning scripts.
+
+ When possible, special tokens are already registered for provided pretrained models (for instance
+        [`BertTokenizer`] `cls_token` is already registered to be `'[CLS]'` and XLM's one is also registered to be
+        `'</s>'`).
+
+ Args:
+ special_tokens_dict (dictionary *str* to *str* or `tokenizers.AddedToken`):
+ Keys should be in the list of predefined special attributes: [`bos_token`, `eos_token`, `unk_token`,
+ `sep_token`, `pad_token`, `cls_token`, `mask_token`, `additional_special_tokens`].
+
+ Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer
+                assigns the index of the `unk_token` to them).
+            replace_additional_special_tokens (`bool`, *optional*, defaults to `True`):
+ If `True`, the existing list of additional special tokens will be replaced by the one specified in
+ `special_tokens_dict`. Otherwise, `self._additional_special_tokens` is updated. In the former case, the
+ tokens will NOT be removed from the tokenizer's full vocabulary - they are only being flagged as
+ non-special tokens.
+
+ Returns:
+ `int`: Number of tokens added to the vocabulary.
+
+ Examples:
+
+ ```python
+ # Let's see how to add a new classification token to GPT-2
+ tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
+ model = GPT2Model.from_pretrained("gpt2")
+
+        special_tokens_dict = {"cls_token": "<CLS>"}
+
+ num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
+ print("We have added", num_added_toks, "tokens")
+        # Notice: resize_token_embeddings expects to receive the full size of the new vocabulary, i.e., the length of the tokenizer.
+ model.resize_token_embeddings(len(tokenizer))
+
+        assert tokenizer.cls_token == "<CLS>"
+ ```"""
+ if not special_tokens_dict:
+ return 0
+
+ added_tokens = 0
+ for key, value in special_tokens_dict.items():
+ assert key in self.SPECIAL_TOKENS_ATTRIBUTES, f"Key {key} is not a special token"
+
+ if self.verbose:
+ logger.info(f"Assigning {value} to the {key} key of the tokenizer")
+
+ if key == "additional_special_tokens":
+ assert isinstance(value, (list, tuple)) and all(
+ isinstance(t, (str, AddedToken)) for t in value
+ ), f"Tokens {value} for key {key} should all be str or AddedToken instances"
+
+ if replace_additional_special_tokens:
+ setattr(self, key, value)
+ else:
+ # This is a copy of `self._additional_special_tokens`
+ additional_special_tokens = getattr(self, key)
+ additional_special_tokens_set = set(additional_special_tokens)
+ to_add = []
+ for token in value:
+ if str(token) not in additional_special_tokens_set and str(token) not in to_add:
+ to_add.append(token)
+ # update the property
+ additional_special_tokens.extend(to_add)
+ self.additional_special_tokens = additional_special_tokens
+
+ added_tokens += self.add_tokens(value, special_tokens=True)
+ else:
+ assert isinstance(
+ value, (str, AddedToken)
+ ), f"Token {value} for key {key} should be a str or an AddedToken instance"
+ setattr(self, key, value)
+ added_tokens += self.add_tokens([value], special_tokens=True)
+
+ return added_tokens
+
+ def add_tokens(
+ self, new_tokens: Union[str, AddedToken, List[Union[str, AddedToken]]], special_tokens: bool = False
+ ) -> int:
+ """
+ Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to
+        it with indices starting from the length of the current vocabulary and will be isolated before the tokenization
+ algorithm is applied. Added tokens and tokens from the vocabulary of the tokenization algorithm are therefore
+ not treated in the same way.
+
+ Note, when adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix
+ of the model so that its embedding matrix matches the tokenizer.
+
+ In order to do that, please use the [`~PreTrainedModel.resize_token_embeddings`] method.
+
+ Args:
+ new_tokens (`str`, `tokenizers.AddedToken` or a list of *str* or `tokenizers.AddedToken`):
+ Tokens are only added if they are not already in the vocabulary. `tokenizers.AddedToken` wraps a string
+ token to let you personalize its behavior: whether this token should only match against a single word,
+ whether this token should strip all potential whitespaces on the left side, whether this token should
+ strip all potential whitespaces on the right side, etc.
+ special_tokens (`bool`, *optional*, defaults to `False`):
+                Can be used to specify if the token is a special token. This mostly changes the normalization behavior
+ (special tokens like CLS or [MASK] are usually not lower-cased for instance).
+
+ See details for `tokenizers.AddedToken` in HuggingFace tokenizers library.
+
+ Returns:
+ `int`: Number of tokens added to the vocabulary.
+
+ Examples:
+
+ ```python
+ # Let's see how to increase the vocabulary of Bert model and tokenizer
+ tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
+ model = BertModel.from_pretrained("bert-base-uncased")
+
+ num_added_toks = tokenizer.add_tokens(["new_tok1", "my_new-tok2"])
+ print("We have added", num_added_toks, "tokens")
+        # Notice: resize_token_embeddings expects to receive the full size of the new vocabulary, i.e., the length of the tokenizer.
+ model.resize_token_embeddings(len(tokenizer))
+ ```"""
+ if not new_tokens:
+ return 0
+
+ if not isinstance(new_tokens, (list, tuple)):
+ new_tokens = [new_tokens]
+
+ return self._add_tokens(new_tokens, special_tokens=special_tokens)
+
+ def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
+ raise NotImplementedError
+
+ @property
+ def bos_token(self) -> str:
+ """
+ `str`: Beginning of sentence token. Log an error if used while not having been set.
+ """
+ if self._bos_token is None:
+ if self.verbose:
+ logger.error("Using bos_token, but it is not set yet.")
+ return None
+ return str(self._bos_token)
+
+ @property
+ def eos_token(self) -> str:
+ """
+ `str`: End of sentence token. Log an error if used while not having been set.
+ """
+ if self._eos_token is None:
+ if self.verbose:
+ logger.error("Using eos_token, but it is not set yet.")
+ return None
+ return str(self._eos_token)
+
+ @property
+ def unk_token(self) -> str:
+ """
+ `str`: Unknown token. Log an error if used while not having been set.
+ """
+ if self._unk_token is None:
+ if self.verbose:
+ logger.error("Using unk_token, but it is not set yet.")
+ return None
+ return str(self._unk_token)
+
+ @property
+ def sep_token(self) -> str:
+ """
+ `str`: Separation token, to separate context and query in an input sequence. Log an error if used while not
+ having been set.
+ """
+ if self._sep_token is None:
+ if self.verbose:
+ logger.error("Using sep_token, but it is not set yet.")
+ return None
+ return str(self._sep_token)
+
+ @property
+ def pad_token(self) -> str:
+ """
+ `str`: Padding token. Log an error if used while not having been set.
+ """
+ if self._pad_token is None:
+ if self.verbose:
+ logger.error("Using pad_token, but it is not set yet.")
+ return None
+ return str(self._pad_token)
+
+ @property
+ def cls_token(self) -> str:
+ """
+ `str`: Classification token, to extract a summary of an input sequence leveraging self-attention along the full
+ depth of the model. Log an error if used while not having been set.
+ """
+ if self._cls_token is None:
+ if self.verbose:
+ logger.error("Using cls_token, but it is not set yet.")
+ return None
+ return str(self._cls_token)
+
+ @property
+ def mask_token(self) -> str:
+ """
+ `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not
+ having been set.
+ """
+ if self._mask_token is None:
+ if self.verbose:
+ logger.error("Using mask_token, but it is not set yet.")
+ return None
+ return str(self._mask_token)
+
+ @property
+ def additional_special_tokens(self) -> List[str]:
+ """
+ `List[str]`: All the additional special tokens you may want to use. Log an error if used while not having been
+ set.
+ """
+ if self._additional_special_tokens is None:
+ if self.verbose:
+ logger.error("Using additional_special_tokens, but it is not set yet.")
+ return None
+ return [str(tok) for tok in self._additional_special_tokens]
+
+ @bos_token.setter
+ def bos_token(self, value):
+ self._bos_token = value
+
+ @eos_token.setter
+ def eos_token(self, value):
+ self._eos_token = value
+
+ @unk_token.setter
+ def unk_token(self, value):
+ self._unk_token = value
+
+ @sep_token.setter
+ def sep_token(self, value):
+ self._sep_token = value
+
+ @pad_token.setter
+ def pad_token(self, value):
+ self._pad_token = value
+
+ @cls_token.setter
+ def cls_token(self, value):
+ self._cls_token = value
+
+ @mask_token.setter
+ def mask_token(self, value):
+ self._mask_token = value
+
+ @additional_special_tokens.setter
+ def additional_special_tokens(self, value):
+ self._additional_special_tokens = value
+
+ @property
+ def bos_token_id(self) -> Optional[int]:
+ """
+ `Optional[int]`: Id of the beginning of sentence token in the vocabulary. Returns `None` if the token has not
+ been set.
+ """
+ if self._bos_token is None:
+ return None
+ return self.convert_tokens_to_ids(self.bos_token)
+
+ @property
+ def eos_token_id(self) -> Optional[int]:
+ """
+ `Optional[int]`: Id of the end of sentence token in the vocabulary. Returns `None` if the token has not been
+ set.
+ """
+ if self._eos_token is None:
+ return None
+ return self.convert_tokens_to_ids(self.eos_token)
+
+ @property
+ def unk_token_id(self) -> Optional[int]:
+ """
+ `Optional[int]`: Id of the unknown token in the vocabulary. Returns `None` if the token has not been set.
+ """
+ if self._unk_token is None:
+ return None
+ return self.convert_tokens_to_ids(self.unk_token)
+
+ @property
+ def sep_token_id(self) -> Optional[int]:
+ """
+ `Optional[int]`: Id of the separation token in the vocabulary, to separate context and query in an input
+ sequence. Returns `None` if the token has not been set.
+ """
+ if self._sep_token is None:
+ return None
+ return self.convert_tokens_to_ids(self.sep_token)
+
+ @property
+ def pad_token_id(self) -> Optional[int]:
+ """
+ `Optional[int]`: Id of the padding token in the vocabulary. Returns `None` if the token has not been set.
+ """
+ if self._pad_token is None:
+ return None
+ return self.convert_tokens_to_ids(self.pad_token)
+
+ @property
+ def pad_token_type_id(self) -> int:
+ """
+ `int`: Id of the padding token type in the vocabulary.
+ """
+ return self._pad_token_type_id
+
+ @property
+ def cls_token_id(self) -> Optional[int]:
+ """
+ `Optional[int]`: Id of the classification token in the vocabulary, to extract a summary of an input sequence
+ leveraging self-attention along the full depth of the model.
+
+ Returns `None` if the token has not been set.
+ """
+ if self._cls_token is None:
+ return None
+ return self.convert_tokens_to_ids(self.cls_token)
+
+ @property
+ def mask_token_id(self) -> Optional[int]:
+ """
+ `Optional[int]`: Id of the mask token in the vocabulary, used when training a model with masked-language
+ modeling. Returns `None` if the token has not been set.
+ """
+ if self._mask_token is None:
+ return None
+ return self.convert_tokens_to_ids(self.mask_token)
+
+ @property
+ def additional_special_tokens_ids(self) -> List[int]:
+ """
+ `List[int]`: Ids of all the additional special tokens in the vocabulary. Log an error if used while not having
+ been set.
+ """
+ return self.convert_tokens_to_ids(self.additional_special_tokens)
+
+ @bos_token_id.setter
+ def bos_token_id(self, value):
+ self._bos_token = self.convert_ids_to_tokens(value) if value is not None else None
+
+ @eos_token_id.setter
+ def eos_token_id(self, value):
+ self._eos_token = self.convert_ids_to_tokens(value) if value is not None else None
+
+ @unk_token_id.setter
+ def unk_token_id(self, value):
+ self._unk_token = self.convert_ids_to_tokens(value) if value is not None else None
+
+ @sep_token_id.setter
+ def sep_token_id(self, value):
+ self._sep_token = self.convert_ids_to_tokens(value) if value is not None else None
+
+ @pad_token_id.setter
+ def pad_token_id(self, value):
+ self._pad_token = self.convert_ids_to_tokens(value) if value is not None else None
+
+ @cls_token_id.setter
+ def cls_token_id(self, value):
+ self._cls_token = self.convert_ids_to_tokens(value) if value is not None else None
+
+ @mask_token_id.setter
+ def mask_token_id(self, value):
+ self._mask_token = self.convert_ids_to_tokens(value) if value is not None else None
+
+ @additional_special_tokens_ids.setter
+ def additional_special_tokens_ids(self, values):
+ self._additional_special_tokens = [self.convert_ids_to_tokens(value) for value in values]
+
+ @property
+ def special_tokens_map(self) -> Dict[str, Union[str, List[str]]]:
+ """
+ `Dict[str, Union[str, List[str]]]`: A dictionary mapping special token class attributes (`cls_token`,
+        `unk_token`, etc.) to their values (`'<unk>'`, `'<cls>'`, etc.).
+
+ Convert potential tokens of `tokenizers.AddedToken` type to string.
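+
+        Example (illustrative output for a BERT-like tokenizer; the checkpoint name and values are assumptions):
+
+        ```python
+        tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
+        tokenizer.special_tokens_map
+        # e.g. {'unk_token': '[UNK]', 'sep_token': '[SEP]', 'pad_token': '[PAD]',
+        #       'cls_token': '[CLS]', 'mask_token': '[MASK]'}
+        ```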
+ """
+ set_attr = {}
+ for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
+ attr_value = getattr(self, "_" + attr)
+ if attr_value:
+ set_attr[attr] = (
+ type(attr_value)(str(attr_value_sub) for attr_value_sub in attr_value)
+ if isinstance(attr_value, (list, tuple))
+ else str(attr_value)
+ )
+ return set_attr
+
+ @property
+ def special_tokens_map_extended(self) -> Dict[str, Union[str, AddedToken, List[Union[str, AddedToken]]]]:
+ """
+ `Dict[str, Union[str, tokenizers.AddedToken, List[Union[str, tokenizers.AddedToken]]]]`: A dictionary mapping
+        special token class attributes (`cls_token`, `unk_token`, etc.) to their values (`'<unk>'`, `'<cls>'`, etc.).
+
+ Don't convert tokens of `tokenizers.AddedToken` type to string so they can be used to control more finely how
+ special tokens are tokenized.
+ """
+ set_attr = {}
+ for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
+ attr_value = getattr(self, "_" + attr)
+ if attr_value:
+ set_attr[attr] = attr_value
+ return set_attr
+
+ @property
+ def all_special_tokens(self) -> List[str]:
+ """
+        `List[str]`: All the special tokens (`'<unk>'`, `'<cls>'`, etc.) mapped to class attributes.
+
+ Convert tokens of `tokenizers.AddedToken` type to string.
+ """
+ all_toks = [str(s) for s in self.all_special_tokens_extended]
+ return all_toks
+
+ @property
+ def all_special_tokens_extended(self) -> List[Union[str, AddedToken]]:
+ """
+        `List[Union[str, tokenizers.AddedToken]]`: All the special tokens (`'<unk>'`, `'<cls>'`, etc.) mapped to class
+ attributes.
+
+ Don't convert tokens of `tokenizers.AddedToken` type to string so they can be used to control more finely how
+ special tokens are tokenized.
+ """
+ all_toks = []
+ set_attr = self.special_tokens_map_extended
+ for attr_value in set_attr.values():
+ all_toks = all_toks + (list(attr_value) if isinstance(attr_value, (list, tuple)) else [attr_value])
+ all_toks = list(OrderedDict.fromkeys(all_toks))
+ return all_toks
+
+ @property
+ def all_special_ids(self) -> List[int]:
+ """
+        `List[int]`: List the ids of the special tokens (`'<unk>'`, `'<cls>'`, etc.) mapped to class attributes.
+ """
+ all_toks = self.all_special_tokens
+ all_ids = self.convert_tokens_to_ids(all_toks)
+ return all_ids
+
+
+ENCODE_KWARGS_DOCSTRING = r"""
+ add_special_tokens (`bool`, *optional*, defaults to `True`):
+ Whether or not to encode the sequences with the special tokens relative to their model.
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
+ Activates and controls padding. Accepts the following values:
+
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
+              sequence is provided).
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+ acceptable input length for the model if that argument is not provided.
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
+ lengths).
+ truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
+ Activates and controls truncation. Accepts the following values:
+
+ - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
+ to the maximum acceptable input length for the model if that argument is not provided. This will
+ truncate token by token, removing a token from the longest sequence in the pair if a pair of
+ sequences (or a batch of pairs) is provided.
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
+ maximum acceptable input length for the model if that argument is not provided. This will only
+ truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
+ maximum acceptable input length for the model if that argument is not provided. This will only
+ truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
+ - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
+ greater than the model maximum admissible input size).
+ max_length (`int`, *optional*):
+ Controls the maximum length to use by one of the truncation/padding parameters.
+
+ If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
+ is required by one of the truncation/padding parameters. If the model has no specific maximum input
+ length (like XLNet) truncation/padding to a maximum length will be deactivated.
+ stride (`int`, *optional*, defaults to 0):
+ If set to a number along with `max_length`, the overflowing tokens returned when
+ `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
+ returned to provide some overlap between truncated and overflowing sequences. The value of this
+ argument defines the number of overlapping tokens.
+ is_split_into_words (`bool`, *optional*, defaults to `False`):
+ Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the
+ tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace)
+ which it will tokenize. This is useful for NER or token classification.
+ pad_to_multiple_of (`int`, *optional*):
+            If set, will pad the sequence to a multiple of the provided value. Requires `padding` to be activated.
+            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
+            `>= 7.0` (Volta).
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
+ If set, will return tensors instead of list of python integers. Acceptable values are:
+
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
+ - `'np'`: Return Numpy `np.ndarray` objects.
+"""
+
+ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r"""
+ return_token_type_ids (`bool`, *optional*):
+ Whether to return token type IDs. If left to the default, will return the token type IDs according to
+ the specific tokenizer's default, defined by the `return_outputs` attribute.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ return_attention_mask (`bool`, *optional*):
+ Whether to return the attention mask. If left to the default, will return the attention mask according
+ to the specific tokenizer's default, defined by the `return_outputs` attribute.
+
+ [What are attention masks?](../glossary#attention-mask)
+ return_overflowing_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch
+ of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead
+ of returning overflowing tokens.
+ return_special_tokens_mask (`bool`, *optional*, defaults to `False`):
+ Whether or not to return special tokens mask information.
+ return_offsets_mapping (`bool`, *optional*, defaults to `False`):
+ Whether or not to return `(char_start, char_end)` for each token.
+
+            This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`]; if using a
+            Python-based tokenizer, this method will raise `NotImplementedError`.
+ return_length (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the lengths of the encoded inputs.
+ verbose (`bool`, *optional*, defaults to `True`):
+ Whether or not to print more information and warnings.
+ **kwargs: passed to the `self.tokenize()` method
+
+ Return:
+ [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
+
+ - **input_ids** -- List of token ids to be fed to a model.
+
+ [What are input IDs?](../glossary#input-ids)
+
+ - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or
+ if *"token_type_ids"* is in `self.model_input_names`).
+
+ [What are token type IDs?](../glossary#token-type-ids)
+
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`).
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ - **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and
+ `return_overflowing_tokens=True`).
+ - **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and
+ `return_overflowing_tokens=True`).
+ - **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying
+ regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`).
+ - **length** -- The length of the inputs (when `return_length=True`)
+"""
+
+INIT_TOKENIZER_DOCSTRING = r"""
+ Class attributes (overridden by derived classes)
+
+ - **vocab_files_names** (`Dict[str, str]`) -- A dictionary with, as keys, the `__init__` keyword name of each
+ vocabulary file required by the model, and as associated values, the filename for saving the associated file
+ (string).
+ - **pretrained_vocab_files_map** (`Dict[str, Dict[str, str]]`) -- A dictionary of dictionaries, with the
+ high-level keys being the `__init__` keyword name of each vocabulary file required by the model, the
+ low-level being the `short-cut-names` of the pretrained models with, as associated values, the `url` to the
+ associated pretrained vocabulary file.
+ - **max_model_input_sizes** (`Dict[str, Optional[int]]`) -- A dictionary with, as keys, the `short-cut-names`
+ of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model,
+ or `None` if the model has no maximum input size.
+ - **pretrained_init_configuration** (`Dict[str, Dict[str, Any]]`) -- A dictionary with, as keys, the
+ `short-cut-names` of the pretrained models, and as associated values, a dictionary of specific arguments to
+ pass to the `__init__` method of the tokenizer class for this pretrained model when loading the tokenizer
+ with the [`~tokenization_utils_base.PreTrainedTokenizerBase.from_pretrained`] method.
+ - **model_input_names** (`List[str]`) -- A list of inputs expected in the forward pass of the model.
+ - **padding_side** (`str`) -- The default value for the side on which the model should have padding applied.
+ Should be `'right'` or `'left'`.
+ - **truncation_side** (`str`) -- The default value for the side on which the model should have truncation
+ applied. Should be `'right'` or `'left'`.
+
+ Args:
+ model_max_length (`int`, *optional*):
+ The maximum length (in number of tokens) for the inputs to the transformer model. When the tokenizer is
+ loaded with [`~tokenization_utils_base.PreTrainedTokenizerBase.from_pretrained`], this will be set to the
+ value stored for the associated model in `max_model_input_sizes` (see above). If no value is provided, will
+ default to VERY_LARGE_INTEGER (`int(1e30)`).
+ padding_side (`str`, *optional*):
+ The side on which the model should have padding applied. Should be selected between ['right', 'left'].
+ Default value is picked from the class attribute of the same name.
+ truncation_side (`str`, *optional*):
+ The side on which the model should have truncation applied. Should be selected between ['right', 'left'].
+ Default value is picked from the class attribute of the same name.
+ model_input_names (`List[str]`, *optional*):
+ The list of inputs accepted by the forward pass of the model (like `"token_type_ids"` or
+ `"attention_mask"`). Default value is picked from the class attribute of the same name.
+ bos_token (`str` or `tokenizers.AddedToken`, *optional*):
+ A special token representing the beginning of a sentence. Will be associated to `self.bos_token` and
+ `self.bos_token_id`.
+ eos_token (`str` or `tokenizers.AddedToken`, *optional*):
+ A special token representing the end of a sentence. Will be associated to `self.eos_token` and
+ `self.eos_token_id`.
+ unk_token (`str` or `tokenizers.AddedToken`, *optional*):
+ A special token representing an out-of-vocabulary token. Will be associated to `self.unk_token` and
+ `self.unk_token_id`.
+ sep_token (`str` or `tokenizers.AddedToken`, *optional*):
+ A special token separating two different sentences in the same input (used by BERT for instance). Will be
+ associated to `self.sep_token` and `self.sep_token_id`.
+ pad_token (`str` or `tokenizers.AddedToken`, *optional*):
+ A special token used to make arrays of tokens the same size for batching purposes. Will then be ignored by
+ attention mechanisms or loss computation. Will be associated to `self.pad_token` and `self.pad_token_id`.
+ cls_token (`str` or `tokenizers.AddedToken`, *optional*):
+ A special token representing the class of the input (used by BERT for instance). Will be associated to
+ `self.cls_token` and `self.cls_token_id`.
+ mask_token (`str` or `tokenizers.AddedToken`, *optional*):
+ A special token representing a masked token (used by masked-language modeling pretraining objectives, like
+ BERT). Will be associated to `self.mask_token` and `self.mask_token_id`.
+ additional_special_tokens (tuple or list of `str` or `tokenizers.AddedToken`, *optional*):
+ A tuple or a list of additional special tokens. Add them here to ensure they won't be split by the
+ tokenization process. Will be associated to `self.additional_special_tokens` and
+ `self.additional_special_tokens_ids`.
+ clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
+ Whether or not the tokenizer should clean up the spaces that were added when splitting the input text during the
+ tokenization process.
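+
+ Example (an illustrative sketch: `PreTrainedTokenizerBase` is never instantiated directly, so a derived class
+ such as `BertTokenizer` and the `bert-base-uncased` checkpoint are assumed here):
+
+ ```python
+ from transformers import BertTokenizer
+
+ # Override some of the keyword arguments documented above at loading time.
+ tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", model_max_length=128, padding_side="left")
+ print(tokenizer.model_max_length, tokenizer.padding_side)
+ ```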
+"""
+
+
+@add_end_docstrings(INIT_TOKENIZER_DOCSTRING)
+class PreTrainedTokenizerBase(SpecialTokensMixin, PushToHubMixin):
+ """
+ Base class for [`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`].
+
+ Handles shared (mostly boilerplate) methods for those two classes.
+ """
+
+ vocab_files_names: Dict[str, str] = {}
+ pretrained_vocab_files_map: Dict[str, Dict[str, str]] = {}
+ pretrained_init_configuration: Dict[str, Dict[str, Any]] = {}
+ max_model_input_sizes: Dict[str, Optional[int]] = {}
+ _auto_class: Optional[str] = None
+
+ # first name has to correspond to main model input name
+ # to make sure `tokenizer.pad(...)` works correctly
+ model_input_names: List[str] = ["input_ids", "token_type_ids", "attention_mask"]
+ padding_side: str = "right"
+ truncation_side: str = "right"
+ slow_tokenizer_class = None
+
+ def __init__(self, **kwargs):
+ # inputs and kwargs for saving and re-loading (see ``from_pretrained`` and ``save_pretrained``)
+ self.init_inputs = ()
+ self.init_kwargs = copy.deepcopy(kwargs)
+ self.name_or_path = kwargs.pop("name_or_path", "")
+ self._processor_class = kwargs.pop("processor_class", None)
+
+ # For backward compatibility we fallback to set model_max_length from max_len if provided
+ model_max_length = kwargs.pop("model_max_length", kwargs.pop("max_len", None))
+ self.model_max_length = model_max_length if model_max_length is not None else VERY_LARGE_INTEGER
+
+ # Padding and truncation side are right by default and overridden in subclasses. If specified in the kwargs, it
+ # is changed.
+ self.padding_side = kwargs.pop("padding_side", self.padding_side)
+ if self.padding_side not in ["right", "left"]:
+ raise ValueError(
+ f"Padding side should be selected between 'right' and 'left', current value: {self.padding_side}"
+ )
+
+ self.truncation_side = kwargs.pop("truncation_side", self.truncation_side)
+ if self.truncation_side not in ["right", "left"]:
+ raise ValueError(
+ f"Truncation side should be selected between 'right' and 'left', current value: {self.truncation_side}"
+ )
+
+ self.model_input_names = kwargs.pop("model_input_names", self.model_input_names)
+
+ # By default, clean up tokenization spaces for both fast and slow tokenizers
+ self.clean_up_tokenization_spaces = kwargs.pop("clean_up_tokenization_spaces", True)
+
+ self.deprecation_warnings = (
+ {}
+ ) # Use to store when we have already noticed a deprecation warning (avoid overlogging).
+ self._in_target_context_manager = False
+ super().__init__(**kwargs)
+
+ @property
+ def max_len_single_sentence(self) -> int:
+ """
+ `int`: The maximum length of a sentence that can be fed to the model.
+ """
+ return self.model_max_length - self.num_special_tokens_to_add(pair=False)
+
+ @property
+ def max_len_sentences_pair(self) -> int:
+ """
+ `int`: The maximum combined length of a pair of sentences that can be fed to the model.
+ """
+ return self.model_max_length - self.num_special_tokens_to_add(pair=True)
+
+ @max_len_single_sentence.setter
+ def max_len_single_sentence(self, value) -> None:
+ # For backward compatibility, allow trying to set 'max_len_single_sentence'.
+ if value == self.model_max_length - self.num_special_tokens_to_add(pair=False) and self.verbose:
+ if not self.deprecation_warnings.get("max_len_single_sentence", False):
+ logger.warning(
+ "Setting 'max_len_single_sentence' is now deprecated. This value is automatically set up."
+ )
+ self.deprecation_warnings["max_len_single_sentence"] = True
+ else:
+ raise ValueError(
+ "Setting 'max_len_single_sentence' is now deprecated. This value is automatically set up."
+ )
+
+ @max_len_sentences_pair.setter
+ def max_len_sentences_pair(self, value) -> None:
+ # For backward compatibility, allow trying to set 'max_len_sentences_pair'.
+ if value == self.model_max_length - self.num_special_tokens_to_add(pair=True) and self.verbose:
+ if not self.deprecation_warnings.get("max_len_sentences_pair", False):
+ logger.warning(
+ "Setting 'max_len_sentences_pair' is now deprecated. This value is automatically set up."
+ )
+ self.deprecation_warnings["max_len_sentences_pair"] = True
+ else:
+ raise ValueError("Setting 'max_len_sentences_pair' is now deprecated. This value is automatically set up.")
+
+ def _set_processor_class(self, processor_class: str):
+ """Sets processor class as an attribute."""
+ self._processor_class = processor_class
+
+ def __repr__(self) -> str:
+ return (
+ f"{self.__class__.__name__}(name_or_path='{self.name_or_path}',"
+ f" vocab_size={self.vocab_size}, model_max_length={self.model_max_length}, is_fast={self.is_fast},"
+ f" padding_side='{self.padding_side}', truncation_side='{self.truncation_side}',"
+ f" special_tokens={self.special_tokens_map_extended}, clean_up_tokenization_spaces={self.clean_up_tokenization_spaces})"
+ )
+
+ def __len__(self) -> int:
+ raise NotImplementedError()
+
+ def get_vocab(self) -> Dict[str, int]:
+ """
+ Returns the vocabulary as a dictionary of token to index.
+
+ `tokenizer.get_vocab()[token]` is equivalent to `tokenizer.convert_tokens_to_ids(token)` when `token` is in the
+ vocab.
+
+ Returns:
+ `Dict[str, int]`: The vocabulary.
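+
+ Example (a minimal sketch, assuming a concrete subclass such as `BertTokenizer` and the `bert-base-uncased`
+ checkpoint; the vocabulary contents depend on the checkpoint):
+
+ ```python
+ from transformers import BertTokenizer
+
+ tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
+ vocab = tokenizer.get_vocab()
+ # For a token that is in the vocabulary, both lookups return the same id.
+ assert vocab["hello"] == tokenizer.convert_tokens_to_ids("hello")
+ ```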
+ """
+ raise NotImplementedError()
+
+ @classmethod
+ def from_pretrained(
+ cls,
+ pretrained_model_name_or_path: Union[str, os.PathLike],
+ *init_inputs,
+ cache_dir: Optional[Union[str, os.PathLike]] = None,
+ force_download: bool = False,
+ local_files_only: bool = False,
+ token: Optional[Union[str, bool]] = None,
+ revision: str = "main",
+ **kwargs,
+ ):
+ r"""
+ Instantiate a [`~tokenization_utils_base.PreTrainedTokenizerBase`] (or a derived class) from a predefined
+ tokenizer.
+
+ Args:
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
+ Can be either:
+
+ - A string, the *model id* of a predefined tokenizer hosted inside a model repo on huggingface.co.
+ Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
+ user or organization name, like `dbmdz/bert-base-german-cased`.
+ - A path to a *directory* containing vocabulary files required by the tokenizer, for instance saved
+ using the [`~tokenization_utils_base.PreTrainedTokenizerBase.save_pretrained`] method, e.g.,
+ `./my_model_directory/`.
+ - (**Deprecated**, not applicable to all derived classes) A path or url to a single saved vocabulary
+ file (if and only if the tokenizer only requires a single vocabulary file like Bert or XLNet), e.g.,
+ `./my_model_directory/vocab.txt`.
+ cache_dir (`str` or `os.PathLike`, *optional*):
+ Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the
+ standard cache should not be used.
+ force_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to force the (re-)download of the vocabulary files and to override the cached versions if they
+ exist.
+ resume_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to delete incompletely received files. Attempt to resume the download if such a file
+ exists.
+ proxies (`Dict[str, str]`, *optional*):
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
+ token (`str` or *bool*, *optional*):
+ The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
+ when running `huggingface-cli login` (stored in `~/.huggingface`).
+ local_files_only (`bool`, *optional*, defaults to `False`):
+ Whether or not to only rely on local files and not to attempt to download any files.
+ revision (`str`, *optional*, defaults to `"main"`):
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
+ identifier allowed by git.
+ subfolder (`str`, *optional*):
+ In case the relevant files are located inside a subfolder of the model repo on huggingface.co (e.g. for
+ facebook/rag-token-base), specify it here.
+ inputs (additional positional arguments, *optional*):
+ Will be passed along to the Tokenizer `__init__` method.
+ kwargs (additional keyword arguments, *optional*):
+ Will be passed to the Tokenizer `__init__` method. Can be used to set special tokens like `bos_token`,
+ `eos_token`, `unk_token`, `sep_token`, `pad_token`, `cls_token`, `mask_token`,
+ `additional_special_tokens`. See parameters in the `__init__` for more details.
+
+
+
+ Passing `token=True` is required when you want to use a private model.
+
+
+
+ Examples:
+
+ ```python
+ # We can't directly instantiate the base class *PreTrainedTokenizerBase*, so the examples below use a derived class: BertTokenizer
+ # Download vocabulary from huggingface.co and cache.
+ tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
+
+ # Download vocabulary from huggingface.co (user-uploaded) and cache.
+ tokenizer = BertTokenizer.from_pretrained("dbmdz/bert-base-german-cased")
+
+ # If vocabulary files are in a directory (e.g. tokenizer was saved using *save_pretrained('./test/saved_model/')*)
+ tokenizer = BertTokenizer.from_pretrained("./test/saved_model/")
+
+ # If the tokenizer uses a single vocabulary file, you can point directly to this file
+ tokenizer = BertTokenizer.from_pretrained("./test/saved_model/my_vocab.txt")
+
+ # You can link tokens to special vocabulary when instantiating
+ tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", unk_token="<unk>")
+ # You should be sure '<unk>' is in the vocabulary when doing that.
+ # Otherwise use tokenizer.add_special_tokens({'unk_token': '<unk>'}) instead.
+ assert tokenizer.unk_token == "<unk>"
+ ```"""
+ resume_download = kwargs.pop("resume_download", False)
+ proxies = kwargs.pop("proxies", None)
+ use_auth_token = kwargs.pop("use_auth_token", None)
+ subfolder = kwargs.pop("subfolder", None)
+ from_pipeline = kwargs.pop("_from_pipeline", None)
+ from_auto_class = kwargs.pop("_from_auto", False)
+ commit_hash = kwargs.pop("_commit_hash", None)
+
+ if use_auth_token is not None:
+ warnings.warn(
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers.", FutureWarning
+ )
+ if token is not None:
+ raise ValueError(
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
+ )
+ token = use_auth_token
+
+ user_agent = {"file_type": "tokenizer", "from_auto_class": from_auto_class, "is_fast": "Fast" in cls.__name__}
+ if from_pipeline is not None:
+ user_agent["using_pipeline"] = from_pipeline
+
+ if is_offline_mode() and not local_files_only:
+ logger.info("Offline mode: forcing local_files_only=True")
+ local_files_only = True
+
+ pretrained_model_name_or_path = str(pretrained_model_name_or_path)
+ vocab_files = {}
+ init_configuration = {}
+
+ is_local = os.path.isdir(pretrained_model_name_or_path)
+ single_file_id = None
+ if os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
+ if len(cls.vocab_files_names) > 1:
+ raise ValueError(
+ f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is not "
+ "supported for this tokenizer. Use a model identifier or the path to a directory instead."
+ )
+ warnings.warn(
+ f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is deprecated and "
+ "won't be possible anymore in v5. Use a model identifier or the path to a directory instead.",
+ FutureWarning,
+ )
+ file_id = list(cls.vocab_files_names.keys())[0]
+
+ vocab_files[file_id] = pretrained_model_name_or_path
+ single_file_id = file_id
+ else:
+ # At this point pretrained_model_name_or_path is either a directory or a model identifier name
+ additional_files_names = {
+ "added_tokens_file": ADDED_TOKENS_FILE,
+ "special_tokens_map_file": SPECIAL_TOKENS_MAP_FILE,
+ "tokenizer_config_file": TOKENIZER_CONFIG_FILE,
+ }
+ vocab_files = {**cls.vocab_files_names, **additional_files_names}
+
+ if "tokenizer_file" in vocab_files:
+ # Try to get the tokenizer config to see if there are versioned tokenizer files.
+ fast_tokenizer_file = FULL_TOKENIZER_FILE
+ resolved_config_file = cached_file(
+ pretrained_model_name_or_path,
+ TOKENIZER_CONFIG_FILE,
+ cache_dir=cache_dir,
+ force_download=force_download,
+ resume_download=resume_download,
+ proxies=proxies,
+ use_auth_token=token,
+ revision=revision,
+ local_files_only=local_files_only,
+ subfolder=subfolder,
+ user_agent=user_agent,
+ _raise_exceptions_for_missing_entries=False,
+ _raise_exceptions_for_connection_errors=False,
+ _commit_hash=commit_hash,
+ )
+ commit_hash = extract_commit_hash(resolved_config_file, commit_hash)
+ if resolved_config_file is not None:
+ with open(resolved_config_file, encoding="utf-8") as reader:
+ tokenizer_config = json.load(reader)
+ if "fast_tokenizer_files" in tokenizer_config:
+ fast_tokenizer_file = get_fast_tokenizer_file(tokenizer_config["fast_tokenizer_files"])
+ vocab_files["tokenizer_file"] = fast_tokenizer_file
+
+ # Get files from url, cache, or disk depending on the case
+ resolved_vocab_files = {}
+ unresolved_files = []
+ for file_id, file_path in vocab_files.items():
+ if file_path is None:
+ resolved_vocab_files[file_id] = None
+ elif single_file_id == file_id:
+ if os.path.isfile(file_path):
+ resolved_vocab_files[file_id] = file_path
+ elif is_remote_url(file_path):
+ resolved_vocab_files[file_id] = download_url(file_path, proxies=proxies)
+ else:
+ resolved_vocab_files[file_id] = cached_file(
+ pretrained_model_name_or_path,
+ file_path,
+ cache_dir=cache_dir,
+ force_download=force_download,
+ proxies=proxies,
+ resume_download=resume_download,
+ local_files_only=local_files_only,
+ use_auth_token=token,
+ user_agent=user_agent,
+ revision=revision,
+ subfolder=subfolder,
+ _raise_exceptions_for_missing_entries=False,
+ _raise_exceptions_for_connection_errors=False,
+ _commit_hash=commit_hash,
+ )
+ commit_hash = extract_commit_hash(resolved_vocab_files[file_id], commit_hash)
+
+ if len(unresolved_files) > 0:
+ logger.info(
+ f"Can't load following files from cache: {unresolved_files} and cannot check if these "
+ "files are necessary for the tokenizer to operate."
+ )
+
+ if all(full_file_name is None for full_file_name in resolved_vocab_files.values()):
+ raise EnvironmentError(
+ f"Can't load tokenizer for '{pretrained_model_name_or_path}'. If you were trying to load it from "
+ "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
+ f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
+ f"containing all relevant files for a {cls.__name__} tokenizer."
+ )
+
+ for file_id, file_path in vocab_files.items():
+ if file_id not in resolved_vocab_files:
+ continue
+
+ if is_local:
+ logger.info(f"loading file {file_path}")
+ else:
+ logger.info(f"loading file {file_path} from cache at {resolved_vocab_files[file_id]}")
+
+ return cls._from_pretrained(
+ resolved_vocab_files,
+ pretrained_model_name_or_path,
+ init_configuration,
+ *init_inputs,
+ use_auth_token=token,
+ cache_dir=cache_dir,
+ local_files_only=local_files_only,
+ _commit_hash=commit_hash,
+ _is_local=is_local,
+ **kwargs,
+ )
+
+ @classmethod
+ def _from_pretrained(
+ cls,
+ resolved_vocab_files,
+ pretrained_model_name_or_path,
+ init_configuration,
+ *init_inputs,
+ use_auth_token=None,
+ cache_dir=None,
+ local_files_only=False,
+ _commit_hash=None,
+ _is_local=False,
+ **kwargs,
+ ):
+ # We instantiate fast tokenizers based on a slow tokenizer if we don't have access to the tokenizer.json
+ # file or if `from_slow` is set to True.
+ from_slow = kwargs.get("from_slow", False)
+ has_tokenizer_file = resolved_vocab_files.get("tokenizer_file", None) is not None
+ if (from_slow or not has_tokenizer_file) and cls.slow_tokenizer_class is not None:
+ slow_tokenizer = (cls.slow_tokenizer_class)._from_pretrained(
+ copy.deepcopy(resolved_vocab_files),
+ pretrained_model_name_or_path,
+ copy.deepcopy(init_configuration),
+ *init_inputs,
+ use_auth_token=use_auth_token,
+ cache_dir=cache_dir,
+ local_files_only=local_files_only,
+ _commit_hash=_commit_hash,
+ **(copy.deepcopy(kwargs)),
+ )
+ else:
+ slow_tokenizer = None
+
+ # Prepare tokenizer initialization kwargs
+ # Did we save some inputs and kwargs to reload?
+ tokenizer_config_file = resolved_vocab_files.pop("tokenizer_config_file", None)
+ if tokenizer_config_file is not None:
+ with open(tokenizer_config_file, encoding="utf-8") as tokenizer_config_handle:
+ init_kwargs = json.load(tokenizer_config_handle)
+ # First attempt. We get tokenizer_class from tokenizer_config to check mismatch between tokenizers.
+ config_tokenizer_class = init_kwargs.get("tokenizer_class")
+ init_kwargs.pop("tokenizer_class", None)
+ saved_init_inputs = init_kwargs.pop("init_inputs", ())
+ if not init_inputs:
+ init_inputs = saved_init_inputs
+ else:
+ config_tokenizer_class = None
+ init_kwargs = init_configuration
+
+ if "auto_map" in init_kwargs and not _is_local:
+ # For backward compatibility with the old format.
+ if isinstance(init_kwargs["auto_map"], (tuple, list)):
+ init_kwargs["auto_map"] = {"AutoTokenizer": init_kwargs["auto_map"]}
+ init_kwargs["auto_map"] = add_model_info_to_auto_map(
+ init_kwargs["auto_map"], pretrained_model_name_or_path
+ )
+
+ if config_tokenizer_class is None:
+ from transformers.models.auto.configuration_auto import AutoConfig # tests_ignore
+
+ # Second attempt. If we have not yet found tokenizer_class, let's try to use the config.
+ try:
+ config = AutoConfig.from_pretrained(
+ pretrained_model_name_or_path,
+ use_auth_token=use_auth_token,
+ cache_dir=cache_dir,
+ local_files_only=local_files_only,
+ _commit_hash=_commit_hash,
+ )
+ config_tokenizer_class = config.tokenizer_class
+ except (OSError, ValueError, KeyError):
+ # skip if an error occurred.
+ config = None
+ if config_tokenizer_class is None:
+ # Third attempt. If we have not yet found the original type of the tokenizer we are loading,
+ # see if we can infer it from the type of the configuration file.
+ from transformers.models.auto.tokenization_auto import TOKENIZER_MAPPING_NAMES # tests_ignore
+
+ if hasattr(config, "model_type"):
+ model_type = config.model_type
+ else:
+ # Fallback: use pattern matching on the string.
+ model_type = None
+ for pattern in TOKENIZER_MAPPING_NAMES.keys():
+ if pattern in str(pretrained_model_name_or_path):
+ model_type = pattern
+ break
+
+ if model_type is not None:
+ config_tokenizer_class, config_tokenizer_class_fast = TOKENIZER_MAPPING_NAMES.get(
+ model_type, (None, None)
+ )
+ if config_tokenizer_class is None:
+ config_tokenizer_class = config_tokenizer_class_fast
+
+ if config_tokenizer_class is not None:
+ if cls.__name__.replace("Fast", "") != config_tokenizer_class.replace("Fast", ""):
+ logger.warning(
+ "The tokenizer class you load from this checkpoint is not the same type as the class this"
+ " function is called from. It may result in unexpected tokenization. \nThe tokenizer class you"
+ f" load from this checkpoint is '{config_tokenizer_class}'. \nThe class this function is called"
+ f" from is '{cls.__name__}'."
+ )
+
+ # Update with newly provided kwargs
+ init_kwargs.update(kwargs)
+
+ # Convert AddedTokens serialized as dict to class instances
+ def convert_added_tokens(obj: Union[AddedToken, Any]):
+ if isinstance(obj, dict) and "__type" in obj and obj["__type"] == "AddedToken":
+ obj.pop("__type")
+ return AddedToken(**obj)
+ elif isinstance(obj, (list, tuple)):
+ return [convert_added_tokens(o) for o in obj]
+ elif isinstance(obj, dict):
+ return {k: convert_added_tokens(v) for k, v in obj.items()}
+ return obj
+
+ init_kwargs = convert_added_tokens(init_kwargs)
+
+ # Set max length if needed
+ if pretrained_model_name_or_path in cls.max_model_input_sizes:
+ # if we're using a pretrained model, ensure the tokenizer
+ # won't index sequences longer than the number of positional embeddings
+
+ model_max_length = cls.max_model_input_sizes[pretrained_model_name_or_path]
+ if model_max_length is not None and isinstance(model_max_length, (int, float)):
+ model_max_length = min(init_kwargs.get("model_max_length", int(1e30)), model_max_length)
+ # TODO(PVP) - uncomment following line in Transformers v5
+ # init_kwargs["model_max_length"] = model_max_length
+ # TODO(PVP) - remove in Transformers v5
+ # ---
+ init_kwargs["model_max_length"] = cls._eventually_correct_t5_max_length(
+ pretrained_model_name_or_path, model_max_length, init_kwargs.get("model_max_length")
+ )
+ # ---
+
+ # Merge resolved_vocab_files arguments in init_kwargs.
+ added_tokens_file = resolved_vocab_files.pop("added_tokens_file", None)
+ for args_name, file_path in resolved_vocab_files.items():
+ if args_name not in init_kwargs:
+ init_kwargs[args_name] = file_path
+
+ if slow_tokenizer is not None:
+ init_kwargs["__slow_tokenizer"] = slow_tokenizer
+
+ init_kwargs["name_or_path"] = pretrained_model_name_or_path
+
+ # Instantiate tokenizer.
+ try:
+ tokenizer = cls(*init_inputs, **init_kwargs)
+ except OSError:
+ raise OSError(
+ "Unable to load vocabulary from file. "
+ "Please check that the provided vocabulary is accessible and not corrupted."
+ )
+
+ # Save inputs and kwargs for saving and re-loading with ``save_pretrained``
+ # Removed: Now done at the base class level
+ # tokenizer.init_inputs = init_inputs
+ # tokenizer.init_kwargs = init_kwargs
+
+ # If there is a complementary special token map, load it
+ special_tokens_map_file = resolved_vocab_files.pop("special_tokens_map_file", None)
+ if special_tokens_map_file is not None:
+ with open(special_tokens_map_file, encoding="utf-8") as special_tokens_map_handle:
+ special_tokens_map = json.load(special_tokens_map_handle)
+ for key, value in special_tokens_map.items():
+ if key in kwargs and kwargs[key]:
+ # This value has already been redefined by the kwargs
+ # We keep this new value and ignore the one stored in the special_tokens_map_file
+
+ continue
+
+ if isinstance(value, dict):
+ value = AddedToken(**value)
+ elif isinstance(value, list):
+ value = [AddedToken(**token) if isinstance(token, dict) else token for token in value]
+ setattr(tokenizer, key, value)
+
+ # Add supplementary tokens.
+ special_tokens = tokenizer.all_special_tokens
+ if added_tokens_file is not None:
+ with open(added_tokens_file, encoding="utf-8") as added_tokens_handle:
+ added_tok_encoder = json.load(added_tokens_handle)
+
+ # Sort added tokens by index
+ added_tok_encoder_sorted = sorted(added_tok_encoder.items(), key=lambda x: x[1])
+
+ # Accumulate added tokens into batches of special/non-special tokens, because calling add_tokens() for
+ # individual tokens would repeatedly rebuild a trie, which can be slow.
+ is_last_special = None
+ tokens = []
+
+ for token, index in added_tok_encoder_sorted:
+ current_index = len(tokenizer) + len(tokens)
+ if has_tokenizer_file and index != current_index and tokenizer.convert_tokens_to_ids(token) != index:
+ # Tokenizer fast: added token needs to either be in the vocabulary with the proper index or the
+ # index is the current length of the tokenizer (not in vocabulary)
+ raise ValueError(
+ f"Wrong index found for {token}: should be {tokenizer.convert_tokens_to_ids(token)} but found "
+ f"{index}."
+ )
+ elif not has_tokenizer_file and index != current_index:
+ # Tokenizer slow: added token cannot already be in the vocabulary so its index needs to be the
+ # current length of the tokenizer.
+ raise ValueError(
+ f"Non-consecutive added token '{token}' found. "
+ f"Should have index {current_index} but has index {index} in saved vocabulary."
+ )
+
+ is_special = bool(token in special_tokens)
+ if is_last_special is None or is_last_special == is_special:
+ tokens.append(token)
+ else:
+ tokenizer.add_tokens(tokens, special_tokens=is_last_special)
+ tokens = [token]
+ is_last_special = is_special
+
+ if tokens:
+ tokenizer.add_tokens(tokens, special_tokens=is_last_special)
+
+ # Check all our special tokens are registered as "no split" token (we don't cut them) and are in the vocab
+ added_tokens = tokenizer.sanitize_special_tokens()
+ if added_tokens:
+ logger.warning_advice(
+ "Special tokens have been added in the vocabulary, make sure the associated word embeddings are"
+ " fine-tuned or trained."
+ )
+
+ return tokenizer
+
+ @staticmethod
+ def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
+ # This method should be deleted in Transformers v5
+ # Its only purpose is to potentially throw a warning
+ # that incorrectly defined max lengths of T5's tokenizer are used
+ # which we will correct in Transformers v5.
+ return max_model_length
+
+ def save_pretrained(
+ self,
+ save_directory: Union[str, os.PathLike],
+ legacy_format: Optional[bool] = None,
+ filename_prefix: Optional[str] = None,
+ push_to_hub: bool = False,
+ **kwargs,
+ ) -> Tuple[str]:
+ """
+ Save the full tokenizer state.
+
+
+ This method makes sure the full tokenizer can then be re-loaded using the
+ [`~tokenization_utils_base.PreTrainedTokenizer.from_pretrained`] class method.
+
+ Warning: This won't save modifications you may have applied to the tokenizer after the instantiation (for
+ instance, modifying `tokenizer.do_lower_case` after creation).
+
+ Args:
+ save_directory (`str` or `os.PathLike`): The path to a directory where the tokenizer will be saved.
+ legacy_format (`bool`, *optional*):
+ Only applicable for a fast tokenizer. If unset (default), will save the tokenizer in the unified JSON
+ format as well as in legacy format if it exists, i.e. with a tokenizer-specific vocabulary and a separate
+ added_tokens file.
+
+ If `False`, will only save the tokenizer in the unified JSON format. This format is incompatible with
+ "slow" tokenizers (not powered by the *tokenizers* library), so the tokenizer will not be loadable by
+ the corresponding "slow" tokenizer class.
+
+ If `True`, will save the tokenizer in legacy format. If the "slow" tokenizer doesn't exist, a `ValueError`
+ is raised.
+ filename_prefix (`str`, *optional*):
+ A prefix to add to the names of the files saved by the tokenizer.
+ push_to_hub (`bool`, *optional*, defaults to `False`):
+ Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
+ repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
+ namespace).
+ kwargs (`Dict[str, Any]`, *optional*):
+ Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
+
+ Returns:
+ A tuple of `str`: The files saved.
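+
+ Example (an illustrative sketch using a derived class such as `BertTokenizer`; the target directory
+ `./my_tokenizer` is hypothetical):
+
+ ```python
+ from transformers import BertTokenizer
+
+ tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
+ # Write the tokenizer config, special tokens map and vocabulary files to disk ...
+ saved_files = tokenizer.save_pretrained("./my_tokenizer")
+ # ... and re-load the very same tokenizer from that directory later on.
+ reloaded_tokenizer = BertTokenizer.from_pretrained("./my_tokenizer")
+ ```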
+ """
+ if os.path.isfile(save_directory):
+ logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
+ return
+
+ os.makedirs(save_directory, exist_ok=True)
+
+ if push_to_hub:
+ commit_message = kwargs.pop("commit_message", None)
+ repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
+ repo_id = self._create_repo(repo_id, **kwargs)
+ files_timestamps = self._get_files_timestamps(save_directory)
+
+ special_tokens_map_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + SPECIAL_TOKENS_MAP_FILE
+ )
+ tokenizer_config_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + TOKENIZER_CONFIG_FILE
+ )
+
+ tokenizer_config = copy.deepcopy(self.init_kwargs)
+
+ # TODO: Ensure the modified attributes (those are also in the __init__ kwargs) will give identical tokenizers
+ # target_keys = self.init_kwargs.keys()
+ target_keys = ["model_max_length", "clean_up_tokenization_spaces"]
+ for k in target_keys:
+ if hasattr(self, k):
+ tokenizer_config[k] = getattr(self, k)
+
+ if len(self.init_inputs) > 0:
+ tokenizer_config["init_inputs"] = copy.deepcopy(self.init_inputs)
+ for file_id in self.vocab_files_names.keys():
+ tokenizer_config.pop(file_id, None)
+
+ # Sanitize AddedTokens
+ def convert_added_tokens(obj: Union[AddedToken, Any], add_type_field=True):
+ if isinstance(obj, AddedToken):
+ out = obj.__getstate__()
+ if add_type_field:
+ out["__type"] = "AddedToken"
+ return out
+ elif isinstance(obj, (list, tuple)):
+ return [convert_added_tokens(o, add_type_field=add_type_field) for o in obj]
+ elif isinstance(obj, dict):
+ return {k: convert_added_tokens(v, add_type_field=add_type_field) for k, v in obj.items()}
+ return obj
+
+ # add_type_field=True to allow dicts in the kwargs / differentiate from AddedToken serialization
+ tokenizer_config = convert_added_tokens(tokenizer_config, add_type_field=True)
+
+ # Add tokenizer class to the tokenizer config to be able to reload it with from_pretrained
+ tokenizer_class = self.__class__.__name__
+ # Remove the Fast at the end unless we have a special `PreTrainedTokenizerFast`
+ if tokenizer_class.endswith("Fast") and tokenizer_class != "PreTrainedTokenizerFast":
+ tokenizer_class = tokenizer_class[:-4]
+ tokenizer_config["tokenizer_class"] = tokenizer_class
+ if getattr(self, "_auto_map", None) is not None:
+ tokenizer_config["auto_map"] = self._auto_map
+ if getattr(self, "_processor_class", None) is not None:
+ tokenizer_config["processor_class"] = self._processor_class
+
+ # If we have a custom model, we copy the file defining it in the folder and set the attributes so it can be
+ # loaded from the Hub.
+ if self._auto_class is not None:
+ custom_object_save(self, save_directory, config=tokenizer_config)
+
+ # remove private information
+ if "name_or_path" in tokenizer_config:
+ tokenizer_config.pop("name_or_path")
+ tokenizer_config.pop("special_tokens_map_file", None)
+
+ with open(tokenizer_config_file, "w", encoding="utf-8") as f:
+ out_str = json.dumps(tokenizer_config, indent=2, sort_keys=True, ensure_ascii=False) + "\n"
+ f.write(out_str)
+ logger.info(f"tokenizer config file saved in {tokenizer_config_file}")
+
+ # Sanitize AddedTokens in special_tokens_map
+ write_dict = convert_added_tokens(self.special_tokens_map_extended, add_type_field=False)
+ with open(special_tokens_map_file, "w", encoding="utf-8") as f:
+ out_str = json.dumps(write_dict, indent=2, sort_keys=True, ensure_ascii=False) + "\n"
+ f.write(out_str)
+ logger.info(f"Special tokens file saved in {special_tokens_map_file}")
+
+ file_names = (tokenizer_config_file, special_tokens_map_file)
+
+ save_files = self._save_pretrained(
+ save_directory=save_directory,
+ file_names=file_names,
+ legacy_format=legacy_format,
+ filename_prefix=filename_prefix,
+ )
+
+ if push_to_hub:
+ self._upload_modified_files(
+ save_directory,
+ repo_id,
+ files_timestamps,
+ commit_message=commit_message,
+ token=kwargs.get("use_auth_token"),
+ )
+
+ return save_files
+
+ def _save_pretrained(
+ self,
+ save_directory: Union[str, os.PathLike],
+ file_names: Tuple[str],
+ legacy_format: Optional[bool] = None,
+ filename_prefix: Optional[str] = None,
+ ) -> Tuple[str]:
+ """
+ Save a tokenizer using the slow-tokenizer/legacy format: vocabulary + added tokens.
+
+ Fast tokenizers can also be saved in a single JSON file containing {config + vocab + added-tokens} using the
+ specific [`~tokenization_utils_fast.PreTrainedTokenizerFast._save_pretrained`] method.
+ """
+ if legacy_format is False:
+ raise ValueError(
+ "Only fast tokenizers (instances of PreTrainedTokenizerFast) can be saved in non legacy format."
+ )
+
+ save_directory = str(save_directory)
+
+ added_tokens_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + ADDED_TOKENS_FILE
+ )
+ added_vocab = self.get_added_vocab()
+ if added_vocab:
+ with open(added_tokens_file, "w", encoding="utf-8") as f:
+ out_str = json.dumps(added_vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n"
+ f.write(out_str)
+ logger.info(f"added tokens file saved in {added_tokens_file}")
+
+ vocab_files = self.save_vocabulary(save_directory, filename_prefix=filename_prefix)
+
+ return file_names + vocab_files + (added_tokens_file,)
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ """
+ Save only the vocabulary of the tokenizer (vocabulary + added tokens).
+
+ This method won't save the configuration and special token mappings of the tokenizer. Use
+ [`~PreTrainedTokenizerFast._save_pretrained`] to save the whole state of the tokenizer.
+
+ Args:
+ save_directory (`str`):
+ The directory in which to save the vocabulary.
+ filename_prefix (`str`, *optional*):
+ An optional prefix to add to the names of the saved files.
+
+ Returns:
+ `Tuple[str]`: Paths to the files saved.
+ """
+ raise NotImplementedError
+
+ def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]:
+ """
+ Converts a string into a sequence of tokens, replacing unknown tokens with the `unk_token`.
+
+ Args:
+ text (`str`):
+ The sequence to be encoded.
+ pair (`str`, *optional*):
+ A second sequence to be encoded with the first.
+ add_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not to add the special tokens associated with the corresponding model.
+ kwargs (additional keyword arguments, *optional*):
+ Will be passed to the underlying model specific encode method. See details in
+ [`~PreTrainedTokenizerBase.__call__`]
+
+ Returns:
+ `List[str]`: The list of tokens.
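+
+ Example (a minimal sketch assuming a derived class such as `BertTokenizer`; the exact tokens depend on the
+ checkpoint's vocabulary):
+
+ ```python
+ from transformers import BertTokenizer
+
+ tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
+ tokens = tokenizer.tokenize("Hello, how are you?")
+ print(tokens)  # e.g. ['hello', ',', 'how', 'are', 'you', '?'] for an uncased WordPiece vocabulary
+ ```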
+ """
+ raise NotImplementedError
+
+ @add_end_docstrings(
+ ENCODE_KWARGS_DOCSTRING,
+ """
+ **kwargs: Passed along to the `.tokenize()` method.
+ """,
+ """
+ Returns:
+ `List[int]`, `torch.Tensor`, `tf.Tensor` or `np.ndarray`: The tokenized ids of the text.
+ """,
+ )
+ def encode(
+ self,
+ text: Union[TextInput, PreTokenizedInput, EncodedInput],
+ text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ **kwargs,
+ ) -> List[int]:
+ """
+ Converts a string to a sequence of ids (integers), using the tokenizer and vocabulary.
+
+ Same as doing `self.convert_tokens_to_ids(self.tokenize(text))`.
+
+ Args:
+ text (`str`, `List[str]` or `List[int]`):
+ The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the
+ `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
+ method).
+ text_pair (`str`, `List[str]` or `List[int]`, *optional*):
+ Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using
+ the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
+ method).
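+
+ Example (a minimal sketch assuming a derived class such as `BertTokenizer`; the exact ids depend on the
+ checkpoint's vocabulary):
+
+ ```python
+ from transformers import BertTokenizer
+
+ tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
+ input_ids = tokenizer.encode("Hello world", add_special_tokens=True)
+ # The returned ids include the model's special tokens (e.g. [CLS] ... [SEP] for BERT).
+ print(tokenizer.decode(input_ids))
+ ```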
+ """
+ encoded_inputs = self.encode_plus(
+ text,
+ text_pair=text_pair,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ stride=stride,
+ return_tensors=return_tensors,
+ **kwargs,
+ )
+
+ return encoded_inputs["input_ids"]
+
+ def num_special_tokens_to_add(self, pair: bool = False) -> int:
+ raise NotImplementedError
+
+ def _get_padding_truncation_strategies(
+ self, padding=False, truncation=None, max_length=None, pad_to_multiple_of=None, verbose=True, **kwargs
+ ):
+ """
+ Find the correct padding/truncation strategy with backward compatibility for old arguments (truncation_strategy
+ and pad_to_max_length) and behaviors.
+ """
+ old_truncation_strategy = kwargs.pop("truncation_strategy", "do_not_truncate")
+ old_pad_to_max_length = kwargs.pop("pad_to_max_length", False)
+
+ # Backward compatibility for previous behavior, maybe we should deprecate it:
+ # If you only set max_length, it activates truncation for max_length
+ if max_length is not None and padding is False and truncation is None:
+ if verbose:
+ if not self.deprecation_warnings.get("Truncation-not-explicitly-activated", False):
+ logger.warning(
+ "Truncation was not explicitly activated but `max_length` is provided a specific value, please"
+ " use `truncation=True` to explicitly truncate examples to max length. Defaulting to"
+ " 'longest_first' truncation strategy. If you encode pairs of sequences (GLUE-style) with the"
+ " tokenizer you can select this strategy more precisely by providing a specific strategy to"
+ " `truncation`."
+ )
+ self.deprecation_warnings["Truncation-not-explicitly-activated"] = True
+ truncation = "longest_first"
+
+ # Get padding strategy
+ if padding is False and old_pad_to_max_length:
+ if verbose:
+ warnings.warn(
+ "The `pad_to_max_length` argument is deprecated and will be removed in a future version, "
+ "use `padding=True` or `padding='longest'` to pad to the longest sequence in the batch, or "
+ "use `padding='max_length'` to pad to a max length. In this case, you can give a specific "
+ "length with `max_length` (e.g. `max_length=45`) or leave max_length to None to pad to the "
+ "maximal input size of the model (e.g. 512 for Bert).",
+ FutureWarning,
+ )
+ if max_length is None:
+ padding_strategy = PaddingStrategy.LONGEST
+ else:
+ padding_strategy = PaddingStrategy.MAX_LENGTH
+ elif padding is not False:
+ if padding is True:
+ if verbose:
+ if max_length is not None and (
+ truncation is None or truncation is False or truncation == "do_not_truncate"
+ ):
+ warnings.warn(
+ "`max_length` is ignored when `padding`=`True` and there is no truncation strategy. "
+ "To pad to max length, use `padding='max_length'`."
+ )
+ if old_pad_to_max_length is not False:
+ warnings.warn("Though `pad_to_max_length` = `True`, it is ignored because `padding`=`True`.")
+ padding_strategy = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
+ elif not isinstance(padding, PaddingStrategy):
+ padding_strategy = PaddingStrategy(padding)
+ elif isinstance(padding, PaddingStrategy):
+ padding_strategy = padding
+ else:
+ padding_strategy = PaddingStrategy.DO_NOT_PAD
+
+ # Get truncation strategy
+ if truncation is None and old_truncation_strategy != "do_not_truncate":
+ if verbose:
+ warnings.warn(
+ "The `truncation_strategy` argument is deprecated and will be removed in a future version, use"
+ " `truncation=True` to truncate examples to a max length. You can give a specific length with"
+ " `max_length` (e.g. `max_length=45`) or leave max_length to None to truncate to the maximal input"
+ " size of the model (e.g. 512 for Bert). If you have pairs of inputs, you can give a specific"
+ " truncation strategy selected among `truncation='only_first'` (will only truncate the first"
+ " sentence in the pairs) `truncation='only_second'` (will only truncate the second sentence in the"
+ " pairs) or `truncation='longest_first'` (will iteratively remove tokens from the longest sentence"
+ " in the pairs).",
+ FutureWarning,
+ )
+ truncation_strategy = TruncationStrategy(old_truncation_strategy)
+ elif truncation is not False and truncation is not None:
+ if truncation is True:
+ truncation_strategy = (
+ TruncationStrategy.LONGEST_FIRST
+ ) # Default to truncate the longest sequences in pairs of inputs
+ elif not isinstance(truncation, TruncationStrategy):
+ truncation_strategy = TruncationStrategy(truncation)
+ elif isinstance(truncation, TruncationStrategy):
+ truncation_strategy = truncation
+ else:
+ truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE
+
+ # Set max length if needed
+ if max_length is None:
+ if padding_strategy == PaddingStrategy.MAX_LENGTH:
+ if self.model_max_length > LARGE_INTEGER:
+ if verbose:
+ if not self.deprecation_warnings.get("Asking-to-pad-to-max_length", False):
+ logger.warning(
+ "Asking to pad to max_length but no maximum length is provided and the model has no"
+ " predefined maximum length. Default to no padding."
+ )
+ self.deprecation_warnings["Asking-to-pad-to-max_length"] = True
+ padding_strategy = PaddingStrategy.DO_NOT_PAD
+ else:
+ max_length = self.model_max_length
+
+ if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE:
+ if self.model_max_length > LARGE_INTEGER:
+ if verbose:
+ if not self.deprecation_warnings.get("Asking-to-truncate-to-max_length", False):
+ logger.warning(
+ "Asking to truncate to max_length but no maximum length is provided and the model has"
+ " no predefined maximum length. Default to no truncation."
+ )
+ self.deprecation_warnings["Asking-to-truncate-to-max_length"] = True
+ truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE
+ else:
+ max_length = self.model_max_length
+
+ # Test if we have a padding token
+ if padding_strategy != PaddingStrategy.DO_NOT_PAD and (not self.pad_token or self.pad_token_id < 0):
+ raise ValueError(
+ "Asking to pad but the tokenizer does not have a padding token. "
+ "Please select a token to use as `pad_token` (e.g. `tokenizer.pad_token = tokenizer.eos_token`) "
+ "or add a new pad token via `tokenizer.add_special_tokens({'pad_token': '[PAD]'})`."
+ )
+
+ # Check that we will truncate to a multiple of pad_to_multiple_of if both are provided
+ if (
+ truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE
+ and padding_strategy != PaddingStrategy.DO_NOT_PAD
+ and pad_to_multiple_of is not None
+ and max_length is not None
+ and (max_length % pad_to_multiple_of != 0)
+ ):
+ raise ValueError(
+ "Truncation and padding are both activated but "
+ f"truncation length ({max_length}) is not a multiple of pad_to_multiple_of ({pad_to_multiple_of})."
+ )
+
+ return padding_strategy, truncation_strategy, max_length, kwargs
+
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
+ def __call__(
+ self,
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
+ text_pair: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
+ text_target: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
+ text_pair_target: Optional[
+ Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]
+ ] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ is_split_into_words: bool = False,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
+ sequences.
+
+ Args:
+ text (`str`, `List[str]`, `List[List[str]]`, *optional*):
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
+ text_pair (`str`, `List[str]`, `List[List[str]]`, *optional*):
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
+ text_target (`str`, `List[str]`, `List[List[str]]`, *optional*):
+ The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a
+ list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized),
+ you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
+ text_pair_target (`str`, `List[str]`, `List[List[str]]`, *optional*):
+ The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a
+ list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized),
+ you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
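+
+ Example (an illustrative sketch assuming a derived class such as `BertTokenizer` and the
+ `bert-base-uncased` checkpoint):
+
+ ```python
+ from transformers import BertTokenizer
+
+ tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
+ single = tokenizer("Hello world")  # one sequence
+ pair = tokenizer("Hello world", "How are you?")  # a pair of sequences
+ batch = tokenizer(["Hello world", "A much longer second example"], padding=True, truncation=True, max_length=16)
+ print(batch["input_ids"])  # two id lists of equal length thanks to padding
+ ```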
+ """
+ # To avoid duplicating
+ all_kwargs = {
+ "add_special_tokens": add_special_tokens,
+ "padding": padding,
+ "truncation": truncation,
+ "max_length": max_length,
+ "stride": stride,
+ "is_split_into_words": is_split_into_words,
+ "pad_to_multiple_of": pad_to_multiple_of,
+ "return_tensors": return_tensors,
+ "return_token_type_ids": return_token_type_ids,
+ "return_attention_mask": return_attention_mask,
+ "return_overflowing_tokens": return_overflowing_tokens,
+ "return_special_tokens_mask": return_special_tokens_mask,
+ "return_offsets_mapping": return_offsets_mapping,
+ "return_length": return_length,
+ "verbose": verbose,
+ }
+ all_kwargs.update(kwargs)
+ if text is None and text_target is None:
+ raise ValueError("You need to specify either `text` or `text_target`.")
+ if text is not None:
+ # The context manager will send the inputs as normal texts and not text_target, but we shouldn't change the
+ # input mode in this case.
+ if not self._in_target_context_manager:
+ self._switch_to_input_mode()
+ encodings = self._call_one(text=text, text_pair=text_pair, **all_kwargs)
+ if text_target is not None:
+ self._switch_to_target_mode()
+ target_encodings = self._call_one(text=text_target, text_pair=text_pair_target, **all_kwargs)
+ # Leave back tokenizer in input mode
+ self._switch_to_input_mode()
+
+ if text_target is None:
+ return encodings
+ elif text is None:
+ return target_encodings
+ else:
+ encodings["labels"] = target_encodings["input_ids"]
+ return encodings
+
+ def _call_one(
+ self,
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
+ text_pair: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ is_split_into_words: bool = False,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ # Input type checking for clearer error
+ def _is_valid_text_input(t):
+ if isinstance(t, str):
+ # Strings are fine
+ return True
+ elif isinstance(t, (list, tuple)):
+ # List are fine as long as they are...
+ if len(t) == 0:
+ # ... empty
+ return True
+ elif isinstance(t[0], str):
+ # ... list of strings
+ return True
+ elif isinstance(t[0], (list, tuple)):
+ # ... list with an empty list or with a list of strings
+ return len(t[0]) == 0 or isinstance(t[0][0], str)
+ else:
+ return False
+ else:
+ return False
+
+ if not _is_valid_text_input(text):
+ raise ValueError(
+ "text input must be of type `str` (single example), `List[str]` (batch or single pretokenized example) "
+ "or `List[List[str]]` (batch of pretokenized examples)."
+ )
+
+ if text_pair is not None and not _is_valid_text_input(text_pair):
+ raise ValueError(
+ "text_pair input must be of type `str` (single example), `List[str]` (batch or single pretokenized example) "
+ "or `List[List[str]]` (batch of pretokenized examples)."
+ )
+
+ if is_split_into_words:
+ is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
+ else:
+ is_batched = isinstance(text, (list, tuple))
+
+ if is_batched:
+ if isinstance(text_pair, str):
+ raise TypeError(
+ "when tokenizing batches of text, `text_pair` must be a list or tuple with the same length as"
+ " `text`."
+ )
+ if text_pair is not None and len(text) != len(text_pair):
+ raise ValueError(
+ f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
+ f" {len(text_pair)}."
+ )
+ batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
+ return self.batch_encode_plus(
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ stride=stride,
+ is_split_into_words=is_split_into_words,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+ else:
+ return self.encode_plus(
+ text=text,
+ text_pair=text_pair,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ stride=stride,
+ is_split_into_words=is_split_into_words,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
+ def encode_plus(
+ self,
+ text: Union[TextInput, PreTokenizedInput, EncodedInput],
+ text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ is_split_into_words: bool = False,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+ Tokenize and prepare for the model a sequence or a pair of sequences.
+
+
+
+ This method is deprecated, `__call__` should be used instead.
+
+
+
+ Args:
+ text (`str`, `List[str]` or `List[int]` (the latter only for not-fast tokenizers)):
+ The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the
+ `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
+ method).
+ text_pair (`str`, `List[str]` or `List[int]`, *optional*):
+ Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using
+ the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
+ method).
+ """
+
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ return self._encode_plus(
+ text=text,
+ text_pair=text_pair,
+ add_special_tokens=add_special_tokens,
+ padding_strategy=padding_strategy,
+ truncation_strategy=truncation_strategy,
+ max_length=max_length,
+ stride=stride,
+ is_split_into_words=is_split_into_words,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ def _encode_plus(
+ self,
+ text: Union[TextInput, PreTokenizedInput, EncodedInput],
+ text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
+ add_special_tokens: bool = True,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ is_split_into_words: bool = False,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ raise NotImplementedError
+
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
+ def batch_encode_plus(
+ self,
+ batch_text_or_text_pairs: Union[
+ List[TextInput],
+ List[TextInputPair],
+ List[PreTokenizedInput],
+ List[PreTokenizedInputPair],
+ List[EncodedInput],
+ List[EncodedInputPair],
+ ],
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ is_split_into_words: bool = False,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+ Tokenize and prepare for the model a list of sequences or a list of pairs of sequences.
+
+
+
+ This method is deprecated, `__call__` should be used instead.
+
+
+
+ Args:
+ batch_text_or_text_pairs (`List[str]`, `List[Tuple[str, str]]`, `List[List[str]]`, `List[Tuple[List[str], List[str]]]`, and for not-fast tokenizers, also `List[List[int]]`, `List[Tuple[List[int], List[int]]]`):
+ Batch of sequences or pair of sequences to be encoded. This can be a list of
+ string/string-sequences/int-sequences or a list of pairs of string/string-sequences/int-sequences (see
+ details in `encode_plus`).
+ """
+
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ return self._batch_encode_plus(
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
+ add_special_tokens=add_special_tokens,
+ padding_strategy=padding_strategy,
+ truncation_strategy=truncation_strategy,
+ max_length=max_length,
+ stride=stride,
+ is_split_into_words=is_split_into_words,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ def _batch_encode_plus(
+ self,
+ batch_text_or_text_pairs: Union[
+ List[TextInput],
+ List[TextInputPair],
+ List[PreTokenizedInput],
+ List[PreTokenizedInputPair],
+ List[EncodedInput],
+ List[EncodedInputPair],
+ ],
+ add_special_tokens: bool = True,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ is_split_into_words: bool = False,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ raise NotImplementedError
+
+ def pad(
+ self,
+ encoded_inputs: Union[
+ BatchEncoding,
+ List[BatchEncoding],
+ Dict[str, EncodedInput],
+ Dict[str, List[EncodedInput]],
+ List[Dict[str, EncodedInput]],
+ ],
+ padding: Union[bool, str, PaddingStrategy] = True,
+ max_length: Optional[int] = None,
+ pad_to_multiple_of: Optional[int] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ verbose: bool = True,
+ ) -> BatchEncoding:
+ """
+ Pad a single encoded input or a batch of encoded inputs up to predefined length or to the max sequence length
+ in the batch.
+
+ The padding side (left/right) and the padding token ids are defined at the tokenizer level (with
+ `self.padding_side`, `self.pad_token_id` and `self.pad_token_type_id`).
+
+ Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the
+ text followed by a call to the `pad` method to get a padded encoding.
+
+ If the `encoded_inputs` passed are a dictionary of numpy arrays, PyTorch tensors or TensorFlow tensors, the
+ result will use the same type unless you provide a different tensor type with `return_tensors`. In the case of
+ PyTorch tensors, however, you will lose the specific device of your tensors.
+
+ Args:
+ encoded_inputs ([`BatchEncoding`], list of [`BatchEncoding`], `Dict[str, List[int]]`, `Dict[str, List[List[int]]` or `List[Dict[str, List[int]]]`):
+ Tokenized inputs. Can represent one input ([`BatchEncoding`] or `Dict[str, List[int]]`) or a batch of
+ tokenized inputs (list of [`BatchEncoding`], *Dict[str, List[List[int]]]* or *List[Dict[str,
+ List[int]]]*) so you can use this method during preprocessing as well as in a PyTorch Dataloader
+ collate function.
+
+ Instead of `List[int]` you can have tensors (numpy arrays, PyTorch tensors or TensorFlow tensors), see
+ the note above for the return type.
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding
+ index) among:
+
+ - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
+ sequence is provided).
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+ acceptable input length for the model if that argument is not provided.
+ - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different
+ lengths).
+ max_length (`int`, *optional*):
+ Maximum length of the returned list and optionally padding length (see above).
+ pad_to_multiple_of (`int`, *optional*):
+ If set will pad the sequence to a multiple of the provided value.
+
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
+ `>= 7.0` (Volta).
+ return_attention_mask (`bool`, *optional*):
+ Whether to return the attention mask. If left to the default, will return the attention mask according
+ to the specific tokenizer's default, defined by the `model_input_names` attribute.
+
+ [What are attention masks?](../glossary#attention-mask)
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
+ If set, will return tensors instead of list of python integers. Acceptable values are:
+
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
+ - `'np'`: Return Numpy `np.ndarray` objects.
+ verbose (`bool`, *optional*, defaults to `True`):
+ Whether or not to print more information and warnings.
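+
+ Example (a minimal sketch, e.g. for a DataLoader `collate_fn`; the `bert-base-uncased` checkpoint is an
+ assumption and `return_tensors="pt"` requires PyTorch):
+
+ ```python
+ >>> from transformers import AutoTokenizer
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+ >>> features = [tokenizer("Hello world!"), tokenizer("Hi")]
+ >>> # Pad both encodings to the length of the longest one and return PyTorch tensors.
+ >>> batch = tokenizer.pad(features, padding=True, return_tensors="pt")
+ >>> batch["input_ids"].shape[0]
+ 2
+ ```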
+ """
+ if self.__class__.__name__.endswith("Fast"):
+ if not self.deprecation_warnings.get("Asking-to-pad-a-fast-tokenizer", False):
+ logger.warning_advice(
+ f"You're using a {self.__class__.__name__} tokenizer. Please note that with a fast tokenizer,"
+ " using the `__call__` method is faster than using a method to encode the text followed by a call"
+ " to the `pad` method to get a padded encoding."
+ )
+ self.deprecation_warnings["Asking-to-pad-a-fast-tokenizer"] = True
+
+ # If we have a list of dicts, let's convert it in a dict of lists
+ # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
+ if isinstance(encoded_inputs, (list, tuple)) and isinstance(encoded_inputs[0], Mapping):
+ encoded_inputs = {key: [example[key] for example in encoded_inputs] for key in encoded_inputs[0].keys()}
+
+ # The model's main input name, usually `input_ids`, has to be passed for padding
+ if self.model_input_names[0] not in encoded_inputs:
+ raise ValueError(
+ "You should supply an encoding or a list of encodings to this method "
+ f"that includes {self.model_input_names[0]}, but you provided {list(encoded_inputs.keys())}"
+ )
+
+ required_input = encoded_inputs[self.model_input_names[0]]
+
+ if required_input is None or (isinstance(required_input, Sized) and len(required_input) == 0):
+ if return_attention_mask:
+ encoded_inputs["attention_mask"] = []
+ return encoded_inputs
+
+ # If we have PyTorch/TF/NumPy tensors/arrays as inputs, we cast them as python objects
+ # and rebuild them afterwards if no return_tensors is specified
+ # Note that we lose the specific device the tensor may be on for PyTorch
+
+ first_element = required_input[0]
+ if isinstance(first_element, (list, tuple)):
+ # first_element might be an empty list/tuple in some edge cases so we grab the first non-empty element.
+ for item in required_input:
+ if len(item) != 0:
+ first_element = item[0]
+ break
+ # At this point, if `first_element` is still a list/tuple, it's an empty one so there is nothing to do.
+ if not isinstance(first_element, (int, list, tuple)):
+ if is_tf_tensor(first_element):
+ return_tensors = "tf" if return_tensors is None else return_tensors
+ elif is_torch_tensor(first_element):
+ return_tensors = "pt" if return_tensors is None else return_tensors
+ elif isinstance(first_element, np.ndarray):
+ return_tensors = "np" if return_tensors is None else return_tensors
+ else:
+ raise ValueError(
+ f"type of {first_element} unknown: {type(first_element)}. "
+ "Should be one of a python, numpy, pytorch or tensorflow object."
+ )
+
+ for key, value in encoded_inputs.items():
+ encoded_inputs[key] = to_py_obj(value)
+
+ # Convert padding_strategy to PaddingStrategy
+ padding_strategy, _, max_length, _ = self._get_padding_truncation_strategies(
+ padding=padding, max_length=max_length, verbose=verbose
+ )
+
+ required_input = encoded_inputs[self.model_input_names[0]]
+ if required_input and not isinstance(required_input[0], (list, tuple)):
+ encoded_inputs = self._pad(
+ encoded_inputs,
+ max_length=max_length,
+ padding_strategy=padding_strategy,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_attention_mask=return_attention_mask,
+ )
+ return BatchEncoding(encoded_inputs, tensor_type=return_tensors)
+
+ batch_size = len(required_input)
+ assert all(
+ len(v) == batch_size for v in encoded_inputs.values()
+ ), "Some items in the output dictionary have a different batch size than others."
+
+ if padding_strategy == PaddingStrategy.LONGEST:
+ max_length = max(len(inputs) for inputs in required_input)
+ padding_strategy = PaddingStrategy.MAX_LENGTH
+
+ batch_outputs = {}
+ for i in range(batch_size):
+ inputs = {k: v[i] for k, v in encoded_inputs.items()}
+ outputs = self._pad(
+ inputs,
+ max_length=max_length,
+ padding_strategy=padding_strategy,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_attention_mask=return_attention_mask,
+ )
+
+ for key, value in outputs.items():
+ if key not in batch_outputs:
+ batch_outputs[key] = []
+ batch_outputs[key].append(value)
+
+ return BatchEncoding(batch_outputs, tensor_type=return_tensors)
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create the token type IDs corresponding to the sequences passed. [What are token type
+ IDs?](../glossary#token-type-ids)
+
+ Should be overridden in a subclass if the model has a special way of building those.
+
+ Args:
+ token_ids_0 (`List[int]`): The first tokenized sequence.
+ token_ids_1 (`List[int]`, *optional*): The second tokenized sequence.
+
+ Returns:
+ `List[int]`: The token type ids.
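+
+ Example (a minimal sketch with placeholder ids, showing the behavior of this base implementation; model
+ tokenizers that override it, e.g. for BERT, also account for their special tokens):
+
+ ```python
+ >>> tokenizer.create_token_type_ids_from_sequences([1, 2, 3])
+ [0, 0, 0]
+ >>> tokenizer.create_token_type_ids_from_sequences([1, 2, 3], [4, 5])
+ [0, 0, 0, 1, 1]
+ ```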
+ """
+ if token_ids_1 is None:
+ return len(token_ids_0) * [0]
+ return [0] * len(token_ids_0) + [1] * len(token_ids_1)
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens.
+
+ This implementation does not add special tokens and this method should be overridden in a subclass.
+
+ Args:
+ token_ids_0 (`List[int]`): The first tokenized sequence.
+ token_ids_1 (`List[int]`, *optional*): The second tokenized sequence.
+
+ Returns:
+ `List[int]`: The model input with special tokens.
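+
+ Example (a minimal sketch; the `bert-base-uncased` checkpoint is an assumption and its override wraps the
+ ids with `[CLS]`/`[SEP]`, whereas this base implementation only concatenates):
+
+ ```python
+ >>> from transformers import AutoTokenizer
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+ >>> ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("hello world"))
+ >>> with_special = tokenizer.build_inputs_with_special_tokens(ids)
+ >>> with_special[0] == tokenizer.cls_token_id and with_special[-1] == tokenizer.sep_token_id
+ True
+ ```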
+ """
+ if token_ids_1 is None:
+ return token_ids_0
+ return token_ids_0 + token_ids_1
+
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
+ def prepare_for_model(
+ self,
+ ids: List[int],
+ pair_ids: Optional[List[int]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Optional[Union[bool, str, TruncationStrategy]] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ prepend_batch_axis: bool = False,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+ Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
+ adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
+ manages a moving window (with user-defined stride) for overflowing tokens. Please note that for *pair_ids*
+ different from `None` and *truncation_strategy = longest_first* or `True`, it is not possible to return
+ overflowing tokens. Such a combination of arguments will raise an error.
+
+ Args:
+ ids (`List[int]`):
+ Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and
+ `convert_tokens_to_ids` methods.
+ pair_ids (`List[int]`, *optional*):
+ Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
+ and `convert_tokens_to_ids` methods.
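+
+ Example (a minimal sketch, assuming a `bert-base-uncased` checkpoint; the exact ids depend on the
+ vocabulary):
+
+ ```python
+ >>> from transformers import AutoTokenizer
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+ >>> ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("hello world"))
+ >>> encoded = tokenizer.prepare_for_model(ids)
+ >>> # The model's special tokens have been added around the original ids.
+ >>> len(encoded["input_ids"]) == len(ids) + tokenizer.num_special_tokens_to_add(pair=False)
+ True
+ ```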
+ """
+
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ pair = bool(pair_ids is not None)
+ len_ids = len(ids)
+ len_pair_ids = len(pair_ids) if pair else 0
+
+ if return_token_type_ids and not add_special_tokens:
+ raise ValueError(
+ "Asking to return token_type_ids while setting add_special_tokens to False "
+ "results in an undefined behavior. Please set add_special_tokens to True or "
+ "set return_token_type_ids to None."
+ )
+
+ if (
+ return_overflowing_tokens
+ and truncation_strategy == TruncationStrategy.LONGEST_FIRST
+ and pair_ids is not None
+ ):
+ raise ValueError(
+ "Not possible to return overflowing tokens for pair of sequences with the "
+ "`longest_first`. Please select another truncation strategy than `longest_first`, "
+ "for instance `only_second` or `only_first`."
+ )
+
+ # Load from model defaults
+ if return_token_type_ids is None:
+ return_token_type_ids = "token_type_ids" in self.model_input_names
+ if return_attention_mask is None:
+ return_attention_mask = "attention_mask" in self.model_input_names
+
+ encoded_inputs = {}
+
+ # Compute the total size of the returned encodings
+ total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)
+
+ # Truncation: Handle max sequence length
+ overflowing_tokens = []
+ if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length:
+ ids, pair_ids, overflowing_tokens = self.truncate_sequences(
+ ids,
+ pair_ids=pair_ids,
+ num_tokens_to_remove=total_len - max_length,
+ truncation_strategy=truncation_strategy,
+ stride=stride,
+ )
+
+ if return_overflowing_tokens:
+ encoded_inputs["overflowing_tokens"] = overflowing_tokens
+ encoded_inputs["num_truncated_tokens"] = total_len - max_length
+
+ # Add special tokens
+ if add_special_tokens:
+ sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
+ token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
+ else:
+ sequence = ids + pair_ids if pair else ids
+ token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else [])
+
+ # Build output dictionary
+ encoded_inputs["input_ids"] = sequence
+ if return_token_type_ids:
+ encoded_inputs["token_type_ids"] = token_type_ids
+ if return_special_tokens_mask:
+ if add_special_tokens:
+ encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
+ else:
+ encoded_inputs["special_tokens_mask"] = [0] * len(sequence)
+
+ # Check lengths
+ self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose)
+
+ # Padding
+ if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
+ encoded_inputs = self.pad(
+ encoded_inputs,
+ max_length=max_length,
+ padding=padding_strategy.value,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_attention_mask=return_attention_mask,
+ )
+
+ if return_length:
+ encoded_inputs["length"] = len(encoded_inputs["input_ids"])
+
+ batch_outputs = BatchEncoding(
+ encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis
+ )
+
+ return batch_outputs
+
+ def truncate_sequences(
+ self,
+ ids: List[int],
+ pair_ids: Optional[List[int]] = None,
+ num_tokens_to_remove: int = 0,
+ truncation_strategy: Union[str, TruncationStrategy] = "longest_first",
+ stride: int = 0,
+ ) -> Tuple[List[int], List[int], List[int]]:
+ """
+ Truncates a sequence pair following the strategy and returns the truncated sequences together with the
+ overflowing tokens.
+
+ Args:
+ ids (`List[int]`):
+ Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and
+ `convert_tokens_to_ids` methods.
+ pair_ids (`List[int]`, *optional*):
+ Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
+ and `convert_tokens_to_ids` methods.
+ num_tokens_to_remove (`int`, *optional*, defaults to 0):
+ Number of tokens to remove using the truncation strategy.
+ truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `'longest_first'`):
+ The strategy to follow for truncation. Can be:
+
+ - `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
+ maximum acceptable input length for the model if that argument is not provided. This will truncate
+ token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
+ batch of pairs) is provided.
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
+ maximum acceptable input length for the model if that argument is not provided. This will only
+ truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
+ maximum acceptable input length for the model if that argument is not provided. This will only
+ truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
+ - `'do_not_truncate'`: No truncation (i.e., can output batch with sequence lengths greater
+ than the model maximum admissible input size).
+ stride (`int`, *optional*, defaults to 0):
+ If set to a positive number, the overflowing tokens returned will contain some tokens from the main
+ sequence returned. The value of this argument defines the number of additional tokens.
+
+ Returns:
+ `Tuple[List[int], List[int], List[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of
+ overflowing tokens. Note: The *longest_first* strategy returns an empty list of overflowing tokens if a pair
+ of sequences (or a batch of pairs) is provided.
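+
+ Example (a minimal sketch with placeholder ids; assumes `tokenizer` is any loaded tokenizer with the
+ default `truncation_side == "right"`):
+
+ ```python
+ >>> ids, pair_ids = [1, 2, 3, 4, 5], [6, 7, 8]
+ >>> tokenizer.truncate_sequences(ids, pair_ids=pair_ids, num_tokens_to_remove=2, truncation_strategy="only_first")
+ ([1, 2, 3], [6, 7, 8], [4, 5])
+ ```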
+ """
+ if num_tokens_to_remove <= 0:
+ return ids, pair_ids, []
+
+ if not isinstance(truncation_strategy, TruncationStrategy):
+ truncation_strategy = TruncationStrategy(truncation_strategy)
+
+ overflowing_tokens = []
+ if truncation_strategy == TruncationStrategy.ONLY_FIRST or (
+ truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is None
+ ):
+ if len(ids) > num_tokens_to_remove:
+ window_len = min(len(ids), stride + num_tokens_to_remove)
+ if self.truncation_side == "left":
+ overflowing_tokens = ids[:window_len]
+ ids = ids[num_tokens_to_remove:]
+ elif self.truncation_side == "right":
+ overflowing_tokens = ids[-window_len:]
+ ids = ids[:-num_tokens_to_remove]
+ else:
+ raise ValueError(f"invalid truncation strategy: {self.truncation_side}, use 'left' or 'right'.")
+
+ else:
+ error_msg = (
+ f"We need to remove {num_tokens_to_remove} to truncate the input "
+ f"but the first sequence has a length {len(ids)}. "
+ )
+ if truncation_strategy == TruncationStrategy.ONLY_FIRST:
+ error_msg = (
+ error_msg + "Please select another truncation strategy than "
+ f"{truncation_strategy}, for instance 'longest_first' or 'only_second'."
+ )
+ logger.error(error_msg)
+ elif truncation_strategy == TruncationStrategy.LONGEST_FIRST:
+ logger.warning(
+ "Be aware, overflowing tokens are not returned for the setting you have chosen,"
+ f" i.e. sequence pairs with the '{TruncationStrategy.LONGEST_FIRST.value}' "
+ "truncation strategy. So the returned list will always be empty even if some "
+ "tokens have been removed."
+ )
+ for _ in range(num_tokens_to_remove):
+ if pair_ids is None or len(ids) > len(pair_ids):
+ if self.truncation_side == "right":
+ ids = ids[:-1]
+ elif self.truncation_side == "left":
+ ids = ids[1:]
+ else:
+ raise ValueError("invalid truncation strategy:" + str(self.truncation_side))
+ else:
+ if self.truncation_side == "right":
+ pair_ids = pair_ids[:-1]
+ elif self.truncation_side == "left":
+ pair_ids = pair_ids[1:]
+ else:
+ raise ValueError("invalid truncation strategy:" + str(self.truncation_side))
+ elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None:
+ if len(pair_ids) > num_tokens_to_remove:
+ window_len = min(len(pair_ids), stride + num_tokens_to_remove)
+ if self.truncation_side == "right":
+ overflowing_tokens = pair_ids[-window_len:]
+ pair_ids = pair_ids[:-num_tokens_to_remove]
+ elif self.truncation_side == "left":
+ overflowing_tokens = pair_ids[:window_len]
+ pair_ids = pair_ids[num_tokens_to_remove:]
+ else:
+ raise ValueError("invalid truncation strategy:" + str(self.truncation_side))
+ else:
+ logger.error(
+ f"We need to remove {num_tokens_to_remove} to truncate the input "
+ f"but the second sequence has a length {len(pair_ids)}. "
+ f"Please select another truncation strategy than {truncation_strategy}, "
+ "for instance 'longest_first' or 'only_first'."
+ )
+
+ return (ids, pair_ids, overflowing_tokens)
+
+ def _pad(
+ self,
+ encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
+ max_length: Optional[int] = None,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ pad_to_multiple_of: Optional[int] = None,
+ return_attention_mask: Optional[bool] = None,
+ ) -> dict:
+ """
+ Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
+
+ Args:
+ encoded_inputs:
+ Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
+ max_length: maximum length of the returned list and optionally padding length (see below).
+ Will truncate by taking into account the special tokens.
+ padding_strategy: PaddingStrategy to use for padding.
+
+ - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
+ - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
+ - PaddingStrategy.DO_NOT_PAD: Do not pad
+ The tokenizer padding sides are defined in self.padding_side:
+
+ - 'left': pads on the left of the sequences
+ - 'right': pads on the right of the sequences
+ pad_to_multiple_of: (optional) Integer; if set, will pad the sequence to a multiple of the provided value.
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
+ `>= 7.0` (Volta).
+ return_attention_mask:
+ (optional) Set to False to avoid returning attention mask (default: set to model specifics)
+ """
+ # Load from model defaults
+ if return_attention_mask is None:
+ return_attention_mask = "attention_mask" in self.model_input_names
+
+ required_input = encoded_inputs[self.model_input_names[0]]
+
+ if padding_strategy == PaddingStrategy.LONGEST:
+ max_length = len(required_input)
+
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
+
+ needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
+
+ # Initialize attention mask if not present.
+ if return_attention_mask and "attention_mask" not in encoded_inputs:
+ encoded_inputs["attention_mask"] = [1] * len(required_input)
+
+ if needs_to_be_padded:
+ difference = max_length - len(required_input)
+
+ if self.padding_side == "right":
+ if return_attention_mask:
+ encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
+ if "token_type_ids" in encoded_inputs:
+ encoded_inputs["token_type_ids"] = (
+ encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
+ )
+ if "special_tokens_mask" in encoded_inputs:
+ encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
+ encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
+ elif self.padding_side == "left":
+ if return_attention_mask:
+ encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
+ if "token_type_ids" in encoded_inputs:
+ encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
+ "token_type_ids"
+ ]
+ if "special_tokens_mask" in encoded_inputs:
+ encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
+ encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
+ else:
+ raise ValueError("Invalid padding strategy:" + str(self.padding_side))
+
+ return encoded_inputs
+
+ def convert_tokens_to_string(self, tokens: List[str]) -> str:
+ """
+ Converts a sequence of tokens into a single string. The simplest way to do it is `" ".join(tokens)` but we
+ often want to remove sub-word tokenization artifacts at the same time.
+
+ Args:
+ tokens (`List[str]`): The tokens to join into a string.
+
+ Returns:
+ `str`: The joined tokens.
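+
+ Example (a minimal sketch; assumes `tokenizer` is a WordPiece-style tokenizer such as BERT's, which strips
+ the `##` sub-word prefixes when joining):
+
+ ```python
+ >>> tokenizer.convert_tokens_to_string(["hello", "##world"])
+ 'helloworld'
+ ```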
+ """
+ raise NotImplementedError
+
+ def batch_decode(
+ self,
+ sequences: Union[List[int], List[List[int]], "np.ndarray", "torch.Tensor", "tf.Tensor"],
+ skip_special_tokens: bool = False,
+ clean_up_tokenization_spaces: Optional[bool] = None,
+ **kwargs,
+ ) -> List[str]:
+ """
+ Convert a list of lists of token ids into a list of strings by calling decode.
+
+ Args:
+ sequences (`Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]`):
+ List of tokenized input ids. Can be obtained using the `__call__` method.
+ skip_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not to remove special tokens in the decoding.
+ clean_up_tokenization_spaces (`bool`, *optional*):
+ Whether or not to clean up the tokenization spaces. If `None`, will default to
+ `self.clean_up_tokenization_spaces`.
+ kwargs (additional keyword arguments, *optional*):
+ Will be passed to the underlying model specific decode method.
+
+ Returns:
+ `List[str]`: The list of decoded sentences.
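+
+ Example (a minimal sketch, assuming a `bert-base-uncased` checkpoint; the lower-cased output is specific to
+ that uncased model):
+
+ ```python
+ >>> from transformers import AutoTokenizer
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+ >>> batch = tokenizer(["Hello world", "Goodbye"])
+ >>> tokenizer.batch_decode(batch["input_ids"], skip_special_tokens=True)
+ ['hello world', 'goodbye']
+ ```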
+ """
+ return [
+ self.decode(
+ seq,
+ skip_special_tokens=skip_special_tokens,
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+ **kwargs,
+ )
+ for seq in sequences
+ ]
+
+ def decode(
+ self,
+ token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
+ skip_special_tokens: bool = False,
+ clean_up_tokenization_spaces: Optional[bool] = None,
+ **kwargs,
+ ) -> str:
+ """
+ Converts a sequence of ids into a string, using the tokenizer and vocabulary with options to remove special
+ tokens and clean up tokenization spaces.
+
+ Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.
+
+ Args:
+ token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):
+ List of tokenized input ids. Can be obtained using the `__call__` method.
+ skip_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not to remove special tokens in the decoding.
+ clean_up_tokenization_spaces (`bool`, *optional*):
+ Whether or not to clean up the tokenization spaces. If `None`, will default to
+ `self.clean_up_tokenization_spaces`.
+ kwargs (additional keyword arguments, *optional*):
+ Will be passed to the underlying model specific decode method.
+
+ Returns:
+ `str`: The decoded sentence.
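+
+ Example (a minimal sketch, assuming a `bert-base-uncased` checkpoint; other tokenizers use different
+ special tokens and casing):
+
+ ```python
+ >>> from transformers import AutoTokenizer
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+ >>> ids = tokenizer("Hello world")["input_ids"]
+ >>> tokenizer.decode(ids)
+ '[CLS] hello world [SEP]'
+ >>> tokenizer.decode(ids, skip_special_tokens=True)
+ 'hello world'
+ ```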
+ """
+ # Convert inputs to python lists
+ token_ids = to_py_obj(token_ids)
+
+ return self._decode(
+ token_ids=token_ids,
+ skip_special_tokens=skip_special_tokens,
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+ **kwargs,
+ )
+
+ def _decode(
+ self,
+ token_ids: Union[int, List[int]],
+ skip_special_tokens: bool = False,
+ clean_up_tokenization_spaces: Optional[bool] = None,
+ **kwargs,
+ ) -> str:
+ raise NotImplementedError
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of ids of the first sequence.
+ token_ids_1 (`List[int]`, *optional*):
+ List of ids of the second sequence.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
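+
+ Example (a minimal sketch, assuming a `bert-base-uncased` checkpoint and ids that already contain the
+ special tokens):
+
+ ```python
+ >>> from transformers import AutoTokenizer
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+ >>> ids = tokenizer("Hello world")["input_ids"]
+ >>> mask = tokenizer.get_special_tokens_mask(ids, already_has_special_tokens=True)
+ >>> # 1 marks special tokens such as [CLS]/[SEP], 0 marks regular tokens.
+ >>> mask[0], mask[-1]
+ (1, 1)
+ ```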
+ """
+ assert already_has_special_tokens and token_ids_1 is None, (
+ "You cannot use ``already_has_special_tokens=False`` with this tokenizer. "
+ "Please use a slow (full python) tokenizer to activate this argument. "
+ "Or set `return_special_tokens_mask=True` when calling the encoding method "
+ "to get the special tokens mask in any tokenizer. "
+ )
+
+ all_special_ids = self.all_special_ids # cache the property
+
+ special_tokens_mask = [1 if token in all_special_ids else 0 for token in token_ids_0]
+
+ return special_tokens_mask
+
+ @staticmethod
+ def clean_up_tokenization(out_string: str) -> str:
+ """
+ Clean up a list of simple English tokenization artifacts like spaces before punctuation and abbreviated forms.
+
+ Args:
+ out_string (`str`): The text to clean up.
+
+ Returns:
+ `str`: The cleaned-up string.
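+
+ Example (a minimal sketch; this is a plain string clean-up and `tokenizer` can be any tokenizer instance,
+ since the method is static):
+
+ ```python
+ >>> tokenizer.clean_up_tokenization("Do n't worry , it 's fine .")
+ "Don't worry, it's fine."
+ ```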
+ """
+ out_string = (
+ out_string.replace(" .", ".")
+ .replace(" ?", "?")
+ .replace(" !", "!")
+ .replace(" ,", ",")
+ .replace(" ' ", "'")
+ .replace(" n't", "n't")
+ .replace(" 'm", "'m")
+ .replace(" 's", "'s")
+ .replace(" 've", "'ve")
+ .replace(" 're", "'re")
+ )
+ return out_string
+
+ def _eventual_warn_about_too_long_sequence(self, ids: List[int], max_length: Optional[int], verbose: bool):
+ """
+ Depending on the input and internal state, we might trigger a warning about a sequence that is too long for its
+ corresponding model.
+
+ Args:
+ ids (`List[int]`): The ids produced by the tokenization
+ max_length (`int`, *optional*): The max_length desired (does not trigger a warning if it is set)
+ verbose (`bool`): Whether or not to print more information and warnings.
+
+ """
+ if max_length is None and len(ids) > self.model_max_length and verbose:
+ if not self.deprecation_warnings.get("sequence-length-is-longer-than-the-specified-maximum", False):
+ logger.warning(
+ "Token indices sequence length is longer than the specified maximum sequence length "
+ f"for this model ({len(ids)} > {self.model_max_length}). Running this sequence through the model "
+ "will result in indexing errors"
+ )
+ self.deprecation_warnings["sequence-length-is-longer-than-the-specified-maximum"] = True
+
+ def _switch_to_input_mode(self):
+ """
+ Private method to put the tokenizer in input mode (when it has different modes for inputs/outputs)
+ """
+ pass
+
+ def _switch_to_target_mode(self):
+ """
+ Private method to put the tokenizer in target mode (when it has different modes for inputs/outputs)
+ """
+ pass
+
+ @contextmanager
+ def as_target_tokenizer(self):
+ """
+ Temporarily sets the tokenizer for encoding the targets. Useful for tokenizers associated with
+ sequence-to-sequence models that need slightly different processing for the labels.
+ """
+ warnings.warn(
+ "`as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your "
+ "labels by using the argument `text_target` of the regular `__call__` method (either in the same call as "
+ "your input texts if you use the same keyword arguments, or in a separate call."
+ )
+ self._switch_to_target_mode()
+ self._in_target_context_manager = True
+ yield
+ self._in_target_context_manager = False
+ self._switch_to_input_mode()
+
+ @classmethod
+ def register_for_auto_class(cls, auto_class="AutoTokenizer"):
+ """
+ Register this class with a given auto class. This should only be used for custom tokenizers as the ones in the
+ library are already mapped with `AutoTokenizer`.
+
+ This API is experimental and may have some slight breaking changes in the next releases.
+
+ Args:
+ auto_class (`str` or `type`, *optional*, defaults to `"AutoTokenizer"`):
+ The auto class to register this new tokenizer with.
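+
+ Example (a minimal sketch; `MyCustomTokenizer` is a hypothetical subclass defined in your own module):
+
+ ```python
+ >>> # Register your custom class so that `AutoTokenizer.from_pretrained` can load it
+ >>> # from a repository saved with `save_pretrained`.
+ >>> MyCustomTokenizer.register_for_auto_class("AutoTokenizer")
+ ```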
+ """
+ if not isinstance(auto_class, str):
+ auto_class = auto_class.__name__
+
+ import transformers.models.auto as auto_module
+
+ if not hasattr(auto_module, auto_class):
+ raise ValueError(f"{auto_class} is not a valid auto class.")
+
+ cls._auto_class = auto_class
+
+ def prepare_seq2seq_batch(
+ self,
+ src_texts: List[str],
+ tgt_texts: Optional[List[str]] = None,
+ max_length: Optional[int] = None,
+ max_target_length: Optional[int] = None,
+ padding: str = "longest",
+ return_tensors: Optional[str] = None,
+ truncation: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+ Prepare model inputs for translation. For best performance, translate one sentence at a time.
+
+ Arguments:
+ src_texts (`List[str]`):
+ List of documents to summarize or source language texts.
+ tgt_texts (`list`, *optional*):
+ List of summaries or target language texts.
+ max_length (`int`, *optional*):
+ Controls the maximum length for encoder inputs (documents to summarize or source language texts). If
+ left unset or set to `None`, this will use the predefined model maximum length if a maximum length is
+ required by one of the truncation/padding parameters. If the model has no specific maximum input length
+ (like XLNet), truncation/padding to a maximum length will be deactivated.
+ max_target_length (`int`, *optional*):
+ Controls the maximum length of decoder inputs (target language texts or summaries). If left unset or set
+ to `None`, this will use the `max_length` value.
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `'longest'`):
+ Activates and controls padding. Accepts the following values:
+
+ - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
+ sequence is provided).
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+ acceptable input length for the model if that argument is not provided.
+ - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different
+ lengths).
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
+ If set, will return tensors instead of list of python integers. Acceptable values are:
+
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
+ - `'np'`: Return Numpy `np.ndarray` objects.
+ truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `True`):
+ Activates and controls truncation. Accepts the following values:
+
+ - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
+ to the maximum acceptable input length for the model if that argument is not provided. This will
+ truncate token by token, removing a token from the longest sequence in the pair if a pair of
+ sequences (or a batch of pairs) is provided.
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
+ maximum acceptable input length for the model if that argument is not provided. This will only
+ truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
+ maximum acceptable input length for the model if that argument is not provided. This will only
+ truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
+ - `False` or `'do_not_truncate'`: No truncation (i.e., can output batch with sequence lengths
+ greater than the model maximum admissible input size).
+ **kwargs:
+ Additional keyword arguments passed along to `self.__call__`.
+
+ Return:
+ [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
+
+ - **input_ids** -- List of token ids to be fed to the encoder.
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model.
+ - **labels** -- List of token ids for tgt_texts.
+
+ The full set of keys `[input_ids, attention_mask, labels]` will only be returned if `tgt_texts` is passed.
+ Otherwise, `input_ids` and `attention_mask` will be the only keys.
+ """
+ # docstyle-ignore
+ formatted_warning = """
+`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of HuggingFace Transformers. Use the regular
+`__call__` method to prepare your inputs and targets.
+
+Here is a short example:
+
+model_inputs = tokenizer(src_texts, text_target=tgt_texts, ...)
+
+If you need to use different keyword arguments for the source and target texts, you should do two calls like
+this:
+
+model_inputs = tokenizer(src_texts, ...)
+labels = tokenizer(text_target=tgt_texts, ...)
+model_inputs["labels"] = labels["input_ids"]
+
+See the documentation of your specific tokenizer for more details on the specific arguments to the tokenizer of choice.
+For a more complete example, see the implementation of `prepare_seq2seq_batch`.
+"""
+ warnings.warn(formatted_warning, FutureWarning)
+ # mBART-specific kwargs that should be ignored by other models.
+ kwargs.pop("src_lang", None)
+ kwargs.pop("tgt_lang", None)
+ if max_length is None:
+ max_length = self.model_max_length
+ model_inputs = self(
+ src_texts,
+ add_special_tokens=True,
+ return_tensors=return_tensors,
+ max_length=max_length,
+ padding=padding,
+ truncation=truncation,
+ **kwargs,
+ )
+ if tgt_texts is None:
+ return model_inputs
+ # Process tgt_texts
+ if max_target_length is None:
+ max_target_length = max_length
+ with self.as_target_tokenizer():
+ labels = self(
+ tgt_texts,
+ add_special_tokens=True,
+ return_tensors=return_tensors,
+ padding=padding,
+ max_length=max_target_length,
+ truncation=truncation,
+ **kwargs,
+ )
+ model_inputs["labels"] = labels["input_ids"]
+ return model_inputs
+
+
+def get_fast_tokenizer_file(tokenization_files: List[str]) -> str:
+ """
+ Get the tokenization file to use for this version of transformers.
+
+ Args:
+ tokenization_files (`List[str]`): The list of available tokenization files.
+
+ Returns:
+ `str`: The tokenization file to use.
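+
+ Example (a minimal sketch with hypothetical file names; assumes the installed `transformers` version is at
+ least as recent as the versioned file):
+
+ ```python
+ >>> # "tokenizer.4.0.0.json" targets transformers >= 4.0.0, so it is preferred over the
+ >>> # default "tokenizer.json" when the installed version is new enough.
+ >>> get_fast_tokenizer_file(["tokenizer.json", "tokenizer.4.0.0.json"])
+ 'tokenizer.4.0.0.json'
+ ```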
+ """
+ tokenizer_files_map = {}
+ for file_name in tokenization_files:
+ search = _re_tokenizer_file.search(file_name)
+ if search is not None:
+ v = search.groups()[0]
+ tokenizer_files_map[v] = file_name
+ available_versions = sorted(tokenizer_files_map.keys())
+
+ # Defaults to FULL_TOKENIZER_FILE and then try to look at some newer versions.
+ tokenizer_file = FULL_TOKENIZER_FILE
+ transformers_version = version.parse(__version__)
+ for v in available_versions:
+ if version.parse(v) <= transformers_version:
+ tokenizer_file = tokenizer_files_map[v]
+ else:
+ # No point going further since the versions are sorted.
+ break
+
+ return tokenizer_file
+
+
+# To update the docstring, we need to copy the method, otherwise we change the original docstring.
+PreTrainedTokenizerBase.push_to_hub = copy_func(PreTrainedTokenizerBase.push_to_hub)
+if PreTrainedTokenizerBase.push_to_hub.__doc__ is not None:
+ PreTrainedTokenizerBase.push_to_hub.__doc__ = PreTrainedTokenizerBase.push_to_hub.__doc__.format(
+ object="tokenizer", object_class="AutoTokenizer", object_files="tokenizer files"
+ )